Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/Analysis/BranchProbabilityInfo/pr22718.ll | 17
-rw-r--r--  llvm/test/Analysis/CostModel/ARM/fparith.ll | 172
-rw-r--r--  llvm/test/Analysis/CostModel/ARM/fptoi_sat.ll | 612
-rw-r--r--  llvm/test/Analysis/CostModel/ARM/intrinsic-cost-kinds.ll | 371
-rw-r--r--  llvm/test/Analysis/CostModel/ARM/mve-intrinsic-cost-kinds.ll | 181
-rw-r--r--  llvm/test/Analysis/CostModel/ARM/mve-target-intrinsics.ll | 24
-rw-r--r--  llvm/test/Analysis/CostModel/ARM/target-intrinsics.ll | 41
-rw-r--r--  llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll | 6
-rw-r--r--  llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll | 15
-rw-r--r--  llvm/test/Analysis/DependenceAnalysis/MIVCheckConst.ll | 4
-rw-r--r--  llvm/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll | 4
-rw-r--r--  llvm/test/Analysis/MemoryDependenceAnalysis/invariant.group-bug.ll | 13
-rw-r--r--  llvm/test/Analysis/MemorySSA/pr28880.ll | 5
-rw-r--r--  llvm/test/Analysis/MemorySSA/pr39197.ll | 34
-rw-r--r--  llvm/test/Analysis/MemorySSA/pr40038.ll | 11
-rw-r--r--  llvm/test/Analysis/MemorySSA/pr43569.ll | 8
-rw-r--r--  llvm/test/Analysis/ScalarEvolution/pr22674.ll | 4
-rw-r--r--  llvm/test/Analysis/ScalarEvolution/scev-canonical-mode.ll | 4
-rw-r--r--  llvm/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll | 7
-rw-r--r--  llvm/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll | 26
-rw-r--r--  llvm/test/Assembler/atomic.ll | 19
-rw-r--r--  llvm/test/Bitcode/DILocation-implicit-code.ll | 31
-rw-r--r--  llvm/test/Bitcode/drop-debug-info.3.5.ll | 4
-rw-r--r--  llvm/test/Bitcode/upgrade-tbaa.ll | 4
-rw-r--r--  llvm/test/CodeGen/AArch64/load-zext-bitcast.ll | 51
-rw-r--r--  llvm/test/CodeGen/AArch64/pr164181.ll | 640
-rw-r--r--  llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fdiv.f64.ll | 7
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fmad-formation-fmul-distribute-denormal-mode.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fmed3.bf16.ll | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fmed3.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fneg-combines.legal.f16.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fneg-combines.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/frem.ll | 11
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fsqrt.f64.ll | 3
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fsqrt.r600.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/inline-attr.ll | 16
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.amdgcn.add.min.max.ll | 191
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.exp2.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/llvm.log2.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/minmax.ll | 2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/stackguard.ll | 14
-rw-r--r--  llvm/test/CodeGen/ARM/2014-05-14-DwarfEHCrash.ll | 2
-rw-r--r--  llvm/test/CodeGen/ARM/ARMLoadStoreDBG.mir | 4
-rw-r--r--  llvm/test/CodeGen/ARM/O3-pipeline.ll | 1
-rw-r--r--  llvm/test/CodeGen/ARM/Windows/wineh-basic.ll | 4
-rw-r--r--  llvm/test/CodeGen/ARM/byval_load_align.ll | 4
-rw-r--r--  llvm/test/CodeGen/ARM/call-graph-section-addrtaken.ll | 2
-rw-r--r--  llvm/test/CodeGen/ARM/call-graph-section-assembly.ll | 2
-rw-r--r--  llvm/test/CodeGen/ARM/cfguard-module-flag.ll | 2
-rw-r--r--  llvm/test/CodeGen/ARM/clang-section.ll | 4
-rw-r--r--  llvm/test/CodeGen/ARM/cmse-clear-float-bigend.mir | 2
-rw-r--r--  llvm/test/CodeGen/ARM/coalesce-dbgvalue.ll | 4
-rw-r--r--  llvm/test/CodeGen/ARM/constantpool-promote-dbg.ll | 2
-rw-r--r--  llvm/test/CodeGen/ARM/constantpool-promote.ll | 4
-rw-r--r--  llvm/test/CodeGen/ARM/early-cfi-sections.ll | 2
-rw-r--r--  llvm/test/CodeGen/ARM/fp16-vld.ll | 2
-rw-r--r--  llvm/test/CodeGen/ARM/global-merge-1.ll | 6
-rw-r--r--  llvm/test/CodeGen/ARM/isel-v8i32-crash.ll | 2
-rw-r--r--  llvm/test/CodeGen/ARM/kcfi-arm.ll | 138
-rw-r--r--  llvm/test/CodeGen/ARM/kcfi-cbz-range.ll | 81
-rw-r--r--  llvm/test/CodeGen/ARM/kcfi-patchable-function-prefix.ll | 99
-rw-r--r--  llvm/test/CodeGen/ARM/kcfi-thumb.ll | 215
-rw-r--r--  llvm/test/CodeGen/ARM/kcfi-thumb2.ll | 163
-rw-r--r--  llvm/test/CodeGen/ARM/kcfi.ll | 28
-rw-r--r--  llvm/test/CodeGen/ARM/out-of-registers.ll | 2
-rw-r--r--  llvm/test/CodeGen/ARM/relax-per-target-feature.ll | 2
-rw-r--r--  llvm/test/CodeGen/ARM/softfp-constant-comparison.ll | 2
-rw-r--r--  llvm/test/CodeGen/ARM/stack-protector-bmovpcb_call.ll | 4
-rw-r--r--  llvm/test/CodeGen/ARM/stack_guard_remat.ll | 2
-rw-r--r--  llvm/test/CodeGen/ARM/struct-byval-frame-index.ll | 2
-rw-r--r--  llvm/test/CodeGen/ARM/subtarget-align.ll | 2
-rw-r--r--  llvm/test/CodeGen/ARM/unschedule-first-call.ll | 2
-rw-r--r--  llvm/test/CodeGen/ARM/vector-spilling.ll | 2
-rw-r--r--  llvm/test/CodeGen/ARM/vldm-sched-a9.ll | 2
-rw-r--r--  llvm/test/CodeGen/DirectX/CBufferAccess/unused.ll | 13
-rw-r--r--  llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll | 12
-rw-r--r--  llvm/test/CodeGen/MSP430/libcalls.ll | 14
-rw-r--r--  llvm/test/CodeGen/RISCV/GlobalISel/store-fp-zero-to-x0.ll | 320
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll | 815
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll | 2336
-rw-r--r--  llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_shader_clock/shader_clock.ll | 9
-rw-r--r--  llvm/test/CodeGen/Thumb/PR17309.ll | 4
-rw-r--r--  llvm/test/CodeGen/Thumb/fastcc.ll | 2
-rw-r--r--  llvm/test/CodeGen/Thumb/ldm-merge-call.ll | 4
-rw-r--r--  llvm/test/CodeGen/Thumb/stack_guard_remat.ll | 2
-rw-r--r--  llvm/test/CodeGen/Thumb/stm-merge.ll | 2
-rw-r--r--  llvm/test/CodeGen/Thumb2/LowOverheadLoops/vpt-block-debug.mir | 2
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-1-pred.mir | 2
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-2-preds.mir | 2
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-ctrl-flow.mir | 2
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-non-consecutive-ins.mir | 2
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks.mir | 2
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vpt-3-blocks-kill-vpr.mir | 2
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vpt-block-1-ins.mir | 2
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vpt-block-2-ins.mir | 2
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vpt-block-4-ins.mir | 2
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vpt-block-elses.mir | 2
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vpt-block-fold-vcmp.mir | 2
-rw-r--r--  llvm/test/CodeGen/Thumb2/mve-vpt-block-optnone.mir | 2
-rw-r--r--  llvm/test/CodeGen/Thumb2/pacbti-m-outliner-4.ll | 2
-rw-r--r--  llvm/test/CodeGen/Thumb2/stack_guard_remat.ll | 2
-rw-r--r--  llvm/test/CodeGen/Thumb2/t2sizereduction.mir | 2
-rw-r--r--  llvm/test/CodeGen/WebAssembly/memory-interleave.ll | 5
-rw-r--r--  llvm/test/CodeGen/WebAssembly/simd-relaxed-fmax.ll | 244
-rw-r--r--  llvm/test/CodeGen/WebAssembly/simd-relaxed-fmin.ll | 246
-rw-r--r--  llvm/test/CodeGen/X86/atomic-load-store.ll | 367
-rw-r--r--  llvm/test/CodeGen/X86/basic-block-address-map-empty-function.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/basic-block-address-map-function-sections.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/basic-block-address-map-pgo-features.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/basic-block-address-map-with-basic-block-sections.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/basic-block-address-map-with-emit-bb-hash.ll | 94
-rw-r--r--  llvm/test/CodeGen/X86/basic-block-address-map-with-mfs.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/basic-block-address-map.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/call-graph-section-addrtaken.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/call-graph-section-assembly.ll | 2
-rw-r--r--  llvm/test/TableGen/intrinsic-manual-name.td | 6
-rw-r--r--  llvm/test/ThinLTO/X86/devirt_external_comdat_same_guid.ll | 10
-rw-r--r--  llvm/test/ThinLTO/X86/dtlto/json.ll | 20
-rw-r--r--  llvm/test/Transforms/ADCE/2016-09-06.ll | 4
-rw-r--r--  llvm/test/Transforms/ADCE/blocks-with-dead-term-nondeterministic.ll | 4
-rw-r--r--  llvm/test/Transforms/AddDiscriminators/basic.ll | 4
-rw-r--r--  llvm/test/Transforms/AddDiscriminators/call-nested.ll | 9
-rw-r--r--  llvm/test/Transforms/AddDiscriminators/call.ll | 7
-rw-r--r--  llvm/test/Transforms/AddDiscriminators/diamond.ll | 10
-rw-r--r--  llvm/test/Transforms/AddDiscriminators/first-only.ll | 4
-rw-r--r--  llvm/test/Transforms/AddDiscriminators/invoke.ll | 30
-rw-r--r--  llvm/test/Transforms/AddDiscriminators/multiple.ll | 4
-rw-r--r--  llvm/test/Transforms/AddDiscriminators/no-discriminators.ll | 7
-rw-r--r--  llvm/test/Transforms/AddDiscriminators/oneline.ll | 7
-rw-r--r--  llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll | 30
-rw-r--r--  llvm/test/Transforms/CodeGenPrepare/ARM/bitreverse-recognize.ll | 4
-rw-r--r--  llvm/test/Transforms/CodeGenPrepare/X86/bitreverse-hang.ll | 4
-rw-r--r--  llvm/test/Transforms/CodeGenPrepare/dom-tree.ll | 4
-rw-r--r--  llvm/test/Transforms/ConstantHoisting/X86/ehpad.ll | 7
-rw-r--r--  llvm/test/Transforms/ConstraintElimination/add-nsw.ll | 6
-rw-r--r--  llvm/test/Transforms/ConstraintElimination/gep-arithmetic-add.ll | 6
-rw-r--r--  llvm/test/Transforms/Coroutines/coro-debug.ll | 37
-rw-r--r--  llvm/test/Transforms/Coroutines/coro-split-dbg.ll | 49
-rw-r--r--  llvm/test/Transforms/DeadArgElim/dbginfo.ll | 6
-rw-r--r--  llvm/test/Transforms/DeadStoreElimination/matrix-intrinsics.ll | 8
-rw-r--r--  llvm/test/Transforms/DeadStoreElimination/mda-with-dbg-values.ll | 3
-rw-r--r--  llvm/test/Transforms/FunctionImport/Inputs/funcimport_debug.ll | 4
-rw-r--r--  llvm/test/Transforms/FunctionImport/funcimport_debug.ll | 7
-rw-r--r--  llvm/test/Transforms/GCOVProfiling/exit-block.ll | 8
-rw-r--r--  llvm/test/Transforms/GCOVProfiling/linezero.ll | 19
-rw-r--r--  llvm/test/Transforms/GCOVProfiling/split-indirectbr-critical-edges.ll | 4
-rw-r--r--  llvm/test/Transforms/GVN/cond_br2.ll | 35
-rw-r--r--  llvm/test/Transforms/GVN/matrix-intrinsics.ll | 12
-rw-r--r--  llvm/test/Transforms/GVN/pr33549.ll | 6
-rw-r--r--  llvm/test/Transforms/GVN/pr42605.ll | 2
-rw-r--r--  llvm/test/Transforms/GVNHoist/hoist-unsafe-pr31729.ll | 10
-rw-r--r--  llvm/test/Transforms/GVNHoist/pr30499.ll | 10
-rw-r--r--  llvm/test/Transforms/IndVarSimplify/X86/widen-nsw.ll | 4
-rw-r--r--  llvm/test/Transforms/Inline/always-inline-attr.ll | 10
-rw-r--r--  llvm/test/Transforms/Inline/debug-info-duplicate-calls.ll | 7
-rw-r--r--  llvm/test/Transforms/Inline/inline-vla.ll | 10
-rw-r--r--  llvm/test/Transforms/Inline/optimization-remarks-hotness-threshold.ll | 6
-rw-r--r--  llvm/test/Transforms/Inline/optimization-remarks-passed-deleted-callee-yaml.ll | 6
-rw-r--r--  llvm/test/Transforms/Inline/optimization-remarks-passed-yaml.ll | 6
-rw-r--r--  llvm/test/Transforms/Inline/optimization-remarks.ll | 6
-rw-r--r--  llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-to-svbool-binops.ll | 33
-rw-r--r--  llvm/test/Transforms/InstCombine/bitreverse-hang.ll | 4
-rw-r--r--  llvm/test/Transforms/InstCombine/intrinsic-select.ll | 36
-rw-r--r--  llvm/test/Transforms/InstCombine/phi.ll | 50
-rw-r--r--  llvm/test/Transforms/InstCombine/ptrtoaddr.ll | 131
-rw-r--r--  llvm/test/Transforms/InstCombine/select-extractelement-inseltpoison.ll | 3
-rw-r--r--  llvm/test/Transforms/InstCombine/select-extractelement.ll | 18
-rw-r--r--  llvm/test/Transforms/InstCombine/select_frexp.ll | 8
-rw-r--r--  llvm/test/Transforms/InstCombine/sub-gep.ll | 33
-rw-r--r--  llvm/test/Transforms/JumpThreading/ddt-crash3.ll | 6
-rw-r--r--  llvm/test/Transforms/LICM/volatile-alias.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopRotate/noalias.ll | 6
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/X86/pr17473.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/pr18165.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/two-combinations-bug.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopUnroll/runtime-epilog-debuginfo.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/maxbandwidth-regpressure.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll | 226
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-constant-ops.ll | 80
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll | 14
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll | 108
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll | 90
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll | 751
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-interleave.ll | 26
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll | 48
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll | 48
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/pr33053.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/reg-usage.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-constant-ops.ll | 44
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll | 66
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll | 8
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll | 121
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll | 32
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory.ll | 6
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll | 11
-rw-r--r--  llvm/test/Transforms/LoopVectorize/ARM/arm-ieee-vectorize.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll | 52
-rw-r--r--  llvm/test/Transforms/LoopVectorize/WebAssembly/memory-interleave.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/WebAssembly/partial-reduce-accumulate.ll | 126
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/int128_no_gather.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/tail_folding_and_assume_safety.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/transform-narrow-interleave-to-widen-memory.ll | 137
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-profitable.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/diag-missing-instr-debug-loc.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/diag-with-hotness-info-2.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/diag-with-hotness-info.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/metadata-width.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/runtime-check-address-space.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/runtime-check-readonly-address-space.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/scev-exitlim-crash.ll | 4
-rw-r--r--  llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll | 8
-rw-r--r--  llvm/test/Transforms/MergeICmps/X86/pr41917.ll | 16
-rw-r--r--  llvm/test/Transforms/NewGVN/basic-cyclic-opt.ll | 8
-rw-r--r--  llvm/test/Transforms/NewGVN/cond_br2-xfail.ll | 25
-rw-r--r--  llvm/test/Transforms/NewGVN/equivalent-phi.ll | 4
-rw-r--r--  llvm/test/Transforms/NewGVN/memory-handling.ll | 6
-rw-r--r--  llvm/test/Transforms/NewGVN/pr31483.ll | 11
-rw-r--r--  llvm/test/Transforms/NewGVN/pr31501.ll | 6
-rw-r--r--  llvm/test/Transforms/NewGVN/pr33187.ll | 9
-rw-r--r--  llvm/test/Transforms/NewGVN/pr33305.ll | 17
-rw-r--r--  llvm/test/Transforms/NewGVN/pr34430.ll | 4
-rw-r--r--  llvm/test/Transforms/NewGVN/pr34452.ll | 9
-rw-r--r--  llvm/test/Transforms/OpenMP/dead_use.ll | 18
-rw-r--r--  llvm/test/Transforms/OpenMP/icv_remarks.ll | 43
-rw-r--r--  llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll | 29
-rw-r--r--  llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll | 28
-rw-r--r--  llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll | 29
-rw-r--r--  llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll | 26
-rw-r--r--  llvm/test/Transforms/PGOProfile/misexpect-branch.ll | 28
-rw-r--r--  llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll | 47
-rw-r--r--  llvm/test/Transforms/PGOProfile/misexpect-switch.ll | 47
-rw-r--r--  llvm/test/Transforms/PhaseOrdering/always-inline-alloca-promotion.ll | 5
-rw-r--r--  llvm/test/Transforms/Util/dbg-user-of-aext.ll | 7
-rw-r--r--  llvm/test/Transforms/Util/libcalls-fast-math-inf-loop.ll | 14
-rw-r--r--  llvm/test/Verifier/atomics.ll | 15
-rw-r--r--  llvm/test/tools/llvm-ir2vec/embeddings-flowaware.ll | 2
-rw-r--r--  llvm/test/tools/llvm-ir2vec/embeddings-symbolic.ll | 2
-rw-r--r--  llvm/test/tools/llvm-ir2vec/embeddings-symbolic.mir | 92
-rw-r--r--  llvm/test/tools/llvm-ir2vec/entities.mir | 28
-rw-r--r--  llvm/test/tools/llvm-ir2vec/error-handling.ll | 2
-rw-r--r--  llvm/test/tools/llvm-ir2vec/error-handling.mir | 41
-rw-r--r--  llvm/test/tools/llvm-ir2vec/output/lit.local.cfg | 3
-rw-r--r--  llvm/test/tools/llvm-ir2vec/output/reference_triplets.txt | 33
-rw-r--r--  llvm/test/tools/llvm-ir2vec/output/reference_x86_entities.txt | 7174
-rw-r--r--  llvm/test/tools/llvm-ir2vec/triplets.mir | 61
-rw-r--r--  llvm/test/tools/llvm-objdump/MachO/disassemble-source-dsym.test | 31
-rw-r--r--  llvm/test/tools/llvm-readobj/ELF/section-types.test | 5
-rw-r--r--  llvm/test/tools/opt/no-target-machine.ll | 18
258 files changed, 15390 insertions, 3327 deletions
diff --git a/llvm/test/Analysis/BranchProbabilityInfo/pr22718.ll b/llvm/test/Analysis/BranchProbabilityInfo/pr22718.ll
index ed851f2..67ce44e 100644
--- a/llvm/test/Analysis/BranchProbabilityInfo/pr22718.ll
+++ b/llvm/test/Analysis/BranchProbabilityInfo/pr22718.ll
@@ -12,14 +12,14 @@
@.str = private unnamed_addr constant [17 x i8] c"x = %lu\0Ay = %lu\0A\00", align 1
; Function Attrs: inlinehint nounwind uwtable
-define i32 @main() #0 {
+define i32 @main() {
entry:
%retval = alloca i32, align 4
%i = alloca i64, align 8
store i32 0, ptr %retval
store i64 0, ptr @y, align 8
store i64 0, ptr @x, align 8
- call void @srand(i32 422304) #3
+ call void @srand(i32 422304)
store i64 0, ptr %i, align 8
br label %for.cond
@@ -29,7 +29,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.end, !prof !1
for.body: ; preds = %for.cond
- %call = call i32 @rand() #3
+ %call = call i32 @rand()
%conv = sitofp i32 %call to double
%mul = fmul double %conv, 1.000000e+02
%div = fdiv double %mul, 0x41E0000000000000
@@ -65,17 +65,12 @@ for.end: ; preds = %for.cond
}
; Function Attrs: nounwind
-declare void @srand(i32) #1
+declare void @srand(i32)
; Function Attrs: nounwind
-declare i32 @rand() #1
+declare i32 @rand()
-declare i32 @printf(ptr, ...) #2
-
-attributes #0 = { inlinehint nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { nounwind }
+declare i32 @printf(ptr, ...)
!llvm.ident = !{!0}
diff --git a/llvm/test/Analysis/CostModel/ARM/fparith.ll b/llvm/test/Analysis/CostModel/ARM/fparith.ll
index f9424e8..6f2626c 100644
--- a/llvm/test/Analysis/CostModel/ARM/fparith.ll
+++ b/llvm/test/Analysis/CostModel/ARM/fparith.ll
@@ -1,21 +1,21 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve < %s | FileCheck %s --check-prefix=CHECK-MVE
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve.fp < %s | FileCheck %s --check-prefix=CHECK-MVEFP
+; RUN: opt -passes="print<cost-model>" -cost-kind=all 2>&1 -disable-output -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve < %s | FileCheck %s --check-prefix=CHECK-MVE
+; RUN: opt -passes="print<cost-model>" -cost-kind=all 2>&1 -disable-output -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve.fp < %s | FileCheck %s --check-prefix=CHECK-MVEFP
target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
define void @f32() {
; CHECK-MVE-LABEL: 'f32'
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %c = fadd float undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %d = fsub float undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = fmul float undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVE-NEXT: Cost Model: Found costs of 1 for: %c = fadd float undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 1 for: %d = fsub float undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 1 for: %e = fmul float undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-MVEFP-LABEL: 'f32'
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %c = fadd float undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %d = fsub float undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = fmul float undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %c = fadd float undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %d = fsub float undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %e = fmul float undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%c = fadd float undef, undef
%d = fsub float undef, undef
@@ -25,16 +25,16 @@ define void @f32() {
define void @f16() {
; CHECK-MVE-LABEL: 'f16'
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %c = fadd half undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %d = fsub half undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = fmul half undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVE-NEXT: Cost Model: Found costs of 1 for: %c = fadd half undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 1 for: %d = fsub half undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 1 for: %e = fmul half undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-MVEFP-LABEL: 'f16'
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %c = fadd half undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %d = fsub half undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = fmul half undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %c = fadd half undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %d = fsub half undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %e = fmul half undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%c = fadd half undef, undef
%d = fsub half undef, undef
@@ -44,16 +44,16 @@ define void @f16() {
define void @f64() {
; CHECK-MVE-LABEL: 'f64'
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %c = fadd double undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %d = fsub double undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = fmul double undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVE-NEXT: Cost Model: Found costs of 1 for: %c = fadd double undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 1 for: %d = fsub double undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 1 for: %e = fmul double undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-MVEFP-LABEL: 'f64'
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %c = fadd double undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %d = fsub double undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = fmul double undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %c = fadd double undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %d = fsub double undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %e = fmul double undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%c = fadd double undef, undef
%d = fsub double undef, undef
@@ -63,28 +63,28 @@ define void @f64() {
define void @vf32() {
; CHECK-MVE-LABEL: 'vf32'
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %c2 = fadd <2 x float> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %d2 = fsub <2 x float> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %e2 = fmul <2 x float> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %c4 = fadd <4 x float> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %d4 = fsub <4 x float> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %e4 = fmul <4 x float> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %c8 = fadd <8 x float> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %d8 = fsub <8 x float> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %e8 = fmul <8 x float> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVE-NEXT: Cost Model: Found costs of 4 for: %c2 = fadd <2 x float> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 4 for: %d2 = fsub <2 x float> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 4 for: %e2 = fmul <2 x float> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 8 for: %c4 = fadd <4 x float> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 8 for: %d4 = fsub <4 x float> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 8 for: %e4 = fmul <4 x float> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 16 for: %c8 = fadd <8 x float> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 16 for: %d8 = fsub <8 x float> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 16 for: %e8 = fmul <8 x float> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-MVEFP-LABEL: 'vf32'
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %c2 = fadd <2 x float> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %d2 = fsub <2 x float> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e2 = fmul <2 x float> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %c4 = fadd <4 x float> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %d4 = fsub <4 x float> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e4 = fmul <4 x float> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %c8 = fadd <8 x float> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %d8 = fsub <8 x float> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %e8 = fmul <8 x float> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %c2 = fadd <2 x float> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %d2 = fsub <2 x float> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %e2 = fmul <2 x float> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %c4 = fadd <4 x float> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %d4 = fsub <4 x float> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %e4 = fmul <4 x float> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %c8 = fadd <8 x float> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %d8 = fsub <8 x float> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %e8 = fmul <8 x float> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%c2 = fadd <2 x float> undef, undef
%d2 = fsub <2 x float> undef, undef
@@ -100,28 +100,28 @@ define void @vf32() {
define void @vf16() {
; CHECK-MVE-LABEL: 'vf16'
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %c2 = fadd <2 x half> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %d2 = fsub <2 x half> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %e2 = fmul <2 x half> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %c4 = fadd <4 x half> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %d4 = fsub <4 x half> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %e4 = fmul <4 x half> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %c8 = fadd <8 x half> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %d8 = fsub <8 x half> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %e8 = fmul <8 x half> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVE-NEXT: Cost Model: Found costs of 4 for: %c2 = fadd <2 x half> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 4 for: %d2 = fsub <2 x half> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 4 for: %e2 = fmul <2 x half> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 8 for: %c4 = fadd <4 x half> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 8 for: %d4 = fsub <4 x half> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 8 for: %e4 = fmul <4 x half> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 16 for: %c8 = fadd <8 x half> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 16 for: %d8 = fsub <8 x half> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 16 for: %e8 = fmul <8 x half> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-MVEFP-LABEL: 'vf16'
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %c2 = fadd <2 x half> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %d2 = fsub <2 x half> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e2 = fmul <2 x half> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %c4 = fadd <4 x half> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %d4 = fsub <4 x half> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e4 = fmul <4 x half> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %c8 = fadd <8 x half> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %d8 = fsub <8 x half> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e8 = fmul <8 x half> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %c2 = fadd <2 x half> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %d2 = fsub <2 x half> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %e2 = fmul <2 x half> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %c4 = fadd <4 x half> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %d4 = fsub <4 x half> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %e4 = fmul <4 x half> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %c8 = fadd <8 x half> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %d8 = fsub <8 x half> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %e8 = fmul <8 x half> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%c2 = fadd <2 x half> undef, undef
%d2 = fsub <2 x half> undef, undef
@@ -137,28 +137,28 @@ define void @vf16() {
define void @vf64() {
; CHECK-MVE-LABEL: 'vf64'
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %c2 = fadd <2 x double> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %d2 = fsub <2 x double> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %e2 = fmul <2 x double> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %c4 = fadd <4 x double> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %d4 = fsub <4 x double> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %e4 = fmul <4 x double> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %c8 = fadd <8 x double> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %d8 = fsub <8 x double> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %e8 = fmul <8 x double> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVE-NEXT: Cost Model: Found costs of 4 for: %c2 = fadd <2 x double> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 4 for: %d2 = fsub <2 x double> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 4 for: %e2 = fmul <2 x double> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 8 for: %c4 = fadd <4 x double> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 8 for: %d4 = fsub <4 x double> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 8 for: %e4 = fmul <4 x double> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 16 for: %c8 = fadd <8 x double> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 16 for: %d8 = fsub <8 x double> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 16 for: %e8 = fmul <8 x double> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-MVEFP-LABEL: 'vf64'
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %c2 = fadd <2 x double> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %d2 = fsub <2 x double> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %e2 = fmul <2 x double> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %c4 = fadd <4 x double> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %d4 = fsub <4 x double> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %e4 = fmul <4 x double> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %c8 = fadd <8 x double> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %d8 = fsub <8 x double> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %e8 = fmul <8 x double> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 4 for: %c2 = fadd <2 x double> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 4 for: %d2 = fsub <2 x double> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 4 for: %e2 = fmul <2 x double> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 8 for: %c4 = fadd <4 x double> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 8 for: %d4 = fsub <4 x double> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 8 for: %e4 = fmul <4 x double> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 16 for: %c8 = fadd <8 x double> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 16 for: %d8 = fsub <8 x double> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 16 for: %e8 = fmul <8 x double> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%c2 = fadd <2 x double> undef, undef
%d2 = fsub <2 x double> undef, undef
diff --git a/llvm/test/Analysis/CostModel/ARM/fptoi_sat.ll b/llvm/test/Analysis/CostModel/ARM/fptoi_sat.ll
index aff7b19..af548f6 100644
--- a/llvm/test/Analysis/CostModel/ARM/fptoi_sat.ll
+++ b/llvm/test/Analysis/CostModel/ARM/fptoi_sat.ll
@@ -1,213 +1,213 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve < %s | FileCheck %s --check-prefix=CHECK-MVE
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve.fp,+fp64 < %s | FileCheck %s --check-prefix=CHECK-MVEFP
+; RUN: opt -passes="print<cost-model>" -cost-kind=all 2>&1 -disable-output -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve < %s | FileCheck %s --check-prefix=CHECK-MVE
+; RUN: opt -passes="print<cost-model>" -cost-kind=all 2>&1 -disable-output -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve.fp,+fp64 < %s | FileCheck %s --check-prefix=CHECK-MVEFP
define void @casts() {
; CHECK-MVE-LABEL: 'casts'
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f32s1 = call i1 @llvm.fptosi.sat.i1.f32(float undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f32u1 = call i1 @llvm.fptoui.sat.i1.f32(float undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f32s8 = call i8 @llvm.fptosi.sat.i8.f32(float undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f32u8 = call i8 @llvm.fptoui.sat.i8.f32(float undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f32s16 = call i16 @llvm.fptosi.sat.i16.f32(float undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f32u16 = call i16 @llvm.fptoui.sat.i16.f32(float undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f32s32 = call i32 @llvm.fptosi.sat.i32.f32(float undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f32u32 = call i32 @llvm.fptoui.sat.i32.f32(float undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %f32s64 = call i64 @llvm.fptosi.sat.i64.f32(float undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f32u64 = call i64 @llvm.fptoui.sat.i64.f32(float undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f64s1 = call i1 @llvm.fptosi.sat.i1.f64(double undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f64u1 = call i1 @llvm.fptoui.sat.i1.f64(double undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f64s8 = call i8 @llvm.fptosi.sat.i8.f64(double undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f64u8 = call i8 @llvm.fptoui.sat.i8.f64(double undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f64s16 = call i16 @llvm.fptosi.sat.i16.f64(double undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f64u16 = call i16 @llvm.fptoui.sat.i16.f64(double undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f64s32 = call i32 @llvm.fptosi.sat.i32.f64(double undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f64u32 = call i32 @llvm.fptoui.sat.i32.f64(double undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %f64s64 = call i64 @llvm.fptosi.sat.i64.f64(double undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f64u64 = call i64 @llvm.fptoui.sat.i64.f64(double undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 120 for instruction: %v2f32s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f32(<2 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f32u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f32(<2 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 102 for instruction: %v2f32s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f32(<2 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f32u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f32(<2 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 102 for instruction: %v2f32s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f32(<2 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f32u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f32(<2 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 102 for instruction: %v2f32s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f32(<2 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f32u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f32(<2 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %v2f32s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f32(<2 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 120 for instruction: %v2f32u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f32(<2 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 120 for instruction: %v2f64s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f64(<2 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f64u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f64(<2 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 102 for instruction: %v2f64s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f64(<2 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f64u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f64(<2 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 102 for instruction: %v2f64s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f64(<2 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f64u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f64(<2 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 102 for instruction: %v2f64s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f64(<2 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f64u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f64(<2 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %v2f64s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f64(<2 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 120 for instruction: %v2f64u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f64(<2 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 240 for instruction: %v4f32s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f32(<4 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v4f32u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f32(<4 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 202 for instruction: %v4f32s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f32(<4 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v4f32u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f32(<4 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 202 for instruction: %v4f32s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f32(<4 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v4f32u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f32(<4 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 202 for instruction: %v4f32s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f32(<4 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v4f32u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f32(<4 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 406 for instruction: %v4f32s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f32(<4 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 378 for instruction: %v4f32u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f32(<4 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 314 for instruction: %v4f64s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f64(<4 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 250 for instruction: %v4f64u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f64(<4 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 276 for instruction: %v4f64s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f64(<4 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 250 for instruction: %v4f64u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f64(<4 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 276 for instruction: %v4f64s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f64(<4 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 250 for instruction: %v4f64u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f64(<4 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 276 for instruction: %v4f64s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f64(<4 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 250 for instruction: %v4f64u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f64(<4 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 404 for instruction: %v4f64s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f64(<4 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 376 for instruction: %v4f64u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f64(<4 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 626 for instruction: %v8f32s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f32(<8 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 498 for instruction: %v8f32u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f32(<8 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 548 for instruction: %v8f32s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f32(<8 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 498 for instruction: %v8f32u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f32(<8 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 548 for instruction: %v8f32s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f32(<8 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 498 for instruction: %v8f32u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f32(<8 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 548 for instruction: %v8f32s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f32(<8 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 496 for instruction: %v8f32u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f32(<8 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1360 for instruction: %v8f32s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f32(<8 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1304 for instruction: %v8f32u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f32(<8 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 922 for instruction: %v8f64s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f64(<8 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 794 for instruction: %v8f64u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f64(<8 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 844 for instruction: %v8f64s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f64(<8 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 794 for instruction: %v8f64u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f64(<8 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 844 for instruction: %v8f64s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f64(<8 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 794 for instruction: %v8f64u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f64(<8 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 844 for instruction: %v8f64s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f64(<8 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 792 for instruction: %v8f64u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f64(<8 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1352 for instruction: %v8f64s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f64(<8 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1296 for instruction: %v8f64u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f64(<8 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1834 for instruction: %v16f32s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f32(<16 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1578 for instruction: %v16f32u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f32(<16 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1676 for instruction: %v16f32s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f32(<16 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1578 for instruction: %v16f32u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f32(<16 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1676 for instruction: %v16f32s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f32(<16 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1576 for instruction: %v16f32u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f32(<16 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1672 for instruction: %v16f32s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f32(<16 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1568 for instruction: %v16f32u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f32(<16 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4912 for instruction: %v16f32s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f32(<16 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4800 for instruction: %v16f32u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f32(<16 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 3018 for instruction: %v16f64s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f64(<16 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 2762 for instruction: %v16f64u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f64(<16 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 2860 for instruction: %v16f64s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f64(<16 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 2762 for instruction: %v16f64u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f64(<16 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 2860 for instruction: %v16f64s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f64(<16 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 2760 for instruction: %v16f64u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f64(<16 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 2856 for instruction: %v16f64s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f64(<16 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 2752 for instruction: %v16f64u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f64(<16 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4880 for instruction: %v16f64s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f64(<16 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4768 for instruction: %v16f64u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f64(<16 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:7 Lat:23 SizeLat:23 for: %f32s1 = call i1 @llvm.fptosi.sat.i1.f32(float undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f32u1 = call i1 @llvm.fptoui.sat.i1.f32(float undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:6 Lat:23 SizeLat:23 for: %f32s8 = call i8 @llvm.fptosi.sat.i8.f32(float undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f32u8 = call i8 @llvm.fptoui.sat.i8.f32(float undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:6 Lat:23 SizeLat:23 for: %f32s16 = call i16 @llvm.fptosi.sat.i16.f32(float undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f32u16 = call i16 @llvm.fptoui.sat.i16.f32(float undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:6 Lat:23 SizeLat:23 for: %f32s32 = call i32 @llvm.fptosi.sat.i32.f32(float undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f32u32 = call i32 @llvm.fptoui.sat.i32.f32(float undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:24 CodeSize:7 Lat:24 SizeLat:24 for: %f32s64 = call i64 @llvm.fptosi.sat.i64.f32(float undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f32u64 = call i64 @llvm.fptoui.sat.i64.f32(float undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:7 Lat:23 SizeLat:23 for: %f64s1 = call i1 @llvm.fptosi.sat.i1.f64(double undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f64u1 = call i1 @llvm.fptoui.sat.i1.f64(double undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:6 Lat:23 SizeLat:23 for: %f64s8 = call i8 @llvm.fptosi.sat.i8.f64(double undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f64u8 = call i8 @llvm.fptoui.sat.i8.f64(double undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:6 Lat:23 SizeLat:23 for: %f64s16 = call i16 @llvm.fptosi.sat.i16.f64(double undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f64u16 = call i16 @llvm.fptoui.sat.i16.f64(double undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:6 Lat:23 SizeLat:23 for: %f64s32 = call i32 @llvm.fptosi.sat.i32.f64(double undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f64u32 = call i32 @llvm.fptoui.sat.i32.f64(double undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:24 CodeSize:7 Lat:24 SizeLat:24 for: %f64s64 = call i64 @llvm.fptosi.sat.i64.f64(double undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f64u64 = call i64 @llvm.fptoui.sat.i64.f64(double undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:120 CodeSize:43 Lat:85 SizeLat:85 for: %v2f32s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f32(<2 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f32u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f32(<2 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:102 CodeSize:30 Lat:67 SizeLat:67 for: %v2f32s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f32(<2 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f32u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f32(<2 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:102 CodeSize:30 Lat:67 SizeLat:67 for: %v2f32s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f32(<2 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f32u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f32(<2 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:102 CodeSize:30 Lat:67 SizeLat:67 for: %v2f32s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f32(<2 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f32u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f32(<2 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:134 CodeSize:30 Lat:67 SizeLat:67 for: %v2f32s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f32(<2 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:120 CodeSize:17 Lat:53 SizeLat:53 for: %v2f32u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f32(<2 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:120 CodeSize:43 Lat:85 SizeLat:85 for: %v2f64s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f64(<2 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f64u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f64(<2 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:102 CodeSize:30 Lat:67 SizeLat:67 for: %v2f64s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f64(<2 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f64u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f64(<2 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:102 CodeSize:30 Lat:67 SizeLat:67 for: %v2f64s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f64(<2 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f64u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f64(<2 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:102 CodeSize:30 Lat:67 SizeLat:67 for: %v2f64s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f64(<2 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f64u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f64(<2 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:134 CodeSize:30 Lat:67 SizeLat:67 for: %v2f64s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f64(<2 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:120 CodeSize:17 Lat:53 SizeLat:53 for: %v2f64u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f64(<2 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:240 CodeSize:85 Lat:169 SizeLat:169 for: %v4f32s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f32(<4 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:176 CodeSize:33 Lat:105 SizeLat:105 for: %v4f32u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f32(<4 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:202 CodeSize:58 Lat:131 SizeLat:131 for: %v4f32s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f32(<4 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:176 CodeSize:33 Lat:105 SizeLat:105 for: %v4f32u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f32(<4 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:202 CodeSize:58 Lat:131 SizeLat:131 for: %v4f32s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f32(<4 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:176 CodeSize:33 Lat:105 SizeLat:105 for: %v4f32u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f32(<4 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:202 CodeSize:58 Lat:131 SizeLat:131 for: %v4f32s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f32(<4 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:176 CodeSize:33 Lat:105 SizeLat:105 for: %v4f32u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f32(<4 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:406 CodeSize:59 Lat:133 SizeLat:133 for: %v4f32s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f32(<4 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:378 CodeSize:33 Lat:105 SizeLat:105 for: %v4f32u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f32(<4 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:314 CodeSize:85 Lat:169 SizeLat:169 for: %v4f64s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f64(<4 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:250 CodeSize:33 Lat:105 SizeLat:105 for: %v4f64u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f64(<4 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:276 CodeSize:58 Lat:131 SizeLat:131 for: %v4f64s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f64(<4 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:250 CodeSize:33 Lat:105 SizeLat:105 for: %v4f64u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f64(<4 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:276 CodeSize:58 Lat:131 SizeLat:131 for: %v4f64s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f64(<4 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:250 CodeSize:33 Lat:105 SizeLat:105 for: %v4f64u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f64(<4 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:276 CodeSize:58 Lat:131 SizeLat:131 for: %v4f64s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f64(<4 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:250 CodeSize:33 Lat:105 SizeLat:105 for: %v4f64u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f64(<4 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:404 CodeSize:59 Lat:133 SizeLat:133 for: %v4f64s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f64(<4 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:376 CodeSize:33 Lat:105 SizeLat:105 for: %v4f64u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f64(<4 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:626 CodeSize:169 Lat:337 SizeLat:337 for: %v8f32s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f32(<8 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:498 CodeSize:65 Lat:209 SizeLat:209 for: %v8f32u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f32(<8 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:548 CodeSize:114 Lat:259 SizeLat:259 for: %v8f32s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f32(<8 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:498 CodeSize:65 Lat:209 SizeLat:209 for: %v8f32u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f32(<8 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:548 CodeSize:114 Lat:259 SizeLat:259 for: %v8f32s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f32(<8 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:498 CodeSize:65 Lat:209 SizeLat:209 for: %v8f32u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f32(<8 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:548 CodeSize:115 Lat:261 SizeLat:261 for: %v8f32s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f32(<8 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:496 CodeSize:65 Lat:209 SizeLat:209 for: %v8f32u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f32(<8 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1360 CodeSize:117 Lat:265 SizeLat:265 for: %v8f32s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f32(<8 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1304 CodeSize:65 Lat:209 SizeLat:209 for: %v8f32u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f32(<8 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:922 CodeSize:169 Lat:337 SizeLat:337 for: %v8f64s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f64(<8 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:794 CodeSize:65 Lat:209 SizeLat:209 for: %v8f64u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f64(<8 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:844 CodeSize:114 Lat:259 SizeLat:259 for: %v8f64s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f64(<8 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:794 CodeSize:65 Lat:209 SizeLat:209 for: %v8f64u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f64(<8 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:844 CodeSize:114 Lat:259 SizeLat:259 for: %v8f64s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f64(<8 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:794 CodeSize:65 Lat:209 SizeLat:209 for: %v8f64u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f64(<8 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:844 CodeSize:115 Lat:261 SizeLat:261 for: %v8f64s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f64(<8 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:792 CodeSize:65 Lat:209 SizeLat:209 for: %v8f64u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f64(<8 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1352 CodeSize:117 Lat:265 SizeLat:265 for: %v8f64s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f64(<8 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1296 CodeSize:65 Lat:209 SizeLat:209 for: %v8f64u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f64(<8 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1834 CodeSize:337 Lat:673 SizeLat:673 for: %v16f32s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f32(<16 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1578 CodeSize:129 Lat:417 SizeLat:417 for: %v16f32u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f32(<16 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1676 CodeSize:226 Lat:515 SizeLat:515 for: %v16f32s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f32(<16 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1578 CodeSize:129 Lat:417 SizeLat:417 for: %v16f32u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f32(<16 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1676 CodeSize:227 Lat:517 SizeLat:517 for: %v16f32s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f32(<16 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1576 CodeSize:129 Lat:417 SizeLat:417 for: %v16f32u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f32(<16 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1672 CodeSize:229 Lat:521 SizeLat:521 for: %v16f32s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f32(<16 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1568 CodeSize:129 Lat:417 SizeLat:417 for: %v16f32u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f32(<16 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:4912 CodeSize:233 Lat:529 SizeLat:529 for: %v16f32s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f32(<16 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:4800 CodeSize:129 Lat:417 SizeLat:417 for: %v16f32u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f32(<16 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:3018 CodeSize:337 Lat:673 SizeLat:673 for: %v16f64s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f64(<16 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:2762 CodeSize:129 Lat:417 SizeLat:417 for: %v16f64u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f64(<16 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:2860 CodeSize:226 Lat:515 SizeLat:515 for: %v16f64s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f64(<16 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:2762 CodeSize:129 Lat:417 SizeLat:417 for: %v16f64u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f64(<16 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:2860 CodeSize:227 Lat:517 SizeLat:517 for: %v16f64s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f64(<16 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:2760 CodeSize:129 Lat:417 SizeLat:417 for: %v16f64u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f64(<16 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:2856 CodeSize:229 Lat:521 SizeLat:521 for: %v16f64s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f64(<16 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:2752 CodeSize:129 Lat:417 SizeLat:417 for: %v16f64u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f64(<16 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:4880 CodeSize:233 Lat:529 SizeLat:529 for: %v16f64s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f64(<16 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:4768 CodeSize:129 Lat:417 SizeLat:417 for: %v16f64u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f64(<16 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-MVEFP-LABEL: 'casts'
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f32s1 = call i1 @llvm.fptosi.sat.i1.f32(float undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f32u1 = call i1 @llvm.fptoui.sat.i1.f32(float undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f32s8 = call i8 @llvm.fptosi.sat.i8.f32(float undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f32u8 = call i8 @llvm.fptoui.sat.i8.f32(float undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f32s16 = call i16 @llvm.fptosi.sat.i16.f32(float undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f32u16 = call i16 @llvm.fptoui.sat.i16.f32(float undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32s32 = call i32 @llvm.fptosi.sat.i32.f32(float undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32u32 = call i32 @llvm.fptoui.sat.i32.f32(float undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %f32s64 = call i64 @llvm.fptosi.sat.i64.f32(float undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %f32u64 = call i64 @llvm.fptoui.sat.i64.f32(float undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %f64s1 = call i1 @llvm.fptosi.sat.i1.f64(double undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %f64u1 = call i1 @llvm.fptoui.sat.i1.f64(double undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %f64s8 = call i8 @llvm.fptosi.sat.i8.f64(double undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %f64u8 = call i8 @llvm.fptoui.sat.i8.f64(double undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %f64s16 = call i16 @llvm.fptosi.sat.i16.f64(double undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %f64u16 = call i16 @llvm.fptoui.sat.i16.f64(double undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f64s32 = call i32 @llvm.fptosi.sat.i32.f64(double undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f64u32 = call i32 @llvm.fptoui.sat.i32.f64(double undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %f64s64 = call i64 @llvm.fptosi.sat.i64.f64(double undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %f64u64 = call i64 @llvm.fptoui.sat.i64.f64(double undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2f32s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f32(<2 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2f32u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f32(<2 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2f32s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f32(<2 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2f32u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f32(<2 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2f32s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f32(<2 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2f32u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f32(<2 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f32(<2 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f32(<2 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 76 for instruction: %v2f32s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f32(<2 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 72 for instruction: %v2f32u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f32(<2 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %v2f64s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f64(<2 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 52 for instruction: %v2f64u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f64(<2 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 62 for instruction: %v2f64s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f64(<2 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 52 for instruction: %v2f64u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f64(<2 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 62 for instruction: %v2f64s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f64(<2 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 52 for instruction: %v2f64u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f64(<2 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 62 for instruction: %v2f64s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f64(<2 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 52 for instruction: %v2f64u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f64(<2 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 94 for instruction: %v2f64s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f64(<2 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 84 for instruction: %v2f64u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f64(<2 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f32s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f32(<4 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f32u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f32(<4 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f32s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f32(<4 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f32u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f32(<4 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f32s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f32(<4 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f32u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f32(<4 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f32(<4 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f32(<4 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 284 for instruction: %v4f32s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f32(<4 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 278 for instruction: %v4f32u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f32(<4 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 234 for instruction: %v4f64s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f64(<4 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 178 for instruction: %v4f64u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f64(<4 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 196 for instruction: %v4f64s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f64(<4 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 178 for instruction: %v4f64u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f64(<4 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 196 for instruction: %v4f64s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f64(<4 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 178 for instruction: %v4f64u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f64(<4 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 196 for instruction: %v4f64s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f64(<4 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 178 for instruction: %v4f64u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f64(<4 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 324 for instruction: %v4f64s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f64(<4 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 304 for instruction: %v4f64u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f64(<4 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v8f32s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f32(<8 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v8f32u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f32(<8 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v8f32s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f32(<8 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v8f32u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f32(<8 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v8f32s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f32(<8 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v8f32u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f32(<8 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8f32s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f32(<8 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8f32u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f32(<8 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1148 for instruction: %v8f32s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f32(<8 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1104 for instruction: %v8f32u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f32(<8 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 762 for instruction: %v8f64s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f64(<8 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 650 for instruction: %v8f64u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f64(<8 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 684 for instruction: %v8f64s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f64(<8 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 650 for instruction: %v8f64u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f64(<8 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 684 for instruction: %v8f64s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f64(<8 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 650 for instruction: %v8f64u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f64(<8 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 684 for instruction: %v8f64s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f64(<8 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 648 for instruction: %v8f64u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f64(<8 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1192 for instruction: %v8f64s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f64(<8 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1152 for instruction: %v8f64u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f64(<8 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v16f32s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f32(<16 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v16f32u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f32(<16 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v16f32s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f32(<16 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v16f32u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f32(<16 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v16f32s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f32(<16 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v16f32u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f32(<16 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f32s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f32(<16 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f32u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f32(<16 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4488 for instruction: %v16f32s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f32(<16 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4400 for instruction: %v16f32u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f32(<16 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2698 for instruction: %v16f64s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f64(<16 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2474 for instruction: %v16f64u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f64(<16 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2540 for instruction: %v16f64s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f64(<16 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2474 for instruction: %v16f64u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f64(<16 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2540 for instruction: %v16f64s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f64(<16 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2472 for instruction: %v16f64u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f64(<16 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2536 for instruction: %v16f64s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f64(<16 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2464 for instruction: %v16f64u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f64(<16 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4560 for instruction: %v16f64s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f64(<16 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4480 for instruction: %v16f64u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f64(<16 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f32s1 = call i1 @llvm.fptosi.sat.i1.f32(float undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f32u1 = call i1 @llvm.fptoui.sat.i1.f32(float undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f32s8 = call i8 @llvm.fptosi.sat.i8.f32(float undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f32u8 = call i8 @llvm.fptoui.sat.i8.f32(float undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f32s16 = call i16 @llvm.fptosi.sat.i16.f32(float undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f32u16 = call i16 @llvm.fptoui.sat.i16.f32(float undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %f32s32 = call i32 @llvm.fptosi.sat.i32.f32(float undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %f32u32 = call i32 @llvm.fptoui.sat.i32.f32(float undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %f32s64 = call i64 @llvm.fptosi.sat.i64.f32(float undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 3 for: %f32u64 = call i64 @llvm.fptoui.sat.i64.f32(float undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:7 CodeSize:9 Lat:7 SizeLat:7 for: %f64s1 = call i1 @llvm.fptosi.sat.i1.f64(double undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:7 CodeSize:9 Lat:7 SizeLat:7 for: %f64u1 = call i1 @llvm.fptoui.sat.i1.f64(double undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:7 CodeSize:9 Lat:7 SizeLat:7 for: %f64s8 = call i8 @llvm.fptosi.sat.i8.f64(double undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:7 CodeSize:9 Lat:7 SizeLat:7 for: %f64u8 = call i8 @llvm.fptoui.sat.i8.f64(double undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:7 CodeSize:9 Lat:7 SizeLat:7 for: %f64s16 = call i16 @llvm.fptosi.sat.i16.f64(double undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:7 CodeSize:9 Lat:7 SizeLat:7 for: %f64u16 = call i16 @llvm.fptoui.sat.i16.f64(double undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %f64s32 = call i32 @llvm.fptosi.sat.i32.f64(double undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %f64u32 = call i32 @llvm.fptoui.sat.i32.f64(double undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:7 CodeSize:9 Lat:7 SizeLat:7 for: %f64s64 = call i64 @llvm.fptosi.sat.i64.f64(double undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:7 CodeSize:9 Lat:7 SizeLat:7 for: %f64u64 = call i64 @llvm.fptoui.sat.i64.f64(double undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v2f32s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f32(<2 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v2f32u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f32(<2 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v2f32s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f32(<2 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v2f32u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f32(<2 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v2f32s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f32(<2 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v2f32u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f32(<2 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %v2f32s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f32(<2 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %v2f32u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f32(<2 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:76 CodeSize:5 Lat:9 SizeLat:9 for: %v2f32s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f32(<2 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:72 CodeSize:3 Lat:5 SizeLat:5 for: %v2f32u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f32(<2 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:80 CodeSize:35 Lat:45 SizeLat:45 for: %v2f64s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f64(<2 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:52 CodeSize:17 Lat:17 SizeLat:17 for: %v2f64u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f64(<2 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:62 CodeSize:22 Lat:27 SizeLat:27 for: %v2f64s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f64(<2 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:52 CodeSize:17 Lat:17 SizeLat:17 for: %v2f64u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f64(<2 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:62 CodeSize:22 Lat:27 SizeLat:27 for: %v2f64s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f64(<2 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:52 CodeSize:17 Lat:17 SizeLat:17 for: %v2f64u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f64(<2 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:62 CodeSize:22 Lat:27 SizeLat:27 for: %v2f64s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f64(<2 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:52 CodeSize:17 Lat:17 SizeLat:17 for: %v2f64u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f64(<2 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:94 CodeSize:22 Lat:27 SizeLat:27 for: %v2f64s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f64(<2 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:84 CodeSize:17 Lat:17 SizeLat:17 for: %v2f64u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f64(<2 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v4f32s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f32(<4 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v4f32u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f32(<4 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v4f32s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f32(<4 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v4f32u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f32(<4 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v4f32s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f32(<4 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v4f32u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f32(<4 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %v4f32s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f32(<4 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %v4f32u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f32(<4 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:284 CodeSize:6 Lat:11 SizeLat:11 for: %v4f32s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f32(<4 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:278 CodeSize:3 Lat:5 SizeLat:5 for: %v4f32u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f32(<4 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:234 CodeSize:69 Lat:89 SizeLat:89 for: %v4f64s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f64(<4 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:178 CodeSize:33 Lat:33 SizeLat:33 for: %v4f64u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f64(<4 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:196 CodeSize:42 Lat:51 SizeLat:51 for: %v4f64s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f64(<4 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:178 CodeSize:33 Lat:33 SizeLat:33 for: %v4f64u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f64(<4 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:196 CodeSize:42 Lat:51 SizeLat:51 for: %v4f64s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f64(<4 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:178 CodeSize:33 Lat:33 SizeLat:33 for: %v4f64u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f64(<4 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:196 CodeSize:42 Lat:51 SizeLat:51 for: %v4f64s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f64(<4 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:178 CodeSize:33 Lat:33 SizeLat:33 for: %v4f64u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f64(<4 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:324 CodeSize:43 Lat:53 SizeLat:53 for: %v4f64s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f64(<4 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:304 CodeSize:33 Lat:33 SizeLat:33 for: %v4f64u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f64(<4 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:12 CodeSize:14 Lat:12 SizeLat:12 for: %v8f32s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f32(<8 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:12 CodeSize:14 Lat:12 SizeLat:12 for: %v8f32u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f32(<8 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:12 CodeSize:14 Lat:12 SizeLat:12 for: %v8f32s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f32(<8 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:12 CodeSize:14 Lat:12 SizeLat:12 for: %v8f32u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f32(<8 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:12 CodeSize:14 Lat:12 SizeLat:12 for: %v8f32s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f32(<8 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:12 CodeSize:14 Lat:12 SizeLat:12 for: %v8f32u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f32(<8 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %v8f32s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f32(<8 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %v8f32u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f32(<8 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:1148 CodeSize:43 Lat:53 SizeLat:53 for: %v8f32s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f32(<8 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:1104 CodeSize:5 Lat:9 SizeLat:9 for: %v8f32u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f32(<8 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:762 CodeSize:137 Lat:177 SizeLat:177 for: %v8f64s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f64(<8 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:650 CodeSize:65 Lat:65 SizeLat:65 for: %v8f64u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f64(<8 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:684 CodeSize:82 Lat:99 SizeLat:99 for: %v8f64s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f64(<8 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:650 CodeSize:65 Lat:65 SizeLat:65 for: %v8f64u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f64(<8 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:684 CodeSize:82 Lat:99 SizeLat:99 for: %v8f64s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f64(<8 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:650 CodeSize:65 Lat:65 SizeLat:65 for: %v8f64u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f64(<8 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:684 CodeSize:83 Lat:101 SizeLat:101 for: %v8f64s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f64(<8 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:648 CodeSize:65 Lat:65 SizeLat:65 for: %v8f64u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f64(<8 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:1192 CodeSize:85 Lat:105 SizeLat:105 for: %v8f64s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f64(<8 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:1152 CodeSize:65 Lat:65 SizeLat:65 for: %v8f64u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f64(<8 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:24 CodeSize:28 Lat:24 SizeLat:24 for: %v16f32s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f32(<16 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:24 CodeSize:28 Lat:24 SizeLat:24 for: %v16f32u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f32(<16 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:24 CodeSize:28 Lat:24 SizeLat:24 for: %v16f32s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f32(<16 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:24 CodeSize:28 Lat:24 SizeLat:24 for: %v16f32u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f32(<16 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:24 CodeSize:28 Lat:24 SizeLat:24 for: %v16f32s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f32(<16 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:24 CodeSize:28 Lat:24 SizeLat:24 for: %v16f32u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f32(<16 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %v16f32s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f32(<16 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %v16f32u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f32(<16 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4488 CodeSize:85 Lat:105 SizeLat:105 for: %v16f32s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f32(<16 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4400 CodeSize:9 Lat:17 SizeLat:17 for: %v16f32u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f32(<16 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2698 CodeSize:273 Lat:353 SizeLat:353 for: %v16f64s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f64(<16 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2474 CodeSize:129 Lat:129 SizeLat:129 for: %v16f64u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f64(<16 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2540 CodeSize:162 Lat:195 SizeLat:195 for: %v16f64s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f64(<16 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2474 CodeSize:129 Lat:129 SizeLat:129 for: %v16f64u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f64(<16 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2540 CodeSize:163 Lat:197 SizeLat:197 for: %v16f64s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f64(<16 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2472 CodeSize:129 Lat:129 SizeLat:129 for: %v16f64u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f64(<16 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2536 CodeSize:165 Lat:201 SizeLat:201 for: %v16f64s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f64(<16 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2464 CodeSize:129 Lat:129 SizeLat:129 for: %v16f64u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f64(<16 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4560 CodeSize:169 Lat:209 SizeLat:209 for: %v16f64s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f64(<16 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4480 CodeSize:129 Lat:129 SizeLat:129 for: %v16f64u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f64(<16 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%f32s1 = call i1 @llvm.fptosi.sat.i1.f32(float undef)
%f32u1 = call i1 @llvm.fptoui.sat.i1.f32(float undef)
@@ -324,110 +324,110 @@ define void @casts() {
define void @fp16() {
; CHECK-MVE-LABEL: 'fp16'
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f16s1 = call i1 @llvm.fptosi.sat.i1.f16(half undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f16u1 = call i1 @llvm.fptoui.sat.i1.f16(half undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f16s8 = call i8 @llvm.fptosi.sat.i8.f16(half undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f16u8 = call i8 @llvm.fptoui.sat.i8.f16(half undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f16s16 = call i16 @llvm.fptosi.sat.i16.f16(half undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f16u16 = call i16 @llvm.fptoui.sat.i16.f16(half undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f16s32 = call i32 @llvm.fptosi.sat.i32.f16(half undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f16u32 = call i32 @llvm.fptoui.sat.i32.f16(half undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %f16s64 = call i64 @llvm.fptosi.sat.i64.f16(half undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f16u64 = call i64 @llvm.fptoui.sat.i64.f16(half undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 120 for instruction: %v2f16s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f16(<2 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f16u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f16(<2 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 102 for instruction: %v2f16s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f16(<2 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f16u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f16(<2 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 102 for instruction: %v2f16s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f16(<2 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f16u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f16(<2 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 102 for instruction: %v2f16s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f16(<2 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f16u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f16(<2 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %v2f16s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f16(<2 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 120 for instruction: %v2f16u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f16(<2 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 240 for instruction: %v4f16s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f16(<4 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v4f16u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f16(<4 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 202 for instruction: %v4f16s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f16(<4 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v4f16u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f16(<4 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 202 for instruction: %v4f16s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f16(<4 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v4f16u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f16(<4 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 202 for instruction: %v4f16s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f16(<4 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v4f16u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f16(<4 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 406 for instruction: %v4f16s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f16(<4 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 378 for instruction: %v4f16u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f16(<4 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 480 for instruction: %v8f16s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f16(<8 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 352 for instruction: %v8f16u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f16(<8 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 402 for instruction: %v8f16s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f16(<8 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 352 for instruction: %v8f16u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f16(<8 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 402 for instruction: %v8f16s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f16(<8 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 352 for instruction: %v8f16u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f16(<8 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 550 for instruction: %v8f16s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f16(<8 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 498 for instruction: %v8f16u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f16(<8 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1362 for instruction: %v8f16s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f16(<8 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1306 for instruction: %v8f16u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f16(<8 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1250 for instruction: %v16f16s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f16(<16 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 994 for instruction: %v16f16u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f16(<16 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1092 for instruction: %v16f16s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f16(<16 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 994 for instruction: %v16f16u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f16(<16 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1092 for instruction: %v16f16s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f16(<16 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 992 for instruction: %v16f16u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f16(<16 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1680 for instruction: %v16f16s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f16(<16 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1576 for instruction: %v16f16u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f16(<16 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4920 for instruction: %v16f16s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f16(<16 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4808 for instruction: %v16f16u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f16(<16 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:7 Lat:23 SizeLat:23 for: %f16s1 = call i1 @llvm.fptosi.sat.i1.f16(half undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f16u1 = call i1 @llvm.fptoui.sat.i1.f16(half undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:6 Lat:23 SizeLat:23 for: %f16s8 = call i8 @llvm.fptosi.sat.i8.f16(half undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f16u8 = call i8 @llvm.fptoui.sat.i8.f16(half undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:6 Lat:23 SizeLat:23 for: %f16s16 = call i16 @llvm.fptosi.sat.i16.f16(half undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f16u16 = call i16 @llvm.fptoui.sat.i16.f16(half undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:6 Lat:23 SizeLat:23 for: %f16s32 = call i32 @llvm.fptosi.sat.i32.f16(half undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f16u32 = call i32 @llvm.fptoui.sat.i32.f16(half undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:24 CodeSize:7 Lat:24 SizeLat:24 for: %f16s64 = call i64 @llvm.fptosi.sat.i64.f16(half undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f16u64 = call i64 @llvm.fptoui.sat.i64.f16(half undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:120 CodeSize:43 Lat:85 SizeLat:85 for: %v2f16s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f16(<2 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f16u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f16(<2 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:102 CodeSize:30 Lat:67 SizeLat:67 for: %v2f16s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f16(<2 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f16u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f16(<2 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:102 CodeSize:30 Lat:67 SizeLat:67 for: %v2f16s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f16(<2 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f16u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f16(<2 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:102 CodeSize:30 Lat:67 SizeLat:67 for: %v2f16s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f16(<2 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f16u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f16(<2 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:134 CodeSize:30 Lat:67 SizeLat:67 for: %v2f16s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f16(<2 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:120 CodeSize:17 Lat:53 SizeLat:53 for: %v2f16u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f16(<2 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:240 CodeSize:85 Lat:169 SizeLat:169 for: %v4f16s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f16(<4 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:176 CodeSize:33 Lat:105 SizeLat:105 for: %v4f16u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f16(<4 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:202 CodeSize:58 Lat:131 SizeLat:131 for: %v4f16s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f16(<4 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:176 CodeSize:33 Lat:105 SizeLat:105 for: %v4f16u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f16(<4 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:202 CodeSize:58 Lat:131 SizeLat:131 for: %v4f16s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f16(<4 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:176 CodeSize:33 Lat:105 SizeLat:105 for: %v4f16u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f16(<4 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:202 CodeSize:58 Lat:131 SizeLat:131 for: %v4f16s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f16(<4 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:176 CodeSize:33 Lat:105 SizeLat:105 for: %v4f16u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f16(<4 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:406 CodeSize:59 Lat:133 SizeLat:133 for: %v4f16s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f16(<4 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:378 CodeSize:33 Lat:105 SizeLat:105 for: %v4f16u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f16(<4 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:480 CodeSize:169 Lat:337 SizeLat:337 for: %v8f16s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f16(<8 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:352 CodeSize:65 Lat:209 SizeLat:209 for: %v8f16u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f16(<8 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:402 CodeSize:114 Lat:259 SizeLat:259 for: %v8f16s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f16(<8 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:352 CodeSize:65 Lat:209 SizeLat:209 for: %v8f16u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f16(<8 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:402 CodeSize:114 Lat:259 SizeLat:259 for: %v8f16s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f16(<8 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:352 CodeSize:65 Lat:209 SizeLat:209 for: %v8f16u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f16(<8 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:550 CodeSize:115 Lat:261 SizeLat:261 for: %v8f16s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f16(<8 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:498 CodeSize:65 Lat:209 SizeLat:209 for: %v8f16u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f16(<8 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1362 CodeSize:117 Lat:265 SizeLat:265 for: %v8f16s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f16(<8 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1306 CodeSize:65 Lat:209 SizeLat:209 for: %v8f16u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f16(<8 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1250 CodeSize:337 Lat:673 SizeLat:673 for: %v16f16s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f16(<16 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:994 CodeSize:129 Lat:417 SizeLat:417 for: %v16f16u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f16(<16 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1092 CodeSize:226 Lat:515 SizeLat:515 for: %v16f16s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f16(<16 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:994 CodeSize:129 Lat:417 SizeLat:417 for: %v16f16u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f16(<16 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1092 CodeSize:227 Lat:517 SizeLat:517 for: %v16f16s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f16(<16 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:992 CodeSize:129 Lat:417 SizeLat:417 for: %v16f16u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f16(<16 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1680 CodeSize:229 Lat:521 SizeLat:521 for: %v16f16s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f16(<16 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1576 CodeSize:129 Lat:417 SizeLat:417 for: %v16f16u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f16(<16 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:4920 CodeSize:233 Lat:529 SizeLat:529 for: %v16f16s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f16(<16 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:4808 CodeSize:129 Lat:417 SizeLat:417 for: %v16f16u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f16(<16 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-MVEFP-LABEL: 'fp16'
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f16s1 = call i1 @llvm.fptosi.sat.i1.f16(half undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f16u1 = call i1 @llvm.fptoui.sat.i1.f16(half undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f16s8 = call i8 @llvm.fptosi.sat.i8.f16(half undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f16u8 = call i8 @llvm.fptoui.sat.i8.f16(half undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f16s16 = call i16 @llvm.fptosi.sat.i16.f16(half undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f16u16 = call i16 @llvm.fptoui.sat.i16.f16(half undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16s32 = call i32 @llvm.fptosi.sat.i32.f16(half undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16u32 = call i32 @llvm.fptoui.sat.i32.f16(half undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %f16s64 = call i64 @llvm.fptosi.sat.i64.f16(half undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %f16u64 = call i64 @llvm.fptoui.sat.i64.f16(half undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2f16s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f16(<2 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2f16u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f16(<2 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2f16s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f16(<2 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2f16u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f16(<2 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f16s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f16(<2 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f16u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f16(<2 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 44 for instruction: %v2f16s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f16(<2 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %v2f16u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f16(<2 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 76 for instruction: %v2f16s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f16(<2 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 72 for instruction: %v2f16u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f16(<2 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f16s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f16(<4 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f16u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f16(<4 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f16s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f16(<4 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f16u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f16(<4 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f16s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f16(<4 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f16u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f16(<4 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v4f16s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f16(<4 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f16u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f16(<4 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 284 for instruction: %v4f16s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f16(<4 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 278 for instruction: %v4f16u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f16(<4 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8f16s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f16(<8 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8f16u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f16(<8 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8f16s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f16(<8 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8f16u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f16(<8 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8f16s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f16(<8 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8f16u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f16(<8 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %v8f16s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f16(<8 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v8f16u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f16(<8 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1112 for instruction: %v8f16s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f16(<8 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1102 for instruction: %v8f16u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f16(<8 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v16f16s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f16(<16 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v16f16u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f16(<16 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v16f16s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f16(<16 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v16f16u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f16(<16 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16f16s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f16(<16 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16f16u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f16(<16 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 124 for instruction: %v16f16s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f16(<16 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v16f16u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f16(<16 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4484 for instruction: %v16f16s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f16(<16 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4400 for instruction: %v16f16u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f16(<16 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f16s1 = call i1 @llvm.fptosi.sat.i1.f16(half undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f16u1 = call i1 @llvm.fptoui.sat.i1.f16(half undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f16s8 = call i8 @llvm.fptosi.sat.i8.f16(half undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f16u8 = call i8 @llvm.fptoui.sat.i8.f16(half undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f16s16 = call i16 @llvm.fptosi.sat.i16.f16(half undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f16u16 = call i16 @llvm.fptoui.sat.i16.f16(half undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %f16s32 = call i32 @llvm.fptosi.sat.i32.f16(half undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %f16u32 = call i32 @llvm.fptoui.sat.i32.f16(half undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %f16s64 = call i64 @llvm.fptosi.sat.i64.f16(half undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 3 for: %f16u64 = call i64 @llvm.fptoui.sat.i64.f16(half undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v2f16s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f16(<2 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v2f16u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f16(<2 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v2f16s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f16(<2 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v2f16u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f16(<2 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %v2f16s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f16(<2 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %v2f16u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f16(<2 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:44 CodeSize:5 Lat:9 SizeLat:9 for: %v2f16s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f16(<2 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:40 CodeSize:3 Lat:5 SizeLat:5 for: %v2f16u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f16(<2 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:76 CodeSize:5 Lat:9 SizeLat:9 for: %v2f16s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f16(<2 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:72 CodeSize:3 Lat:5 SizeLat:5 for: %v2f16u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f16(<2 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v4f16s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f16(<4 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v4f16u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f16(<4 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v4f16s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f16(<4 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v4f16u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f16(<4 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %v4f16s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f16(<4 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %v4f16u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f16(<4 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:10 CodeSize:5 Lat:9 SizeLat:9 for: %v4f16s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f16(<4 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:3 Lat:5 SizeLat:5 for: %v4f16u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f16(<4 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:284 CodeSize:6 Lat:11 SizeLat:11 for: %v4f16s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f16(<4 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:278 CodeSize:3 Lat:5 SizeLat:5 for: %v4f16u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f16(<4 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v8f16s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f16(<8 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v8f16u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f16(<8 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v8f16s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f16(<8 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v8f16u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f16(<8 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %v8f16s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f16(<8 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %v8f16u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f16(<8 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:20 CodeSize:6 Lat:11 SizeLat:11 for: %v8f16s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f16(<8 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:14 CodeSize:3 Lat:5 SizeLat:5 for: %v8f16u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f16(<8 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:1112 CodeSize:8 Lat:15 SizeLat:15 for: %v8f16s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f16(<8 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:1102 CodeSize:3 Lat:5 SizeLat:5 for: %v8f16u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f16(<8 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:12 CodeSize:14 Lat:12 SizeLat:12 for: %v16f16s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f16(<16 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:12 CodeSize:14 Lat:12 SizeLat:12 for: %v16f16u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f16(<16 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:12 CodeSize:14 Lat:12 SizeLat:12 for: %v16f16s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f16(<16 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:12 CodeSize:14 Lat:12 SizeLat:12 for: %v16f16u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f16(<16 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %v16f16s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f16(<16 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %v16f16u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f16(<16 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:124 CodeSize:75 Lat:85 SizeLat:85 for: %v16f16s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f16(<16 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:48 CodeSize:5 Lat:9 SizeLat:9 for: %v16f16u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f16(<16 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4484 CodeSize:79 Lat:93 SizeLat:93 for: %v16f16s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f16(<16 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4400 CodeSize:5 Lat:9 SizeLat:9 for: %v16f16u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f16(<16 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%f16s1 = call i1 @llvm.fptosi.sat.i1.f16(half undef)
%f16u1 = call i1 @llvm.fptoui.sat.i1.f16(half undef)
diff --git a/llvm/test/Analysis/CostModel/ARM/intrinsic-cost-kinds.ll b/llvm/test/Analysis/CostModel/ARM/intrinsic-cost-kinds.ll
deleted file mode 100644
index 6377437..0000000
--- a/llvm/test/Analysis/CostModel/ARM/intrinsic-cost-kinds.ll
+++ /dev/null
@@ -1,371 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
-; RUN: opt -mtriple=armv8.1m.main -mattr=+mve.fp -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=throughput < %s | FileCheck %s --check-prefix=THRU
-; RUN: opt -mtriple=armv8.1m.main -mattr=+mve.fp -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=latency < %s | FileCheck %s --check-prefix=LATE
-; RUN: opt -mtriple=armv8.1m.main -mattr=+mve.fp -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=code-size < %s | FileCheck %s --check-prefix=SIZE
-; RUN: opt -mtriple=armv8.1m.main -mattr=+mve.fp -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=size-latency < %s | FileCheck %s --check-prefix=SIZE_LATE
-
-target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
-
-; Test a cross-section of intrinsics for various cost-kinds.
-; Other test files may check for accuracy of a particular intrinsic
-; across subtargets or types. This is just a basic correctness check using an
-; ARM target and a legal scalar type (i32/float) and/or an
-; illegal vector type (16 x i32/float).
-
-declare i32 @llvm.smax.i32(i32, i32)
-declare <16 x i32> @llvm.smax.v16i32(<16 x i32>, <16 x i32>)
-
-declare float @llvm.fmuladd.f32(float, float, float)
-declare <16 x float> @llvm.fmuladd.v16f32(<16 x float>, <16 x float>, <16 x float>)
-
-declare float @llvm.log2.f32(float)
-declare <16 x float> @llvm.log2.v16f32(<16 x float>)
-
-declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
-declare <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float>, <16 x float>, metadata, metadata)
-
-declare float @llvm.maximum.f32(float, float)
-declare <16 x float> @llvm.maximum.v16f32(<16 x float>, <16 x float>)
-
-declare i32 @llvm.cttz.i32(i32, i1)
-declare <16 x i32> @llvm.cttz.v16i32(<16 x i32>, i1)
-
-declare i32 @llvm.ctlz.i32(i32, i1)
-declare <16 x i32> @llvm.ctlz.v16i32(<16 x i32>, i1)
-
-declare i32 @llvm.fshl.i32(i32, i32, i32)
-declare <16 x i32> @llvm.fshl.v16i32(<16 x i32>, <16 x i32>, <16 x i32>)
-
-declare <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x float>)
-declare void @llvm.masked.scatter.v16f32.v16p0(<16 x float>, <16 x ptr>, i32, <16 x i1>)
-declare float @llvm.vector.reduce.fmax.v16f32(<16 x float>)
-
-declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1)
-
-declare i32 @llvm.ssa.copy.i32(i32)
-declare float @llvm.ssa.copy.f32(float)
-declare ptr @llvm.ssa.copy.p0(ptr)
-
-define void @smax(i32 %a, i32 %b, <16 x i32> %va, <16 x i32> %vb) {
-; THRU-LABEL: 'smax'
-; THRU-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %s = call i32 @llvm.smax.i32(i32 %a, i32 %b)
-; THRU-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.smax.v16i32(<16 x i32> %va, <16 x i32> %vb)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'smax'
-; LATE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %s = call i32 @llvm.smax.i32(i32 %a, i32 %b)
-; LATE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.smax.v16i32(<16 x i32> %va, <16 x i32> %vb)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'smax'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %s = call i32 @llvm.smax.i32(i32 %a, i32 %b)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v = call <16 x i32> @llvm.smax.v16i32(<16 x i32> %va, <16 x i32> %vb)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'smax'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %s = call i32 @llvm.smax.i32(i32 %a, i32 %b)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.smax.v16i32(<16 x i32> %va, <16 x i32> %vb)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %s = call i32 @llvm.smax.i32(i32 %a, i32 %b)
- %v = call <16 x i32> @llvm.smax.v16i32(<16 x i32> %va, <16 x i32> %vb)
- ret void
-}
-
-define void @fmuladd(float %a, float %b, float %c, <16 x float> %va, <16 x float> %vb, <16 x float> %vc) {
-; THRU-LABEL: 'fmuladd'
-; THRU-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
-; THRU-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'fmuladd'
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
-; LATE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'fmuladd'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'fmuladd'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %s = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
- %v = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc)
- ret void
-}
-
-define void @log2(float %a, <16 x float> %va) {
-; THRU-LABEL: 'log2'
-; THRU-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %s = call float @llvm.log2.f32(float %a)
-; THRU-NEXT: Cost Model: Found an estimated cost of 192 for instruction: %v = call <16 x float> @llvm.log2.v16f32(<16 x float> %va)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'log2'
-; LATE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %s = call float @llvm.log2.f32(float %a)
-; LATE-NEXT: Cost Model: Found an estimated cost of 192 for instruction: %v = call <16 x float> @llvm.log2.v16f32(<16 x float> %va)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'log2'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.log2.f32(float %a)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v = call <16 x float> @llvm.log2.v16f32(<16 x float> %va)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'log2'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %s = call float @llvm.log2.f32(float %a)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 192 for instruction: %v = call <16 x float> @llvm.log2.v16f32(<16 x float> %va)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %s = call float @llvm.log2.f32(float %a)
- %v = call <16 x float> @llvm.log2.v16f32(<16 x float> %va)
- ret void
-}
-
-define void @constrained_fadd(float %a, <16 x float> %va) strictfp {
-; THRU-LABEL: 'constrained_fadd'
-; THRU-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.experimental.constrained.fadd.f32(float %a, float %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
-; THRU-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %t = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %va, <16 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'constrained_fadd'
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.experimental.constrained.fadd.f32(float %a, float %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
-; LATE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %t = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %va, <16 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'constrained_fadd'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.experimental.constrained.fadd.f32(float %a, float %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
-; SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %t = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %va, <16 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'constrained_fadd'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.experimental.constrained.fadd.f32(float %a, float %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %t = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %va, <16 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %s = call float @llvm.experimental.constrained.fadd.f32(float %a, float %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
- %t = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %va, <16 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
- ret void
-}
-
-define void @fmaximum(float %a, float %b, <16 x float> %va, <16 x float> %vb) {
-; THRU-LABEL: 'fmaximum'
-; THRU-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %s = call float @llvm.maximum.f32(float %a, float %b)
-; THRU-NEXT: Cost Model: Found an estimated cost of 208 for instruction: %v = call <16 x float> @llvm.maximum.v16f32(<16 x float> %va, <16 x float> %vb)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'fmaximum'
-; LATE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %s = call float @llvm.maximum.f32(float %a, float %b)
-; LATE-NEXT: Cost Model: Found an estimated cost of 208 for instruction: %v = call <16 x float> @llvm.maximum.v16f32(<16 x float> %va, <16 x float> %vb)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'fmaximum'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.maximum.f32(float %a, float %b)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %v = call <16 x float> @llvm.maximum.v16f32(<16 x float> %va, <16 x float> %vb)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'fmaximum'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %s = call float @llvm.maximum.f32(float %a, float %b)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 208 for instruction: %v = call <16 x float> @llvm.maximum.v16f32(<16 x float> %va, <16 x float> %vb)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %s = call float @llvm.maximum.f32(float %a, float %b)
- %v = call <16 x float> @llvm.maximum.v16f32(<16 x float> %va, <16 x float> %vb)
- ret void
-}
-
-define void @cttz(i32 %a, <16 x i32> %va) {
-; THRU-LABEL: 'cttz'
-; THRU-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call i32 @llvm.cttz.i32(i32 %a, i1 false)
-; THRU-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %va, i1 false)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'cttz'
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call i32 @llvm.cttz.i32(i32 %a, i1 false)
-; LATE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %va, i1 false)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'cttz'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call i32 @llvm.cttz.i32(i32 %a, i1 false)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %va, i1 false)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'cttz'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call i32 @llvm.cttz.i32(i32 %a, i1 false)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %va, i1 false)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %s = call i32 @llvm.cttz.i32(i32 %a, i1 false)
- %v = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %va, i1 false)
- ret void
-}
-
-define void @ctlz(i32 %a, <16 x i32> %va) {
-; THRU-LABEL: 'ctlz'
-; THRU-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call i32 @llvm.ctlz.i32(i32 %a, i1 true)
-; THRU-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %va, i1 true)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'ctlz'
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call i32 @llvm.ctlz.i32(i32 %a, i1 true)
-; LATE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %va, i1 true)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'ctlz'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call i32 @llvm.ctlz.i32(i32 %a, i1 true)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %va, i1 true)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'ctlz'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call i32 @llvm.ctlz.i32(i32 %a, i1 true)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %va, i1 true)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %s = call i32 @llvm.ctlz.i32(i32 %a, i1 true)
- %v = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %va, i1 true)
- ret void
-}
-
-define void @fshl(i32 %a, i32 %b, i32 %c, <16 x i32> %va, <16 x i32> %vb, <16 x i32> %vc) {
-; THRU-LABEL: 'fshl'
-; THRU-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %s = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
-; THRU-NEXT: Cost Model: Found an estimated cost of 120 for instruction: %v = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i32> %vc)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'fshl'
-; LATE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %s = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
-; LATE-NEXT: Cost Model: Found an estimated cost of 120 for instruction: %v = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i32> %vc)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'fshl'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %s = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 92 for instruction: %v = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i32> %vc)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'fshl'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %s = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 120 for instruction: %v = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i32> %vc)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %s = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
- %v = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i32> %vc)
- ret void
-}
-
-define void @maskedgather(<16 x ptr> %va, <16 x i1> %vb, <16 x float> %vc) {
-; THRU-LABEL: 'maskedgather'
-; THRU-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> align 1 %va, <16 x i1> %vb, <16 x float> %vc)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'maskedgather'
-; LATE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> align 1 %va, <16 x i1> %vb, <16 x float> %vc)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'maskedgather'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> align 1 %va, <16 x i1> %vb, <16 x float> %vc)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'maskedgather'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> align 1 %va, <16 x i1> %vb, <16 x float> %vc)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %v = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %va, i32 1, <16 x i1> %vb, <16 x float> %vc)
- ret void
-}
-
-define void @maskedscatter(<16 x float> %va, <16 x ptr> %vb, <16 x i1> %vc) {
-; THRU-LABEL: 'maskedscatter'
-; THRU-NEXT: Cost Model: Found an estimated cost of 176 for instruction: call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> %va, <16 x ptr> align 1 %vb, <16 x i1> %vc)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'maskedscatter'
-; LATE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> %va, <16 x ptr> align 1 %vb, <16 x i1> %vc)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'maskedscatter'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> %va, <16 x ptr> align 1 %vb, <16 x i1> %vc)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'maskedscatter'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> %va, <16 x ptr> align 1 %vb, <16 x i1> %vc)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> %va, <16 x ptr> %vb, i32 1, <16 x i1> %vc)
- ret void
-}
-
-define void @reduce_fmax(<16 x float> %va) {
-; THRU-LABEL: 'reduce_fmax'
-; THRU-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v = call float @llvm.vector.reduce.fmax.v16f32(<16 x float> %va)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'reduce_fmax'
-; LATE-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v = call float @llvm.vector.reduce.fmax.v16f32(<16 x float> %va)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'reduce_fmax'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v = call float @llvm.vector.reduce.fmax.v16f32(<16 x float> %va)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'reduce_fmax'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v = call float @llvm.vector.reduce.fmax.v16f32(<16 x float> %va)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %v = call float @llvm.vector.reduce.fmax.v16f32(<16 x float> %va)
- ret void
-}
-
-define void @memcpy(ptr %a, ptr %b, i32 %c) {
-; THRU-LABEL: 'memcpy'
-; THRU-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.memcpy.p0.p0.i32(ptr align 1 %a, ptr align 1 %b, i32 32, i1 false)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'memcpy'
-; LATE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.memcpy.p0.p0.i32(ptr align 1 %a, ptr align 1 %b, i32 32, i1 false)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'memcpy'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.memcpy.p0.p0.i32(ptr align 1 %a, ptr align 1 %b, i32 32, i1 false)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'memcpy'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.memcpy.p0.p0.i32(ptr align 1 %a, ptr align 1 %b, i32 32, i1 false)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- call void @llvm.memcpy.p0.p0.i32(ptr align 1 %a, ptr align 1 %b, i32 32, i1 false)
- ret void
-}
-
-define void @ssa_copy() {
- ; CHECK: %{{.*}} = llvm.intr.ssa.copy %{{.*}} : f32
-; THRU-LABEL: 'ssa_copy'
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %i = call i32 @llvm.ssa.copy.i32(i32 undef)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %f = call float @llvm.ssa.copy.f32(float undef)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %p = call ptr @llvm.ssa.copy.p0(ptr undef)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'ssa_copy'
-; LATE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %i = call i32 @llvm.ssa.copy.i32(i32 undef)
-; LATE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %f = call float @llvm.ssa.copy.f32(float undef)
-; LATE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %p = call ptr @llvm.ssa.copy.p0(ptr undef)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'ssa_copy'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %i = call i32 @llvm.ssa.copy.i32(i32 undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %f = call float @llvm.ssa.copy.f32(float undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %p = call ptr @llvm.ssa.copy.p0(ptr undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'ssa_copy'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %i = call i32 @llvm.ssa.copy.i32(i32 undef)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %f = call float @llvm.ssa.copy.f32(float undef)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %p = call ptr @llvm.ssa.copy.p0(ptr undef)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %i = call i32 @llvm.ssa.copy.i32(i32 undef)
- %f = call float @llvm.ssa.copy.f32(float undef)
- %p = call ptr @llvm.ssa.copy.p0(ptr undef)
- ret void
-}
diff --git a/llvm/test/Analysis/CostModel/ARM/mve-intrinsic-cost-kinds.ll b/llvm/test/Analysis/CostModel/ARM/mve-intrinsic-cost-kinds.ll
new file mode 100644
index 0000000..b3ad818
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/ARM/mve-intrinsic-cost-kinds.ll
@@ -0,0 +1,181 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
+; RUN: opt -mtriple=armv8.1m.main -mattr=+mve.fp -passes="print<cost-model>" -cost-kind=all 2>&1 -disable-output < %s | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+
+; Test a cross-section of intrinsics for various cost-kinds.
+; Other test files may check for accuracy of a particular intrinsic
+; across subtargets or types. This is just a basic correctness check using an
+; ARM target and a legal scalar type (i32/float) and/or an
+; illegal vector type (16 x i32/float).
+
+declare i32 @llvm.smax.i32(i32, i32)
+declare <16 x i32> @llvm.smax.v16i32(<16 x i32>, <16 x i32>)
+
+declare float @llvm.fmuladd.f32(float, float, float)
+declare <16 x float> @llvm.fmuladd.v16f32(<16 x float>, <16 x float>, <16 x float>)
+
+declare float @llvm.log2.f32(float)
+declare <16 x float> @llvm.log2.v16f32(<16 x float>)
+
+declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
+declare <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float>, <16 x float>, metadata, metadata)
+
+declare float @llvm.maximum.f32(float, float)
+declare <16 x float> @llvm.maximum.v16f32(<16 x float>, <16 x float>)
+
+declare i32 @llvm.cttz.i32(i32, i1)
+declare <16 x i32> @llvm.cttz.v16i32(<16 x i32>, i1)
+
+declare i32 @llvm.ctlz.i32(i32, i1)
+declare <16 x i32> @llvm.ctlz.v16i32(<16 x i32>, i1)
+
+declare i32 @llvm.fshl.i32(i32, i32, i32)
+declare <16 x i32> @llvm.fshl.v16i32(<16 x i32>, <16 x i32>, <16 x i32>)
+
+declare <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x float>)
+declare void @llvm.masked.scatter.v16f32.v16p0(<16 x float>, <16 x ptr>, i32, <16 x i1>)
+declare float @llvm.vector.reduce.fmax.v16f32(<16 x float>)
+
+declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1)
+
+declare i32 @llvm.ssa.copy.i32(i32)
+declare float @llvm.ssa.copy.f32(float)
+declare ptr @llvm.ssa.copy.p0(ptr)
+
+define void @smax(i32 %a, i32 %b, <16 x i32> %va, <16 x i32> %vb) {
+; CHECK-LABEL: 'smax'
+; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:3 Lat:2 SizeLat:2 for: %s = call i32 @llvm.smax.i32(i32 %a, i32 %b)
+; CHECK-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %v = call <16 x i32> @llvm.smax.v16i32(<16 x i32> %va, <16 x i32> %vb)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %s = call i32 @llvm.smax.i32(i32 %a, i32 %b)
+ %v = call <16 x i32> @llvm.smax.v16i32(<16 x i32> %va, <16 x i32> %vb)
+ ret void
+}
+
+define void @fmuladd(float %a, float %b, float %c, <16 x float> %va, <16 x float> %vb, <16 x float> %vc) {
+; CHECK-LABEL: 'fmuladd'
+; CHECK-NEXT: Cost Model: Found costs of 1 for: %s = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
+; CHECK-NEXT: Cost Model: Found costs of 8 for: %v = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %s = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
+ %v = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc)
+ ret void
+}
+
+define void @log2(float %a, <16 x float> %va) {
+; CHECK-LABEL: 'log2'
+; CHECK-NEXT: Cost Model: Found costs of RThru:10 CodeSize:1 Lat:10 SizeLat:10 for: %s = call float @llvm.log2.f32(float %a)
+; CHECK-NEXT: Cost Model: Found costs of RThru:192 CodeSize:48 Lat:192 SizeLat:192 for: %v = call <16 x float> @llvm.log2.v16f32(<16 x float> %va)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %s = call float @llvm.log2.f32(float %a)
+ %v = call <16 x float> @llvm.log2.v16f32(<16 x float> %va)
+ ret void
+}
+
+define void @constrained_fadd(float %a, <16 x float> %va) strictfp {
+; CHECK-LABEL: 'constrained_fadd'
+; CHECK-NEXT: Cost Model: Found costs of 1 for: %s = call float @llvm.experimental.constrained.fadd.f32(float %a, float %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+; CHECK-NEXT: Cost Model: Found costs of 48 for: %t = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %va, <16 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %s = call float @llvm.experimental.constrained.fadd.f32(float %a, float %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ %t = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %va, <16 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret void
+}
+
+define void @fmaximum(float %a, float %b, <16 x float> %va, <16 x float> %vb) {
+; CHECK-LABEL: 'fmaximum'
+; CHECK-NEXT: Cost Model: Found costs of RThru:10 CodeSize:1 Lat:10 SizeLat:10 for: %s = call float @llvm.maximum.f32(float %a, float %b)
+; CHECK-NEXT: Cost Model: Found costs of RThru:208 CodeSize:64 Lat:208 SizeLat:208 for: %v = call <16 x float> @llvm.maximum.v16f32(<16 x float> %va, <16 x float> %vb)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %s = call float @llvm.maximum.f32(float %a, float %b)
+ %v = call <16 x float> @llvm.maximum.v16f32(<16 x float> %va, <16 x float> %vb)
+ ret void
+}
+
+define void @cttz(i32 %a, <16 x i32> %va) {
+; CHECK-LABEL: 'cttz'
+; CHECK-NEXT: Cost Model: Found costs of 1 for: %s = call i32 @llvm.cttz.i32(i32 %a, i1 false)
+; CHECK-NEXT: Cost Model: Found costs of 8 for: %v = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %va, i1 false)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %s = call i32 @llvm.cttz.i32(i32 %a, i1 false)
+ %v = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %va, i1 false)
+ ret void
+}
+
+define void @ctlz(i32 %a, <16 x i32> %va) {
+; CHECK-LABEL: 'ctlz'
+; CHECK-NEXT: Cost Model: Found costs of 1 for: %s = call i32 @llvm.ctlz.i32(i32 %a, i1 true)
+; CHECK-NEXT: Cost Model: Found costs of 8 for: %v = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %va, i1 true)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %s = call i32 @llvm.ctlz.i32(i32 %a, i1 true)
+ %v = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %va, i1 true)
+ ret void
+}
+
+define void @fshl(i32 %a, i32 %b, i32 %c, <16 x i32> %va, <16 x i32> %vb, <16 x i32> %vc) {
+; CHECK-LABEL: 'fshl'
+; CHECK-NEXT: Cost Model: Found costs of RThru:7 CodeSize:8 Lat:7 SizeLat:7 for: %s = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
+; CHECK-NEXT: Cost Model: Found costs of RThru:120 CodeSize:92 Lat:120 SizeLat:120 for: %v = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i32> %vc)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %s = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
+ %v = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i32> %vc)
+ ret void
+}
+
+define void @maskedgather(<16 x ptr> %va, <16 x i1> %vb, <16 x float> %vc) {
+; CHECK-LABEL: 'maskedgather'
+; CHECK-NEXT: Cost Model: Found costs of 176 for: %v = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> align 1 %va, <16 x i1> %vb, <16 x float> %vc)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %v = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %va, i32 1, <16 x i1> %vb, <16 x float> %vc)
+ ret void
+}
+
+define void @maskedscatter(<16 x float> %va, <16 x ptr> %vb, <16 x i1> %vc) {
+; CHECK-LABEL: 'maskedscatter'
+; CHECK-NEXT: Cost Model: Found costs of 176 for: call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> %va, <16 x ptr> align 1 %vb, <16 x i1> %vc)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> %va, <16 x ptr> %vb, i32 1, <16 x i1> %vc)
+ ret void
+}
+
+define void @reduce_fmax(<16 x float> %va) {
+; CHECK-LABEL: 'reduce_fmax'
+; CHECK-NEXT: Cost Model: Found costs of RThru:9 CodeSize:6 Lat:9 SizeLat:9 for: %v = call float @llvm.vector.reduce.fmax.v16f32(<16 x float> %va)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %v = call float @llvm.vector.reduce.fmax.v16f32(<16 x float> %va)
+ ret void
+}
+
+define void @memcpy(ptr %a, ptr %b, i32 %c) {
+; CHECK-LABEL: 'memcpy'
+; CHECK-NEXT: Cost Model: Found costs of 4 for: call void @llvm.memcpy.p0.p0.i32(ptr align 1 %a, ptr align 1 %b, i32 32, i1 false)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ call void @llvm.memcpy.p0.p0.i32(ptr align 1 %a, ptr align 1 %b, i32 32, i1 false)
+ ret void
+}
+
+define void @ssa_copy() {
+; CHECK-LABEL: 'ssa_copy'
+; CHECK-NEXT: Cost Model: Found costs of 0 for: %i = call i32 @llvm.ssa.copy.i32(i32 undef)
+; CHECK-NEXT: Cost Model: Found costs of 0 for: %f = call float @llvm.ssa.copy.f32(float undef)
+; CHECK-NEXT: Cost Model: Found costs of 0 for: %p = call ptr @llvm.ssa.copy.p0(ptr undef)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %i = call i32 @llvm.ssa.copy.i32(i32 undef)
+ %f = call float @llvm.ssa.copy.f32(float undef)
+ %p = call ptr @llvm.ssa.copy.p0(ptr undef)
+ ret void
+}
diff --git a/llvm/test/Analysis/CostModel/ARM/mve-target-intrinsics.ll b/llvm/test/Analysis/CostModel/ARM/mve-target-intrinsics.ll
new file mode 100644
index 0000000..d09216a
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/ARM/mve-target-intrinsics.ll
@@ -0,0 +1,24 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
+; RUN: opt < %s -passes="print<cost-model>" -cost-kind=all 2>&1 -disable-output -mtriple=thumbv8.1m.main -mattr=+mve | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+
+define void @intrinsics() {
+; CHECK-LABEL: 'intrinsics'
+; CHECK-NEXT: Cost Model: Found costs of 1 for: %t1 = call i32 @llvm.arm.ssat(i32 undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found costs of 1 for: %t2 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0(ptr undef)
+; CHECK-NEXT: Cost Model: Found costs of 1 for: %t3 = call { i32, i32 } @llvm.arm.mve.sqrshrl(i32 undef, i32 undef, i32 undef, i32 48)
+; CHECK-NEXT: Cost Model: Found costs of 1 for: %t4 = tail call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 0, i32 0, i32 0, i32 0, i32 0, <8 x i16> undef, <8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %t1 = call i32 @llvm.arm.ssat(i32 undef, i32 undef)
+ %t2 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0(ptr undef)
+ %t3 = call { i32, i32 } @llvm.arm.mve.sqrshrl(i32 undef, i32 undef, i32 undef, i32 48)
+ %t4 = tail call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 0, i32 0, i32 0, i32 0, i32 0, <8 x i16> undef, <8 x i16> undef)
+ ret void
+}
+
+declare i32 @llvm.arm.ssat(i32, i32)
+declare { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0(ptr)
+declare { i32, i32 } @llvm.arm.mve.sqrshrl(i32, i32, i32, i32)
+declare { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32, i32, i32, i32, i32, <8 x i16>, <8 x i16>)
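The NOTE and RUN lines in the new files above double as the recipe for reproducing these assertions locally. A minimal sketch, assuming an in-tree build with opt available under build/bin (the build directory and exact invocation are assumptions, not part of this patch):

    # Print all cost kinds (throughput, code-size, latency, size-latency) for one
    # test file, mirroring its RUN line.
    build/bin/opt -mtriple=thumbv8.1m.main -mattr=+mve -passes="print<cost-model>" \
      -cost-kind=all -disable-output llvm/test/Analysis/CostModel/ARM/mve-target-intrinsics.ll 2>&1

    # Regenerate the autogenerated CHECK lines after a cost-model change.
    PATH=$PWD/build/bin:$PATH python3 llvm/utils/update_analyze_test_checks.py \
      llvm/test/Analysis/CostModel/ARM/mve-target-intrinsics.ll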
diff --git a/llvm/test/Analysis/CostModel/ARM/target-intrinsics.ll b/llvm/test/Analysis/CostModel/ARM/target-intrinsics.ll
deleted file mode 100644
index 2d6b318..0000000
--- a/llvm/test/Analysis/CostModel/ARM/target-intrinsics.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
-; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -mtriple=thumbv8.1m.main -mattr=+mve | FileCheck %s --check-prefix=CHECK-THUMB2-RECIP
-; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=throughput -mtriple=thumbv8.1m.main -mattr=+mve | FileCheck %s --check-prefix=CHECK-THUMB2-RECIP
-; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=latency -mtriple=thumbv8.1m.main -mattr=+mve | FileCheck %s --check-prefix=CHECK-THUMB2-LAT
-; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=code-size -mtriple=thumbv8.1m.main -mattr=+mve | FileCheck %s --check-prefix=CHECK-THUMB2-SIZE
-
-target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
-
-define void @intrinsics() {
-; CHECK-THUMB2-RECIP-LABEL: 'intrinsics'
-; CHECK-THUMB2-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t1 = call i32 @llvm.arm.ssat(i32 undef, i32 undef)
-; CHECK-THUMB2-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t2 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0(ptr undef)
-; CHECK-THUMB2-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t3 = call { i32, i32 } @llvm.arm.mve.sqrshrl(i32 undef, i32 undef, i32 undef, i32 48)
-; CHECK-THUMB2-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t4 = tail call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 0, i32 0, i32 0, i32 0, i32 0, <8 x i16> undef, <8 x i16> undef)
-; CHECK-THUMB2-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; CHECK-THUMB2-LAT-LABEL: 'intrinsics'
-; CHECK-THUMB2-LAT-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t1 = call i32 @llvm.arm.ssat(i32 undef, i32 undef)
-; CHECK-THUMB2-LAT-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t2 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0(ptr undef)
-; CHECK-THUMB2-LAT-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t3 = call { i32, i32 } @llvm.arm.mve.sqrshrl(i32 undef, i32 undef, i32 undef, i32 48)
-; CHECK-THUMB2-LAT-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t4 = tail call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 0, i32 0, i32 0, i32 0, i32 0, <8 x i16> undef, <8 x i16> undef)
-; CHECK-THUMB2-LAT-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; CHECK-THUMB2-SIZE-LABEL: 'intrinsics'
-; CHECK-THUMB2-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t1 = call i32 @llvm.arm.ssat(i32 undef, i32 undef)
-; CHECK-THUMB2-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t2 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0(ptr undef)
-; CHECK-THUMB2-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t3 = call { i32, i32 } @llvm.arm.mve.sqrshrl(i32 undef, i32 undef, i32 undef, i32 48)
-; CHECK-THUMB2-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t4 = tail call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 0, i32 0, i32 0, i32 0, i32 0, <8 x i16> undef, <8 x i16> undef)
-; CHECK-THUMB2-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %t1 = call i32 @llvm.arm.ssat(i32 undef, i32 undef)
- %t2 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0(ptr undef)
- %t3 = call { i32, i32 } @llvm.arm.mve.sqrshrl(i32 undef, i32 undef, i32 undef, i32 48)
- %t4 = tail call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 0, i32 0, i32 0, i32 0, i32 0, <8 x i16> undef, <8 x i16> undef)
- ret void
-}
-
-declare i32 @llvm.arm.ssat(i32, i32)
-declare { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0(ptr)
-declare { i32, i32 } @llvm.arm.mve.sqrshrl(i32, i32, i32, i32)
-declare { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32, i32, i32, i32, i32, <8 x i16>, <8 x i16>)
diff --git a/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll b/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll
index 245e8f7..058370c 100644
--- a/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll
+++ b/llvm/test/Analysis/CostModel/SystemZ/intrinsic-cost-crash.ll
@@ -23,10 +23,10 @@
%"class.llvm::Metadata.306.1758.9986.10470.10954.11438.11922.12406.12890.13374.13858.15310.15794.16278.17730.19182.21118.25958.26926.29346.29830.30314.30798.31282.31766.32250.32734.33702.36606.38058.41638" = type { i8, i8, i16, i32 }
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end(ptr nocapture) #0
+declare void @llvm.lifetime.end(ptr nocapture)
; Function Attrs: nounwind ssp uwtable
-define hidden void @fun(ptr %N, i1 %arg) #1 align 2 {
+define hidden void @fun(ptr %N, i1 %arg) align 2 {
; CHECK: define
entry:
%NumOperands.i = getelementptr inbounds %"class.llvm::SDNode.310.1762.9990.10474.10958.11442.11926.12410.12894.13378.13862.15314.15798.16282.17734.19186.21122.25962.26930.29350.29834.30318.30802.31286.31770.32254.32738.33706.36610.38062.41642", ptr %N, i64 0, i32 8
@@ -47,8 +47,6 @@ for.body: ; preds = %for.body, %for.body
br i1 %exitcond193, label %for.cond.cleanup, label %for.body
}
-attributes #0 = { argmemonly nounwind }
-attributes #1 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
!llvm.ident = !{!0}
diff --git a/llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll b/llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll
index 0c0fb41..891d604 100644
--- a/llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll
+++ b/llvm/test/Analysis/Delinearization/constant_functions_multi_dim.ll
@@ -4,7 +4,7 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
; Function Attrs: noinline nounwind uwtable
-define void @mat_mul(ptr %C, ptr %A, ptr %B, i64 %N) #0 !kernel_arg_addr_space !2 !kernel_arg_access_qual !3 !kernel_arg_type !4 !kernel_arg_base_type !4 !kernel_arg_type_qual !5 {
+define void @mat_mul(ptr %C, ptr %A, ptr %B, i64 %N) !kernel_arg_addr_space !2 !kernel_arg_access_qual !3 !kernel_arg_type !4 !kernel_arg_base_type !4 !kernel_arg_type_qual !5 {
; CHECK-LABEL: 'mat_mul'
; CHECK-NEXT: Inst: %tmp = load float, ptr %arrayidx, align 4
; CHECK-NEXT: AccessFunction: {(4 * %N * %call),+,4}<%for.inc>
@@ -22,8 +22,8 @@ entry:
br label %entry.split
entry.split: ; preds = %entry
- %call = tail call i64 @_Z13get_global_idj(i32 0) #3
- %call1 = tail call i64 @_Z13get_global_idj(i32 1) #3
+ %call = tail call i64 @_Z13get_global_idj(i32 0)
+ %call1 = tail call i64 @_Z13get_global_idj(i32 1)
%cmp1 = icmp sgt i64 %N, 0
%mul = mul nsw i64 %call, %N
br i1 %cmp1, label %for.inc.lr.ph, label %for.end
@@ -59,15 +59,10 @@ for.end: ; preds = %for.cond.for.end_cr
}
; Function Attrs: nounwind readnone
-declare i64 @_Z13get_global_idj(i32) #1
+declare i64 @_Z13get_global_idj(i32)
; Function Attrs: nounwind readnone speculatable
-declare float @llvm.fmuladd.f32(float, float, float) #2
-
-attributes #0 = { noinline nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind readnone speculatable }
-attributes #3 = { nounwind readnone }
+declare float @llvm.fmuladd.f32(float, float, float)
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Analysis/DependenceAnalysis/MIVCheckConst.ll b/llvm/test/Analysis/DependenceAnalysis/MIVCheckConst.ll
index b498d7064..f5be89a 100644
--- a/llvm/test/Analysis/DependenceAnalysis/MIVCheckConst.ll
+++ b/llvm/test/Analysis/DependenceAnalysis/MIVCheckConst.ll
@@ -30,7 +30,7 @@ target datalayout = "e-m:e-p:32:32-i1:32-i64:64-a:0-v32:32-n16:32"
%20 = type { [768 x i32] }
%21 = type { [416 x i32] }
-define void @test(ptr %A, ptr %B, i1 %arg, i32 %n, i32 %m) #0 align 2 {
+define void @test(ptr %A, ptr %B, i1 %arg, i32 %n, i32 %m) align 2 {
; CHECK-LABEL: 'test'
; CHECK-NEXT: Src: %v1 = load i32, ptr %B, align 4 --> Dst: %v1 = load i32, ptr %B, align 4
; CHECK-NEXT: da analyze - none!
@@ -91,5 +91,3 @@ bb38:
bb40:
ret void
}
-
-attributes #0 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll b/llvm/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll
index e5d5d21e..eba017a 100644
--- a/llvm/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll
+++ b/llvm/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll
@@ -52,7 +52,7 @@ for.end:
@a = global [10004 x [10004 x i32]] zeroinitializer, align 16
; Function Attrs: nounwind uwtable
-define void @coupled_miv_type_mismatch(i32 %n) #0 {
+define void @coupled_miv_type_mismatch(i32 %n) {
; CHECK-LABEL: 'coupled_miv_type_mismatch'
; CHECK-NEXT: Src: %2 = load i32, ptr %arrayidx5, align 4 --> Dst: %2 = load i32, ptr %arrayidx5, align 4
; CHECK-NEXT: da analyze - none!
@@ -101,8 +101,6 @@ for.end13: ; preds = %for.cond
ret void
}
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.ident = !{!0}
!0 = !{!"clang version 3.7.0"}
diff --git a/llvm/test/Analysis/MemoryDependenceAnalysis/invariant.group-bug.ll b/llvm/test/Analysis/MemoryDependenceAnalysis/invariant.group-bug.ll
index c11191e..5470ef9 100644
--- a/llvm/test/Analysis/MemoryDependenceAnalysis/invariant.group-bug.ll
+++ b/llvm/test/Analysis/MemoryDependenceAnalysis/invariant.group-bug.ll
@@ -17,13 +17,13 @@ target triple = "x86_64-grtev4-linux-gnu"
%4 = type { ptr }
%5 = type { i64, [8 x i8] }
-define void @fail(ptr noalias sret(i1) %arg, ptr %arg1, ptr %arg2, ptr %arg3, i1 %arg4) local_unnamed_addr #0 {
+define void @fail(ptr noalias sret(i1) %arg, ptr %arg1, ptr %arg2, ptr %arg3, i1 %arg4) local_unnamed_addr {
; CHECK-LABEL: @fail(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[I4:%.*]] = load ptr, ptr [[ARG1:%.*]], align 8, !invariant.group [[META6:![0-9]+]]
; CHECK-NEXT: [[I5:%.*]] = getelementptr inbounds ptr, ptr [[I4]], i64 6
; CHECK-NEXT: [[I6:%.*]] = load ptr, ptr [[I5]], align 8, !invariant.load [[META6]]
-; CHECK-NEXT: [[I7:%.*]] = tail call i64 [[I6]](ptr [[ARG1]]) #[[ATTR1:[0-9]+]]
+; CHECK-NEXT: [[I7:%.*]] = tail call i64 [[I6]](ptr [[ARG1]])
; CHECK-NEXT: [[I9:%.*]] = load ptr, ptr [[ARG2:%.*]], align 8
; CHECK-NEXT: store i8 0, ptr [[I9]], align 1
; CHECK-NEXT: br i1 [[ARG4:%.*]], label [[BB10:%.*]], label [[BB29:%.*]]
@@ -32,7 +32,7 @@ define void @fail(ptr noalias sret(i1) %arg, ptr %arg1, ptr %arg2, ptr %arg3, i1
; CHECK-NEXT: [[I15_PRE:%.*]] = load ptr, ptr [[I14_PHI_TRANS_INSERT]], align 8, !invariant.load [[META6]]
; CHECK-NEXT: br label [[BB12:%.*]]
; CHECK: bb12:
-; CHECK-NEXT: [[I16:%.*]] = call i64 [[I15_PRE]](ptr nonnull [[ARG1]], ptr null, i64 0) #[[ATTR1]]
+; CHECK-NEXT: [[I16:%.*]] = call i64 [[I15_PRE]](ptr nonnull [[ARG1]], ptr null, i64 0)
; CHECK-NEXT: br i1 true, label [[BB28:%.*]], label [[BB17:%.*]]
; CHECK: bb17:
; CHECK-NEXT: br i1 true, label [[BB18:%.*]], label [[BB21:%.*]]
@@ -55,7 +55,7 @@ bb:
%i4 = load ptr, ptr %arg1, align 8, !invariant.group !6
%i5 = getelementptr inbounds ptr, ptr %i4, i64 6
%i6 = load ptr, ptr %i5, align 8, !invariant.load !6
- %i7 = tail call i64 %i6(ptr %arg1) #1
+ %i7 = tail call i64 %i6(ptr %arg1)
%i9 = load ptr, ptr %arg2, align 8
store i8 0, ptr %i9, align 1
br i1 %arg4, label %bb10, label %bb29
@@ -67,7 +67,7 @@ bb12: ; preds = %bb28, %bb10
%i13 = load ptr, ptr %arg1, align 8, !invariant.group !6
%i14 = getelementptr inbounds ptr, ptr %i13, i64 22
%i15 = load ptr, ptr %i14, align 8, !invariant.load !6
- %i16 = call i64 %i15(ptr nonnull %arg1, ptr null, i64 0) #1
+ %i16 = call i64 %i15(ptr nonnull %arg1, ptr null, i64 0)
br i1 %arg4, label %bb28, label %bb17
bb17: ; preds = %bb12
@@ -110,9 +110,6 @@ bb29: ; preds = %bb28, %bb
ret void
}
-attributes #0 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="non-leaf" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+popcnt,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="non-leaf" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+popcnt,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.linker.options = !{}
!llvm.module.flags = !{!0, !1, !3, !4, !5}
diff --git a/llvm/test/Analysis/MemorySSA/pr28880.ll b/llvm/test/Analysis/MemorySSA/pr28880.ll
index 98f3261..a2690b9 100644
--- a/llvm/test/Analysis/MemorySSA/pr28880.ll
+++ b/llvm/test/Analysis/MemorySSA/pr28880.ll
@@ -8,7 +8,7 @@
@global.1 = external hidden unnamed_addr global double, align 8
; Function Attrs: nounwind ssp uwtable
-define hidden fastcc void @hoge(i1 %arg) unnamed_addr #0 {
+define hidden fastcc void @hoge(i1 %arg) unnamed_addr {
bb:
br i1 %arg, label %bb1, label %bb2
@@ -45,6 +45,3 @@ bb4: ; preds = %bb3
bb6: ; preds = %bb3
unreachable
}
-
-attributes #0 = { nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
diff --git a/llvm/test/Analysis/MemorySSA/pr39197.ll b/llvm/test/Analysis/MemorySSA/pr39197.ll
index af57b3c..6be0c58 100644
--- a/llvm/test/Analysis/MemorySSA/pr39197.ll
+++ b/llvm/test/Analysis/MemorySSA/pr39197.ll
@@ -12,13 +12,13 @@ declare void @dummy()
; CHECK-LABEL: @main()
; Function Attrs: nounwind
-define dso_local void @main() #0 {
+define dso_local void @main() {
call void @func_1()
unreachable
}
; Function Attrs: nounwind
-define dso_local void @func_1() #0 {
+define dso_local void @func_1() {
%1 = alloca ptr, align 8
%2 = call signext i32 @func_2()
%3 = icmp ne i32 %2, 0
@@ -64,45 +64,45 @@ define dso_local void @func_1() #0 {
}
; Function Attrs: nounwind
-declare dso_local signext i32 @func_2() #0
+declare dso_local signext i32 @func_2()
; Function Attrs: nounwind
-define dso_local void @safe_sub_func_uint8_t_u_u() #0 {
+define dso_local void @safe_sub_func_uint8_t_u_u() {
ret void
}
; Function Attrs: nounwind
-define dso_local void @safe_add_func_int64_t_s_s() #0 {
+define dso_local void @safe_add_func_int64_t_s_s() {
ret void
}
; Function Attrs: nounwind
-define dso_local void @safe_rshift_func_int16_t_s_u() #0 {
+define dso_local void @safe_rshift_func_int16_t_s_u() {
ret void
}
; Function Attrs: nounwind
-define dso_local void @safe_div_func_uint8_t_u_u() #0 {
+define dso_local void @safe_div_func_uint8_t_u_u() {
ret void
}
; Function Attrs: nounwind
-define dso_local void @safe_mul_func_uint16_t_u_u() #0 {
+define dso_local void @safe_mul_func_uint16_t_u_u() {
ret void
}
; Function Attrs: nounwind
-define dso_local void @safe_mul_func_int16_t_s_s() #0 {
+define dso_local void @safe_mul_func_int16_t_s_s() {
ret void
}
; Function Attrs: nounwind
-define dso_local void @safe_div_func_int32_t_s_s() #0 {
+define dso_local void @safe_div_func_int32_t_s_s() {
ret void
}
; Function Attrs: nounwind
-define dso_local signext i16 @safe_sub_func_int16_t_s_s(i16 signext) #0 {
+define dso_local signext i16 @safe_sub_func_int16_t_s_s(i16 signext) {
%2 = alloca i16, align 2
store i16 %0, ptr %2, align 2, !tbaa !1
%3 = load i16, ptr %2, align 2, !tbaa !1
@@ -113,29 +113,25 @@ define dso_local signext i16 @safe_sub_func_int16_t_s_s(i16 signext) #0 {
}
; Function Attrs: nounwind
-define dso_local void @safe_add_func_uint16_t_u_u() #0 {
+define dso_local void @safe_add_func_uint16_t_u_u() {
ret void
}
; Function Attrs: nounwind
-define dso_local void @safe_div_func_int8_t_s_s() #0 {
+define dso_local void @safe_div_func_int8_t_s_s() {
ret void
}
; Function Attrs: nounwind
-define dso_local void @safe_add_func_int16_t_s_s() #0 {
+define dso_local void @safe_add_func_int16_t_s_s() {
ret void
}
; Function Attrs: nounwind
-define dso_local void @safe_add_func_uint8_t_u_u() #0 {
+define dso_local void @safe_add_func_uint8_t_u_u() {
ret void
}
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="z13" "target-features"="+transactional-execution,+vector" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { argmemonly nounwind }
-attributes #2 = { nounwind }
-
!llvm.ident = !{!0}
!0 = !{!"clang version 8.0.0 (http://llvm.org/git/clang.git 7cda4756fc9713d98fd3513b8df172700f267bad) (http://llvm.org/git/llvm.git 199c0d32e96b646bd8cf6beeaf0f99f8a434b56a)"}
diff --git a/llvm/test/Analysis/MemorySSA/pr40038.ll b/llvm/test/Analysis/MemorySSA/pr40038.ll
index efdcbe5..39ea78b 100644
--- a/llvm/test/Analysis/MemorySSA/pr40038.ll
+++ b/llvm/test/Analysis/MemorySSA/pr40038.ll
@@ -10,21 +10,21 @@ target triple = "s390x-ibm-linux"
; Function Attrs: nounwind
; CHECK-LABEL: @main
-define dso_local void @main() #0 {
+define dso_local void @main() {
bb:
call void @func_1()
unreachable
}
; Function Attrs: nounwind
-define dso_local void @func_1() #0 {
+define dso_local void @func_1() {
bb:
call void @func_2()
unreachable
}
; Function Attrs: nounwind
-define dso_local void @func_2() #0 {
+define dso_local void @func_2() {
bb:
%tmp = alloca i32, align 4
store i32 0, ptr @g_80, align 4, !tbaa !1
@@ -68,10 +68,7 @@ bb18: ; preds = %bb12, %bb1
}
; Function Attrs: cold noreturn nounwind
-declare void @llvm.trap() #1
-
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="z13" "target-features"="+transactional-execution,+vector" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { cold noreturn nounwind }
+declare void @llvm.trap()
!llvm.ident = !{!0}
diff --git a/llvm/test/Analysis/MemorySSA/pr43569.ll b/llvm/test/Analysis/MemorySSA/pr43569.ll
index 02d074e..c81f8d4 100644
--- a/llvm/test/Analysis/MemorySSA/pr43569.ll
+++ b/llvm/test/Analysis/MemorySSA/pr43569.ll
@@ -10,7 +10,7 @@ target triple = "x86_64-unknown-linux-gnu"
; CHECK-LABEL: @c()
; Function Attrs: nounwind uwtable
-define dso_local void @c() #0 {
+define dso_local void @c() {
entry:
call void @llvm.instrprof.increment(ptr @__profn_c, i64 68269137, i32 3, i32 0)
br label %for.cond
@@ -42,8 +42,4 @@ for.end: ; preds = %for.cond1
}
; Function Attrs: nounwind
-declare void @llvm.instrprof.increment(ptr, i64, i32, i32) #1
-
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind }
-
+declare void @llvm.instrprof.increment(ptr, i64, i32, i32)
diff --git a/llvm/test/Analysis/ScalarEvolution/pr22674.ll b/llvm/test/Analysis/ScalarEvolution/pr22674.ll
index 95f96ca..b2f4ae6 100644
--- a/llvm/test/Analysis/ScalarEvolution/pr22674.ll
+++ b/llvm/test/Analysis/ScalarEvolution/pr22674.ll
@@ -11,7 +11,7 @@ target triple = "x86_64-pc-linux-gnux32"
%"class.llvm::AttributeImpl.2.1802.3601.5914.6685.7456.8227.9255.9769.10026.18508" = type <{ ptr, %"class.llvm::FoldingSetImpl::Node.1.1801.3600.5913.6684.7455.8226.9254.9768.10025.18505", i8, [3 x i8] }>
; Function Attrs: nounwind uwtable
-define void @_ZNK4llvm11AttrBuilder13hasAttributesENS_12AttributeSetEy(i1 %arg) #0 align 2 {
+define void @_ZNK4llvm11AttrBuilder13hasAttributesENS_12AttributeSetEy(i1 %arg) align 2 {
entry:
br i1 %arg, label %cond.false, label %_ZNK4llvm12AttributeSet11getNumSlotsEv.exit
@@ -82,8 +82,6 @@ return: ; preds = %_ZNK4llvm9Attribute
ret void
}
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Analysis/ScalarEvolution/scev-canonical-mode.ll b/llvm/test/Analysis/ScalarEvolution/scev-canonical-mode.ll
index 3879b2e7..d9cc3e5 100644
--- a/llvm/test/Analysis/ScalarEvolution/scev-canonical-mode.ll
+++ b/llvm/test/Analysis/ScalarEvolution/scev-canonical-mode.ll
@@ -6,7 +6,7 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: norecurse nounwind uwtable
-define void @ehF(i1 %arg) #0 {
+define void @ehF(i1 %arg) {
entry:
br i1 %arg, label %if.then.i, label %hup.exit
@@ -28,5 +28,3 @@ for.body.i: ; preds = %for.body.i, %for.bo
hup.exit: ; preds = %for.body.i, %if.then.i, %entry
ret void
}
-
-attributes #0 = { norecurse nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll b/llvm/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll
index 67d81e7..7fb4231 100644
--- a/llvm/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll
+++ b/llvm/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll
@@ -13,7 +13,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
%classD = type { ptr }
; Function Attrs: ssp uwtable
-define ptr @test(ptr %this, ptr %p1) #0 align 2 {
+define ptr @test(ptr %this, ptr %p1) align 2 {
entry:
; CHECK-LABEL: @test
; CHECK: load ptr, ptr %p1, align 8, !tbaa
@@ -25,10 +25,7 @@ entry:
unreachable
}
-declare void @callee(ptr, ptr) #1
-
-attributes #0 = { ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+declare void @callee(ptr, ptr)
!llvm.ident = !{!0}
diff --git a/llvm/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll b/llvm/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll
index 942fdf5..f9a2988 100644
--- a/llvm/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll
+++ b/llvm/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll
@@ -9,7 +9,7 @@
%struct.StructC = type { i16, %struct.StructB, i32 }
%struct.StructD = type { i16, %struct.StructB, i32, i8 }
-define i32 @_Z1gPjP7StructAy(ptr %s, ptr %A, i64 %count) #0 {
+define i32 @_Z1gPjP7StructAy(ptr %s, ptr %A, i64 %count) {
entry:
; Access to ptr and &(A->f32).
; CHECK: Function
@@ -35,7 +35,7 @@ entry:
ret i32 %3
}
-define i32 @_Z2g2PjP7StructAy(ptr %s, ptr %A, i64 %count) #0 {
+define i32 @_Z2g2PjP7StructAy(ptr %s, ptr %A, i64 %count) {
entry:
; Access to ptr and &(A->f16).
; CHECK: Function
@@ -60,7 +60,7 @@ entry:
ret i32 %3
}
-define i32 @_Z2g3P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) #0 {
+define i32 @_Z2g3P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) {
entry:
; Access to &(A->f32) and &(B->a.f32).
; CHECK: Function
@@ -89,7 +89,7 @@ entry:
ret i32 %3
}
-define i32 @_Z2g4P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) #0 {
+define i32 @_Z2g4P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) {
entry:
; Access to &(A->f32) and &(B->a.f16).
; CHECK: Function
@@ -117,7 +117,7 @@ entry:
ret i32 %3
}
-define i32 @_Z2g5P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) #0 {
+define i32 @_Z2g5P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) {
entry:
; Access to &(A->f32) and &(B->f32).
; CHECK: Function
@@ -145,7 +145,7 @@ entry:
ret i32 %3
}
-define i32 @_Z2g6P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) #0 {
+define i32 @_Z2g6P7StructAP7StructBy(ptr %A, ptr %B, i64 %count) {
entry:
; Access to &(A->f32) and &(B->a.f32_2).
; CHECK: Function
@@ -174,7 +174,7 @@ entry:
ret i32 %3
}
-define i32 @_Z2g7P7StructAP7StructSy(ptr %A, ptr %S, i64 %count) #0 {
+define i32 @_Z2g7P7StructAP7StructSy(ptr %A, ptr %S, i64 %count) {
entry:
; Access to &(A->f32) and &(S->f32).
; CHECK: Function
@@ -202,7 +202,7 @@ entry:
ret i32 %3
}
-define i32 @_Z2g8P7StructAP7StructSy(ptr %A, ptr %S, i64 %count) #0 {
+define i32 @_Z2g8P7StructAP7StructSy(ptr %A, ptr %S, i64 %count) {
entry:
; Access to &(A->f32) and &(S->f16).
; CHECK: Function
@@ -229,7 +229,7 @@ entry:
ret i32 %3
}
-define i32 @_Z2g9P7StructSP8StructS2y(ptr %S, ptr %S2, i64 %count) #0 {
+define i32 @_Z2g9P7StructSP8StructS2y(ptr %S, ptr %S2, i64 %count) {
entry:
; Access to &(S->f32) and &(S2->f32).
; CHECK: Function
@@ -257,7 +257,7 @@ entry:
ret i32 %3
}
-define i32 @_Z3g10P7StructSP8StructS2y(ptr %S, ptr %S2, i64 %count) #0 {
+define i32 @_Z3g10P7StructSP8StructS2y(ptr %S, ptr %S2, i64 %count) {
entry:
; Access to &(S->f32) and &(S2->f16).
; CHECK: Function
@@ -284,7 +284,7 @@ entry:
ret i32 %3
}
-define i32 @_Z3g11P7StructCP7StructDy(ptr %C, ptr %D, i64 %count) #0 {
+define i32 @_Z3g11P7StructCP7StructDy(ptr %C, ptr %D, i64 %count) {
entry:
; Access to &(C->b.a.f32) and &(D->b.a.f32).
; CHECK: Function
@@ -318,7 +318,7 @@ entry:
ret i32 %3
}
-define i32 @_Z3g12P7StructCP7StructDy(ptr %C, ptr %D, i64 %count) #0 {
+define i32 @_Z3g12P7StructCP7StructDy(ptr %C, ptr %D, i64 %count) {
entry:
; Access to &(b1->a.f32) and &(b2->a.f32).
; CHECK: Function
@@ -357,8 +357,6 @@ entry:
ret i32 %5
}
-attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!0 = !{!1, !1, i64 0}
!1 = !{!"any pointer", !2}
!2 = !{!"omnipotent char", !3}
diff --git a/llvm/test/Assembler/atomic.ll b/llvm/test/Assembler/atomic.ll
index 39f33f9f..6609edc 100644
--- a/llvm/test/Assembler/atomic.ll
+++ b/llvm/test/Assembler/atomic.ll
@@ -52,6 +52,25 @@ define void @f(ptr %x) {
; CHECK: atomicrmw volatile usub_sat ptr %x, i32 10 syncscope("agent") monotonic
atomicrmw volatile usub_sat ptr %x, i32 10 syncscope("agent") monotonic
+ ; CHECK : load atomic <1 x i32>, ptr %x unordered, align 4
+ load atomic <1 x i32>, ptr %x unordered, align 4
+ ; CHECK : store atomic <1 x i32> splat (i32 3), ptr %x release, align 4
+ store atomic <1 x i32> <i32 3>, ptr %x release, align 4
+ ; CHECK : load atomic <2 x i32>, ptr %x unordered, align 4
+ load atomic <2 x i32>, ptr %x unordered, align 4
+ ; CHECK : store atomic <2 x i32> <i32 3, i32 4>, ptr %x release, align 4
+ store atomic <2 x i32> <i32 3, i32 4>, ptr %x release, align 4
+
+ ; CHECK : load atomic <2 x ptr>, ptr %x unordered, align 4
+ load atomic <2 x ptr>, ptr %x unordered, align 4
+ ; CHECK : store atomic <2 x ptr> zeroinitializer, ptr %x release, align 4
+ store atomic <2 x ptr> zeroinitializer, ptr %x release, align 4
+
+ ; CHECK : load atomic <2 x float>, ptr %x unordered, align 4
+ load atomic <2 x float>, ptr %x unordered, align 4
+ ; CHECK : store atomic <2 x float> <float 3.0, float 4.0>, ptr %x release, align 4
+ store atomic <2 x float> <float 3.0, float 4.0>, ptr %x release, align 4
+
; CHECK: fence syncscope("singlethread") release
fence syncscope("singlethread") release
; CHECK: fence seq_cst
diff --git a/llvm/test/Bitcode/DILocation-implicit-code.ll b/llvm/test/Bitcode/DILocation-implicit-code.ll
index 159cb8a..1090bff 100644
--- a/llvm/test/Bitcode/DILocation-implicit-code.ll
+++ b/llvm/test/Bitcode/DILocation-implicit-code.ll
@@ -13,7 +13,7 @@ $_ZN1A3fooEi = comdat any
@_ZTIi = external dso_local constant i8*
; Function Attrs: noinline optnone uwtable
-define dso_local void @_Z5test1v() #0 personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg !7 {
+define dso_local void @_Z5test1v() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) !dbg !7 {
entry:
%retval = alloca %struct.A, align 1
%a = alloca %struct.A, align 1
@@ -40,13 +40,13 @@ lpad: ; preds = %entry
catch.dispatch: ; preds = %lpad
%sel = load i32, i32* %ehselector.slot, align 4, !dbg !13
- %3 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)) #4, !dbg !13
+ %3 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*)), !dbg !13
%matches = icmp eq i32 %sel, %3, !dbg !13
br i1 %matches, label %catch, label %eh.resume, !dbg !13
catch: ; preds = %catch.dispatch
%exn = load i8*, i8** %exn.slot, align 8, !dbg !13
- %4 = call i8* @__cxa_begin_catch(i8* %exn) #4, !dbg !13
+ %4 = call i8* @__cxa_begin_catch(i8* %exn), !dbg !13
%5 = bitcast i8* %4 to i32*, !dbg !13
%6 = load i32, i32* %5, align 4, !dbg !13
store i32 %6, i32* %e, align 4, !dbg !13
@@ -55,7 +55,7 @@ catch: ; preds = %catch.dispatch
to label %invoke.cont2 unwind label %lpad1, !dbg !15
invoke.cont2: ; preds = %catch
- call void @__cxa_end_catch() #4, !dbg !16
+ call void @__cxa_end_catch(), !dbg !16
br label %return
lpad1: ; preds = %catch
@@ -65,7 +65,7 @@ lpad1: ; preds = %catch
store i8* %9, i8** %exn.slot, align 8, !dbg !12
%10 = extractvalue { i8*, i32 } %8, 1, !dbg !12
store i32 %10, i32* %ehselector.slot, align 4, !dbg !12
- call void @__cxa_end_catch() #4, !dbg !16
+ call void @__cxa_end_catch(), !dbg !16
br label %eh.resume, !dbg !16
try.cont: ; No predecessors!
@@ -84,7 +84,7 @@ eh.resume: ; preds = %lpad1, %catch.dispa
}
; Function Attrs: noinline nounwind optnone uwtable
-define linkonce_odr dso_local void @_ZN1AC2Ev(%struct.A* %this) unnamed_addr #1 comdat align 2 !dbg !17 {
+define linkonce_odr dso_local void @_ZN1AC2Ev(%struct.A* %this) unnamed_addr comdat align 2 !dbg !17 {
entry:
%this.addr = alloca %struct.A*, align 8
store %struct.A* %this, %struct.A** %this.addr, align 8
@@ -93,7 +93,7 @@ entry:
}
; Function Attrs: noinline optnone uwtable
-define linkonce_odr dso_local void @_ZN1A3fooEi(%struct.A* %this, i32 %i) #0 comdat align 2 !dbg !19 {
+define linkonce_odr dso_local void @_ZN1A3fooEi(%struct.A* %this, i32 %i) comdat align 2 !dbg !19 {
entry:
%retval = alloca %struct.A, align 1
%this.addr = alloca %struct.A*, align 8
@@ -106,10 +106,10 @@ entry:
br i1 %cmp, label %if.then, label %if.end, !dbg !20
if.then: ; preds = %entry
- %exception = call i8* @__cxa_allocate_exception(i64 4) #4, !dbg !22
+ %exception = call i8* @__cxa_allocate_exception(i64 4), !dbg !22
%1 = bitcast i8* %exception to i32*, !dbg !22
store i32 1, i32* %1, align 16, !dbg !22
- call void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null) #5, !dbg !22
+ call void @__cxa_throw(i8* %exception, i8* bitcast (i8** @_ZTIi to i8*), i8* null), !dbg !22
unreachable, !dbg !22
if.end: ; preds = %entry
@@ -119,17 +119,17 @@ if.end: ; preds = %entry
declare dso_local i32 @__gxx_personality_v0(...)
; Function Attrs: nounwind readnone
-declare i32 @llvm.eh.typeid.for(i8*) #2
+declare i32 @llvm.eh.typeid.for(i8*)
declare dso_local i8* @__cxa_begin_catch(i8*)
declare dso_local void @__cxa_end_catch()
; Function Attrs: noreturn nounwind
-declare void @llvm.trap() #3
+declare void @llvm.trap()
; Function Attrs: noinline optnone uwtable
-define dso_local void @_Z5test2v() #0 !dbg !24 {
+define dso_local void @_Z5test2v() !dbg !24 {
entry:
%a = alloca %struct.A, align 1
%b = alloca %struct.A, align 1
@@ -143,13 +143,6 @@ declare dso_local i8* @__cxa_allocate_exception(i64)
declare dso_local void @__cxa_throw(i8*, i8*, i8*)
-attributes #0 = { noinline optnone uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { noinline nounwind optnone uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind readnone }
-attributes #3 = { noreturn nounwind }
-attributes #4 = { nounwind }
-attributes #5 = { noreturn }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4, !5}
!llvm.ident = !{!6}
diff --git a/llvm/test/Bitcode/drop-debug-info.3.5.ll b/llvm/test/Bitcode/drop-debug-info.3.5.ll
index 35d3958..16102a0 100644
--- a/llvm/test/Bitcode/drop-debug-info.3.5.ll
+++ b/llvm/test/Bitcode/drop-debug-info.3.5.ll
@@ -12,15 +12,13 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.10.0"
; Function Attrs: nounwind ssp uwtable
-define i32 @main() #0 {
+define i32 @main() {
entry:
%retval = alloca i32, align 4
store i32 0, i32* %retval
ret i32 0, !dbg !12
}
-attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!9, !10}
!llvm.ident = !{!11}
diff --git a/llvm/test/Bitcode/upgrade-tbaa.ll b/llvm/test/Bitcode/upgrade-tbaa.ll
index 7a70d99..893ba69 100644
--- a/llvm/test/Bitcode/upgrade-tbaa.ll
+++ b/llvm/test/Bitcode/upgrade-tbaa.ll
@@ -2,7 +2,7 @@
; RUN: verify-uselistorder < %s
; Function Attrs: nounwind
-define void @_Z4testPiPf(i32* nocapture %pI, float* nocapture %pF) #0 {
+define void @_Z4testPiPf(i32* nocapture %pI, float* nocapture %pF) {
entry:
store i32 0, i32* %pI, align 4, !tbaa !{!"int", !0}
; CHECK: store i32 0, ptr %pI, align 4, !tbaa [[TAG_INT:!.*]]
@@ -11,8 +11,6 @@ entry:
ret void
}
-attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!0 = !{!"omnipotent char", !1}
!1 = !{!"Simple C/C++ TBAA"}
!2 = !{!"float", !0}
diff --git a/llvm/test/CodeGen/AArch64/load-zext-bitcast.ll b/llvm/test/CodeGen/AArch64/load-zext-bitcast.ll
index 9193025..6177ae5 100644
--- a/llvm/test/CodeGen/AArch64/load-zext-bitcast.ll
+++ b/llvm/test/CodeGen/AArch64/load-zext-bitcast.ll
@@ -112,8 +112,7 @@ entry:
define double @load_u64_from_u8_off1(ptr %n){
; CHECK-LABEL: load_u64_from_u8_off1:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldrb w8, [x0, #1]
-; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: ldr b0, [x0, #1]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 1
@@ -140,8 +139,7 @@ entry:
define float @load_u32_from_u8_off1(ptr %n){
; CHECK-LABEL: load_u32_from_u8_off1:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldrb w8, [x0, #1]
-; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: ldr b0, [x0, #1]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 1
@@ -154,8 +152,7 @@ entry:
define half @load_u16_from_u8_off1(ptr %n){
; CHECK-LABEL: load_u16_from_u8_off1:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldrb w8, [x0, #1]
-; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: ldr b0, [x0, #1]
; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
; CHECK-NEXT: ret
entry:
@@ -185,8 +182,7 @@ entry:
define double @load_u64_from_u16_off2(ptr %n){
; CHECK-LABEL: load_u64_from_u16_off2:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldrh w8, [x0, #2]
-; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: ldr h0, [x0, #2]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 2
@@ -199,8 +195,7 @@ entry:
define double @load_u64_from_u8_off2(ptr %n){
; CHECK-LABEL: load_u64_from_u8_off2:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldrb w8, [x0, #2]
-; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: ldr b0, [x0, #2]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 2
@@ -226,7 +221,7 @@ entry:
define float @load_u32_from_u8_off2(ptr %n){
; CHECK-LABEL: load_u32_from_u8_off2:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr b0, [x0, #1]
+; CHECK-NEXT: ldr b0, [x0, #2]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 2
@@ -239,7 +234,7 @@ entry:
define half @load_u16_from_u8_off2(ptr %n){
; CHECK-LABEL: load_u16_from_u8_off2:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr b0, [x0, #1]
+; CHECK-NEXT: ldr b0, [x0, #2]
; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
; CHECK-NEXT: ret
entry:
@@ -283,8 +278,7 @@ entry:
define double @load_u64_from_u8_off255(ptr %n){
; CHECK-LABEL: load_u64_from_u8_off255:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldrb w8, [x0, #255]
-; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: ldr b0, [x0, #255]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 255
@@ -311,8 +305,7 @@ entry:
define float @load_u32_from_u8_off255(ptr %n){
; CHECK-LABEL: load_u32_from_u8_off255:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldrb w8, [x0, #255]
-; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: ldr b0, [x0, #255]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 255
@@ -325,8 +318,7 @@ entry:
define half @load_u16_from_u8_off255(ptr %n){
; CHECK-LABEL: load_u16_from_u8_off255:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldrb w8, [x0, #255]
-; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: ldr b0, [x0, #255]
; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
; CHECK-NEXT: ret
entry:
@@ -354,7 +346,7 @@ entry:
define double @load_u64_from_u16_off256(ptr %n){
; CHECK-LABEL: load_u64_from_u16_off256:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr h0, [x0, #128]
+; CHECK-NEXT: ldr h0, [x0, #256]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 256
@@ -367,7 +359,7 @@ entry:
define double @load_u64_from_u8_off256(ptr %n){
; CHECK-LABEL: load_u64_from_u8_off256:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr b0, [x0, #64]
+; CHECK-NEXT: ldr b0, [x0, #256]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 256
@@ -393,7 +385,7 @@ entry:
define float @load_u32_from_u8_off256(ptr %n){
; CHECK-LABEL: load_u32_from_u8_off256:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr b0, [x0, #128]
+; CHECK-NEXT: ldr b0, [x0, #256]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 256
@@ -406,7 +398,7 @@ entry:
define half @load_u16_from_u8_off256(ptr %n){
; CHECK-LABEL: load_u16_from_u8_off256:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr b0, [x0, #128]
+; CHECK-NEXT: ldr b0, [x0, #256]
; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
; CHECK-NEXT: ret
entry:
@@ -435,8 +427,7 @@ entry:
define double @load_u64_from_u16_offn(ptr %n){
; CHECK-LABEL: load_u64_from_u16_offn:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #8190 // =0x1ffe
-; CHECK-NEXT: ldr h0, [x0, x8]
+; CHECK-NEXT: ldr h0, [x0, #8190]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 8190
@@ -517,7 +508,8 @@ entry:
define double @load_u64_from_u16_offnp1(ptr %n){
; CHECK-LABEL: load_u64_from_u16_offnp1:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr h0, [x0, #4096]
+; CHECK-NEXT: add x8, x0, #2, lsl #12 // =8192
+; CHECK-NEXT: ldr h0, [x8]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 8192
@@ -530,7 +522,8 @@ entry:
define double @load_u64_from_u8_offnp1(ptr %n){
; CHECK-LABEL: load_u64_from_u8_offnp1:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr b0, [x0, #1024]
+; CHECK-NEXT: add x8, x0, #1, lsl #12 // =4096
+; CHECK-NEXT: ldr b0, [x8]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 4096
@@ -557,7 +550,8 @@ entry:
define float @load_u32_from_u8_offnp1(ptr %n){
; CHECK-LABEL: load_u32_from_u8_offnp1:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr b0, [x0, #2048]
+; CHECK-NEXT: add x8, x0, #1, lsl #12 // =4096
+; CHECK-NEXT: ldr b0, [x8]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 4096
@@ -570,7 +564,8 @@ entry:
define half @load_u16_from_u8_offnp1(ptr %n){
; CHECK-LABEL: load_u16_from_u8_offnp1:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr b0, [x0, #2048]
+; CHECK-NEXT: add x8, x0, #1, lsl #12 // =4096
+; CHECK-NEXT: ldr b0, [x8]
; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/pr164181.ll b/llvm/test/CodeGen/AArch64/pr164181.ll
new file mode 100644
index 0000000..4ec63ec
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/pr164181.ll
@@ -0,0 +1,640 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -verify-machineinstrs < %s | FileCheck %s
+
+; This test recreates a regalloc crash reported in
+; https://github.com/llvm/llvm-project/issues/164181
+; When rematting an instruction, we need to make sure to constrain the newly
+; allocated register to both the rematted def's reg class and the use's reg
+; class.
+
+target triple = "aarch64-unknown-linux-gnu"
+
+@var_32 = external global i16
+@var_35 = external global i64
+@var_39 = external global i64
+@var_46 = external global i64
+@var_50 = external global i32
+
+define void @f(i1 %var_0, i16 %var_1, i64 %var_2, i8 %var_3, i16 %var_4, i1 %var_5, i32 %var_6, i32 %var_7, i8 %var_10, i64 %var_11, i8 %var_14, i32 %var_15, i64 %var_16, ptr %arr_3, ptr %arr_4, ptr %arr_6, ptr %arr_7, ptr %arr_12, ptr %arr_13, ptr %arr_19, i64 %mul, i64 %conv35, i64 %idxprom138.us16, i8 %0, i8 %1, ptr %invariant.gep875.us) #0 {
+; CHECK-LABEL: f:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: sub sp, sp, #240
+; CHECK-NEXT: str x30, [sp, #144] // 8-byte Folded Spill
+; CHECK-NEXT: stp x28, x27, [sp, #160] // 16-byte Folded Spill
+; CHECK-NEXT: stp x26, x25, [sp, #176] // 16-byte Folded Spill
+; CHECK-NEXT: stp x24, x23, [sp, #192] // 16-byte Folded Spill
+; CHECK-NEXT: stp x22, x21, [sp, #208] // 16-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #224] // 16-byte Folded Spill
+; CHECK-NEXT: str w6, [sp, #20] // 4-byte Folded Spill
+; CHECK-NEXT: str w4, [sp, #72] // 4-byte Folded Spill
+; CHECK-NEXT: str w3, [sp, #112] // 4-byte Folded Spill
+; CHECK-NEXT: str w5, [sp, #36] // 4-byte Folded Spill
+; CHECK-NEXT: tbz w5, #0, .LBB0_43
+; CHECK-NEXT: // %bb.1: // %for.body41.lr.ph
+; CHECK-NEXT: ldr x4, [sp, #312]
+; CHECK-NEXT: ldr x14, [sp, #280]
+; CHECK-NEXT: tbz w0, #0, .LBB0_42
+; CHECK-NEXT: // %bb.2: // %for.body41.us.preheader
+; CHECK-NEXT: ldrb w8, [sp, #368]
+; CHECK-NEXT: ldrb w12, [sp, #256]
+; CHECK-NEXT: ldr w26, [sp, #264]
+; CHECK-NEXT: adrp x20, :got:var_50
+; CHECK-NEXT: mov x28, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: mov w21, #36006 // =0x8ca6
+; CHECK-NEXT: ldr x11, [sp, #376]
+; CHECK-NEXT: ldrb w13, [sp, #360]
+; CHECK-NEXT: ldp x17, x16, [sp, #296]
+; CHECK-NEXT: mov w22, #1 // =0x1
+; CHECK-NEXT: add x27, x14, #120
+; CHECK-NEXT: ldr x18, [sp, #288]
+; CHECK-NEXT: ldr x7, [sp, #272]
+; CHECK-NEXT: ldr x5, [sp, #248]
+; CHECK-NEXT: mov x10, xzr
+; CHECK-NEXT: mov w23, wzr
+; CHECK-NEXT: mov w30, wzr
+; CHECK-NEXT: ldrb w19, [sp, #240]
+; CHECK-NEXT: mov w25, wzr
+; CHECK-NEXT: mov x24, xzr
+; CHECK-NEXT: str w8, [sp, #108] // 4-byte Folded Spill
+; CHECK-NEXT: mov x3, x26
+; CHECK-NEXT: ldp x9, x8, [sp, #344]
+; CHECK-NEXT: str w12, [sp, #92] // 4-byte Folded Spill
+; CHECK-NEXT: mov w12, #1 // =0x1
+; CHECK-NEXT: bic w12, w12, w0
+; CHECK-NEXT: str w12, [sp, #76] // 4-byte Folded Spill
+; CHECK-NEXT: mov w12, #48 // =0x30
+; CHECK-NEXT: str x9, [sp, #136] // 8-byte Folded Spill
+; CHECK-NEXT: ldp x9, x15, [sp, #328]
+; CHECK-NEXT: madd x8, x8, x12, x9
+; CHECK-NEXT: str x8, [sp, #64] // 8-byte Folded Spill
+; CHECK-NEXT: add x8, x26, w26, uxtw #1
+; CHECK-NEXT: ldr x20, [x20, :got_lo12:var_50]
+; CHECK-NEXT: str x26, [sp, #96] // 8-byte Folded Spill
+; CHECK-NEXT: str x14, [sp, #152] // 8-byte Folded Spill
+; CHECK-NEXT: lsl x6, x8, #3
+; CHECK-NEXT: add x8, x14, #120
+; CHECK-NEXT: str x4, [sp, #24] // 8-byte Folded Spill
+; CHECK-NEXT: str w19, [sp, #16] // 4-byte Folded Spill
+; CHECK-NEXT: str x8, [sp, #80] // 8-byte Folded Spill
+; CHECK-NEXT: b .LBB0_4
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_3: // in Loop: Header=BB0_4 Depth=1
+; CHECK-NEXT: ldr w19, [sp, #16] // 4-byte Folded Reload
+; CHECK-NEXT: ldr x24, [sp, #40] // 8-byte Folded Reload
+; CHECK-NEXT: ldr x14, [sp, #152] // 8-byte Folded Reload
+; CHECK-NEXT: mov w23, #1 // =0x1
+; CHECK-NEXT: mov w30, #1 // =0x1
+; CHECK-NEXT: mov w25, w19
+; CHECK-NEXT: .LBB0_4: // %for.body41.us
+; CHECK-NEXT: // =>This Loop Header: Depth=1
+; CHECK-NEXT: // Child Loop BB0_6 Depth 2
+; CHECK-NEXT: // Child Loop BB0_8 Depth 3
+; CHECK-NEXT: // Child Loop BB0_10 Depth 4
+; CHECK-NEXT: // Child Loop BB0_11 Depth 5
+; CHECK-NEXT: // Child Loop BB0_28 Depth 5
+; CHECK-NEXT: // Child Loop BB0_39 Depth 5
+; CHECK-NEXT: ldr w8, [sp, #20] // 4-byte Folded Reload
+; CHECK-NEXT: mov x12, x24
+; CHECK-NEXT: str x24, [sp, #48] // 8-byte Folded Spill
+; CHECK-NEXT: str w8, [x14]
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: strb w19, [x14]
+; CHECK-NEXT: b .LBB0_6
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_5: // %for.cond.cleanup93.us
+; CHECK-NEXT: // in Loop: Header=BB0_6 Depth=2
+; CHECK-NEXT: ldr w9, [sp, #36] // 4-byte Folded Reload
+; CHECK-NEXT: ldr x4, [sp, #24] // 8-byte Folded Reload
+; CHECK-NEXT: ldp x24, x12, [sp, #48] // 16-byte Folded Reload
+; CHECK-NEXT: mov x22, xzr
+; CHECK-NEXT: mov w25, wzr
+; CHECK-NEXT: mov w8, wzr
+; CHECK-NEXT: tbz w9, #0, .LBB0_3
+; CHECK-NEXT: .LBB0_6: // %for.body67.us
+; CHECK-NEXT: // Parent Loop BB0_4 Depth=1
+; CHECK-NEXT: // => This Loop Header: Depth=2
+; CHECK-NEXT: // Child Loop BB0_8 Depth 3
+; CHECK-NEXT: // Child Loop BB0_10 Depth 4
+; CHECK-NEXT: // Child Loop BB0_11 Depth 5
+; CHECK-NEXT: // Child Loop BB0_28 Depth 5
+; CHECK-NEXT: // Child Loop BB0_39 Depth 5
+; CHECK-NEXT: str x12, [sp, #40] // 8-byte Folded Spill
+; CHECK-NEXT: cmn x24, #30
+; CHECK-NEXT: mov x12, #-30 // =0xffffffffffffffe2
+; CHECK-NEXT: add x19, x4, w8, sxtw #2
+; CHECK-NEXT: mov x9, xzr
+; CHECK-NEXT: csel x12, x24, x12, lo
+; CHECK-NEXT: mov w4, w30
+; CHECK-NEXT: str x12, [sp, #56] // 8-byte Folded Spill
+; CHECK-NEXT: b .LBB0_8
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_7: // %for.cond.cleanup98.us
+; CHECK-NEXT: // in Loop: Header=BB0_8 Depth=3
+; CHECK-NEXT: ldr w4, [sp, #72] // 4-byte Folded Reload
+; CHECK-NEXT: ldr w23, [sp, #128] // 4-byte Folded Reload
+; CHECK-NEXT: mov w9, #1 // =0x1
+; CHECK-NEXT: mov x22, xzr
+; CHECK-NEXT: tbnz w0, #0, .LBB0_5
+; CHECK-NEXT: .LBB0_8: // %for.cond95.preheader.us
+; CHECK-NEXT: // Parent Loop BB0_4 Depth=1
+; CHECK-NEXT: // Parent Loop BB0_6 Depth=2
+; CHECK-NEXT: // => This Loop Header: Depth=3
+; CHECK-NEXT: // Child Loop BB0_10 Depth 4
+; CHECK-NEXT: // Child Loop BB0_11 Depth 5
+; CHECK-NEXT: // Child Loop BB0_28 Depth 5
+; CHECK-NEXT: // Child Loop BB0_39 Depth 5
+; CHECK-NEXT: ldr x8, [sp, #64] // 8-byte Folded Reload
+; CHECK-NEXT: mov w14, #1152 // =0x480
+; CHECK-NEXT: mov w24, #1 // =0x1
+; CHECK-NEXT: mov w12, wzr
+; CHECK-NEXT: str wzr, [sp, #132] // 4-byte Folded Spill
+; CHECK-NEXT: mov w30, w4
+; CHECK-NEXT: madd x8, x9, x14, x8
+; CHECK-NEXT: mov w14, #1 // =0x1
+; CHECK-NEXT: str x8, [sp, #120] // 8-byte Folded Spill
+; CHECK-NEXT: add x8, x9, x9, lsl #1
+; CHECK-NEXT: lsl x26, x8, #4
+; CHECK-NEXT: sxtb w8, w23
+; CHECK-NEXT: mov w23, w25
+; CHECK-NEXT: str w8, [sp, #116] // 4-byte Folded Spill
+; CHECK-NEXT: b .LBB0_10
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_9: // %for.cond510.preheader.us
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: ldr w23, [sp, #92] // 4-byte Folded Reload
+; CHECK-NEXT: mov x22, x8
+; CHECK-NEXT: ldr x3, [sp, #96] // 8-byte Folded Reload
+; CHECK-NEXT: ldr x27, [sp, #80] // 8-byte Folded Reload
+; CHECK-NEXT: mov x28, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: mov x14, xzr
+; CHECK-NEXT: ldr w8, [sp, #76] // 4-byte Folded Reload
+; CHECK-NEXT: tbz w8, #31, .LBB0_7
+; CHECK-NEXT: .LBB0_10: // %for.body99.us
+; CHECK-NEXT: // Parent Loop BB0_4 Depth=1
+; CHECK-NEXT: // Parent Loop BB0_6 Depth=2
+; CHECK-NEXT: // Parent Loop BB0_8 Depth=3
+; CHECK-NEXT: // => This Loop Header: Depth=4
+; CHECK-NEXT: // Child Loop BB0_11 Depth 5
+; CHECK-NEXT: // Child Loop BB0_28 Depth 5
+; CHECK-NEXT: // Child Loop BB0_39 Depth 5
+; CHECK-NEXT: ldr w8, [sp, #116] // 4-byte Folded Reload
+; CHECK-NEXT: and w8, w8, w8, asr #31
+; CHECK-NEXT: str w8, [sp, #128] // 4-byte Folded Spill
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_11: // %for.body113.us
+; CHECK-NEXT: // Parent Loop BB0_4 Depth=1
+; CHECK-NEXT: // Parent Loop BB0_6 Depth=2
+; CHECK-NEXT: // Parent Loop BB0_8 Depth=3
+; CHECK-NEXT: // Parent Loop BB0_10 Depth=4
+; CHECK-NEXT: // => This Inner Loop Header: Depth=5
+; CHECK-NEXT: tbnz w0, #0, .LBB0_11
+; CHECK-NEXT: // %bb.12: // %for.cond131.preheader.us
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: ldr w8, [sp, #112] // 4-byte Folded Reload
+; CHECK-NEXT: mov w4, #1 // =0x1
+; CHECK-NEXT: strb w8, [x18]
+; CHECK-NEXT: ldr x8, [sp, #120] // 8-byte Folded Reload
+; CHECK-NEXT: ldrh w8, [x8]
+; CHECK-NEXT: cbnz w4, .LBB0_14
+; CHECK-NEXT: // %bb.13: // %cond.true146.us
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: ldrsb w4, [x27, x3]
+; CHECK-NEXT: b .LBB0_15
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_14: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: mov w4, wzr
+; CHECK-NEXT: .LBB0_15: // %cond.end154.us
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: mov w25, #18984 // =0x4a28
+; CHECK-NEXT: mul w8, w8, w25
+; CHECK-NEXT: and w8, w8, #0xfff8
+; CHECK-NEXT: lsl w8, w8, w4
+; CHECK-NEXT: cbz w8, .LBB0_17
+; CHECK-NEXT: // %bb.16: // %if.then.us
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: str wzr, [sp, #132] // 4-byte Folded Spill
+; CHECK-NEXT: str wzr, [x18]
+; CHECK-NEXT: .LBB0_17: // %if.end.us
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: ldr w8, [sp, #108] // 4-byte Folded Reload
+; CHECK-NEXT: mov w4, #18984 // =0x4a28
+; CHECK-NEXT: mov w25, w23
+; CHECK-NEXT: strb w8, [x18]
+; CHECK-NEXT: ldrsb w8, [x27, x3]
+; CHECK-NEXT: lsl w8, w4, w8
+; CHECK-NEXT: mov x4, #-18403 // =0xffffffffffffb81d
+; CHECK-NEXT: movk x4, #58909, lsl #16
+; CHECK-NEXT: cbz w8, .LBB0_19
+; CHECK-NEXT: // %bb.18: // %if.then.us.2
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: str wzr, [sp, #132] // 4-byte Folded Spill
+; CHECK-NEXT: strb wzr, [x18]
+; CHECK-NEXT: .LBB0_19: // %if.then.us.5
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: ldr w23, [sp, #132] // 4-byte Folded Reload
+; CHECK-NEXT: mov w8, #29625 // =0x73b9
+; CHECK-NEXT: movk w8, #21515, lsl #16
+; CHECK-NEXT: cmp w23, w8
+; CHECK-NEXT: csel w23, w23, w8, lt
+; CHECK-NEXT: str w23, [sp, #132] // 4-byte Folded Spill
+; CHECK-NEXT: tbz w0, #0, .LBB0_21
+; CHECK-NEXT: // %bb.20: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: mov w8, wzr
+; CHECK-NEXT: b .LBB0_22
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_21: // %cond.true146.us.7
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: ldrsb w8, [x27, x3]
+; CHECK-NEXT: .LBB0_22: // %cond.end154.us.7
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: mov w23, #18984 // =0x4a28
+; CHECK-NEXT: mov w3, #149 // =0x95
+; CHECK-NEXT: lsl w8, w23, w8
+; CHECK-NEXT: cbz w8, .LBB0_24
+; CHECK-NEXT: // %bb.23: // %if.then.us.7
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: ldr x8, [sp, #152] // 8-byte Folded Reload
+; CHECK-NEXT: str wzr, [sp, #132] // 4-byte Folded Spill
+; CHECK-NEXT: str wzr, [x8]
+; CHECK-NEXT: .LBB0_24: // %if.end.us.7
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: mov x23, xzr
+; CHECK-NEXT: b .LBB0_28
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_25: // %cond.true331.us
+; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5
+; CHECK-NEXT: ldrsb w4, [x10]
+; CHECK-NEXT: .LBB0_26: // %cond.end345.us
+; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5
+; CHECK-NEXT: strh w4, [x18]
+; CHECK-NEXT: mul x4, x22, x28
+; CHECK-NEXT: adrp x22, :got:var_46
+; CHECK-NEXT: mov x8, xzr
+; CHECK-NEXT: ldr x22, [x22, :got_lo12:var_46]
+; CHECK-NEXT: str x4, [x22]
+; CHECK-NEXT: mov x4, #-18403 // =0xffffffffffffb81d
+; CHECK-NEXT: movk x4, #58909, lsl #16
+; CHECK-NEXT: .LBB0_27: // %for.inc371.us
+; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5
+; CHECK-NEXT: mov w22, #-18978 // =0xffffb5de
+; CHECK-NEXT: orr x23, x23, #0x1
+; CHECK-NEXT: mov x24, xzr
+; CHECK-NEXT: mul w12, w12, w22
+; CHECK-NEXT: mov x22, x5
+; CHECK-NEXT: tbz w0, #0, .LBB0_36
+; CHECK-NEXT: .LBB0_28: // %for.body194.us
+; CHECK-NEXT: // Parent Loop BB0_4 Depth=1
+; CHECK-NEXT: // Parent Loop BB0_6 Depth=2
+; CHECK-NEXT: // Parent Loop BB0_8 Depth=3
+; CHECK-NEXT: // Parent Loop BB0_10 Depth=4
+; CHECK-NEXT: // => This Inner Loop Header: Depth=5
+; CHECK-NEXT: cbnz wzr, .LBB0_30
+; CHECK-NEXT: // %bb.29: // %if.then222.us
+; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5
+; CHECK-NEXT: adrp x27, :got:var_32
+; CHECK-NEXT: ldur w8, [x19, #-12]
+; CHECK-NEXT: ldr x27, [x27, :got_lo12:var_32]
+; CHECK-NEXT: strh w8, [x27]
+; CHECK-NEXT: sxtb w8, w25
+; CHECK-NEXT: bic w25, w8, w8, asr #31
+; CHECK-NEXT: b .LBB0_31
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_30: // in Loop: Header=BB0_28 Depth=5
+; CHECK-NEXT: mov w25, wzr
+; CHECK-NEXT: .LBB0_31: // %if.end239.us
+; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5
+; CHECK-NEXT: strb w3, [x16]
+; CHECK-NEXT: tst w13, #0xff
+; CHECK-NEXT: b.eq .LBB0_33
+; CHECK-NEXT: // %bb.32: // %if.then254.us
+; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5
+; CHECK-NEXT: ldrh w8, [x26, x14, lsl #1]
+; CHECK-NEXT: adrp x27, :got:var_35
+; CHECK-NEXT: ldr x27, [x27, :got_lo12:var_35]
+; CHECK-NEXT: cmp w8, #0
+; CHECK-NEXT: csel x8, xzr, x7, eq
+; CHECK-NEXT: str x8, [x27]
+; CHECK-NEXT: strh w1, [x17]
+; CHECK-NEXT: .LBB0_33: // %if.end282.us
+; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5
+; CHECK-NEXT: orr x27, x24, x4
+; CHECK-NEXT: adrp x8, :got:var_39
+; CHECK-NEXT: str x27, [x18]
+; CHECK-NEXT: ldr x8, [x8, :got_lo12:var_39]
+; CHECK-NEXT: str x10, [x8]
+; CHECK-NEXT: ldrb w8, [x6, x9]
+; CHECK-NEXT: str x8, [x18]
+; CHECK-NEXT: mov w8, #1 // =0x1
+; CHECK-NEXT: cbnz x2, .LBB0_27
+; CHECK-NEXT: // %bb.34: // %if.then327.us
+; CHECK-NEXT: // in Loop: Header=BB0_28 Depth=5
+; CHECK-NEXT: cbz w8, .LBB0_25
+; CHECK-NEXT: // %bb.35: // in Loop: Header=BB0_28 Depth=5
+; CHECK-NEXT: mov w4, wzr
+; CHECK-NEXT: b .LBB0_26
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_36: // %for.cond376.preheader.us
+; CHECK-NEXT: // in Loop: Header=BB0_10 Depth=4
+; CHECK-NEXT: mov w3, #1152 // =0x480
+; CHECK-NEXT: mov x22, xzr
+; CHECK-NEXT: mov w4, wzr
+; CHECK-NEXT: mov x24, x27
+; CHECK-NEXT: lsl x23, x14, #1
+; CHECK-NEXT: mov x27, #-1 // =0xffffffffffffffff
+; CHECK-NEXT: madd x14, x14, x3, x11
+; CHECK-NEXT: mov w28, w30
+; CHECK-NEXT: mov w3, #-7680 // =0xffffe200
+; CHECK-NEXT: b .LBB0_39
+; CHECK-NEXT: .p2align 5, , 16
+; CHECK-NEXT: .LBB0_37: // %if.then466.us
+; CHECK-NEXT: // in Loop: Header=BB0_39 Depth=5
+; CHECK-NEXT: ldr x28, [sp, #152] // 8-byte Folded Reload
+; CHECK-NEXT: ldr x3, [sp, #136] // 8-byte Folded Reload
+; CHECK-NEXT: sxtb w4, w4
+; CHECK-NEXT: bic w4, w4, w4, asr #31
+; CHECK-NEXT: str x3, [x28]
+; CHECK-NEXT: mov w3, #-7680 // =0xffffe200
+; CHECK-NEXT: .LBB0_38: // %for.inc505.us
+; CHECK-NEXT: // in Loop: Header=BB0_39 Depth=5
+; CHECK-NEXT: add x22, x22, #1
+; CHECK-NEXT: add x27, x27, #1
+; CHECK-NEXT: mov w28, wzr
+; CHECK-NEXT: cmp x27, #0
+; CHECK-NEXT: b.hs .LBB0_9
+; CHECK-NEXT: .LBB0_39: // %for.body380.us
+; CHECK-NEXT: // Parent Loop BB0_4 Depth=1
+; CHECK-NEXT: // Parent Loop BB0_6 Depth=2
+; CHECK-NEXT: // Parent Loop BB0_8 Depth=3
+; CHECK-NEXT: // Parent Loop BB0_10 Depth=4
+; CHECK-NEXT: // => This Inner Loop Header: Depth=5
+; CHECK-NEXT: mov w30, w28
+; CHECK-NEXT: ldrh w28, [x23]
+; CHECK-NEXT: tst w0, #0x1
+; CHECK-NEXT: strh w28, [x11]
+; CHECK-NEXT: csel w28, w21, w3, ne
+; CHECK-NEXT: str w28, [x20]
+; CHECK-NEXT: cbz x15, .LBB0_38
+; CHECK-NEXT: // %bb.40: // %if.then436.us
+; CHECK-NEXT: // in Loop: Header=BB0_39 Depth=5
+; CHECK-NEXT: ldrh w28, [x14]
+; CHECK-NEXT: cbnz w28, .LBB0_37
+; CHECK-NEXT: // %bb.41: // in Loop: Header=BB0_39 Depth=5
+; CHECK-NEXT: mov w4, wzr
+; CHECK-NEXT: b .LBB0_38
+; CHECK-NEXT: .LBB0_42: // %for.body41
+; CHECK-NEXT: strb wzr, [x4]
+; CHECK-NEXT: strb wzr, [x14]
+; CHECK-NEXT: .LBB0_43: // %for.cond563.preheader
+; CHECK-NEXT: ldp x20, x19, [sp, #224] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x22, x21, [sp, #208] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x24, x23, [sp, #192] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x26, x25, [sp, #176] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x28, x27, [sp, #160] // 16-byte Folded Reload
+; CHECK-NEXT: ldr x30, [sp, #144] // 8-byte Folded Reload
+; CHECK-NEXT: add sp, sp, #240
+; CHECK-NEXT: ret
+entry:
+ br i1 %var_5, label %for.body41.lr.ph, label %for.cond563.preheader
+
+for.body41.lr.ph: ; preds = %entry
+ %arrayidx147 = getelementptr i8, ptr %arr_3, i64 120
+ %tobool326.not = icmp eq i64 %var_2, 0
+ %not353 = xor i64 0, -1
+ %add538 = select i1 %var_0, i16 0, i16 1
+ br i1 %var_0, label %for.body41.us, label %for.body41
+
+for.body41.us: ; preds = %for.cond.cleanup93.us, %for.body41.lr.ph
+ %var_24.promoted9271009.us = phi i64 [ 0, %for.body41.lr.ph ], [ %6, %for.cond.cleanup93.us ]
+ %var_37.promoted9301008.us = phi i64 [ 1, %for.body41.lr.ph ], [ 0, %for.cond.cleanup93.us ]
+ %2 = phi i8 [ 0, %for.body41.lr.ph ], [ 1, %for.cond.cleanup93.us ]
+ %add4139751001.us = phi i16 [ 0, %for.body41.lr.ph ], [ 1, %for.cond.cleanup93.us ]
+ %3 = phi i8 [ 0, %for.body41.lr.ph ], [ %var_10, %for.cond.cleanup93.us ]
+ store i32 %var_6, ptr %arr_3, align 4
+ store i8 %var_10, ptr %arr_3, align 1
+ br label %for.body67.us
+
+for.body67.us: ; preds = %for.cond.cleanup93.us, %for.body41.us
+ %4 = phi i8 [ %3, %for.body41.us ], [ 0, %for.cond.cleanup93.us ]
+ %add413977.us = phi i16 [ %add4139751001.us, %for.body41.us ], [ %add413.us17, %for.cond.cleanup93.us ]
+ %5 = phi i8 [ %2, %for.body41.us ], [ %.sroa.speculated829.us, %for.cond.cleanup93.us ]
+ %conv64922.us = phi i32 [ 1, %for.body41.us ], [ 0, %for.cond.cleanup93.us ]
+ %6 = phi i64 [ %var_24.promoted9271009.us, %for.body41.us ], [ %.sroa.speculated832.us, %for.cond.cleanup93.us ]
+ %mul354903918.us = phi i64 [ %var_37.promoted9301008.us, %for.body41.us ], [ 0, %for.cond.cleanup93.us ]
+ %i_2.0921.us = zext i32 %var_15 to i64
+ %.sroa.speculated832.us = tail call i64 @llvm.umin.i64(i64 %var_24.promoted9271009.us, i64 -30)
+ %sext1023 = shl i64 %i_2.0921.us, 1
+ %idxprom138.us162 = ashr i64 %sext1023, 1
+  %gep889.us = getelementptr [24 x i16], ptr %arr_19, i64 %idxprom138.us162
+ %arrayidx149.us = getelementptr i8, ptr %arrayidx147, i64 %idxprom138.us162
+ %arrayidx319.us = getelementptr [24 x i8], ptr null, i64 %idxprom138.us162
+ %7 = sext i32 %conv64922.us to i64
+ %8 = getelementptr i32, ptr %arr_12, i64 %7
+ %arrayidx226.us = getelementptr i8, ptr %8, i64 -12
+ br label %for.cond95.preheader.us
+
+for.cond.cleanup93.us: ; preds = %for.cond.cleanup98.us
+ br i1 %var_5, label %for.body67.us, label %for.body41.us
+
+for.cond.cleanup98.us: ; preds = %for.cond510.preheader.us
+ br i1 %var_0, label %for.cond.cleanup93.us, label %for.cond95.preheader.us
+
+for.body99.us: ; preds = %for.cond95.preheader.us, %for.cond510.preheader.us
+ %mul287985.us = phi i16 [ 0, %for.cond95.preheader.us ], [ %mul287.us, %for.cond510.preheader.us ]
+ %9 = phi i8 [ %29, %for.cond95.preheader.us ], [ %var_14, %for.cond510.preheader.us ]
+ %add413979.us = phi i16 [ %add413978.us, %for.cond95.preheader.us ], [ %add413.us17, %for.cond510.preheader.us ]
+ %10 = phi i32 [ 0, %for.cond95.preheader.us ], [ %26, %for.cond510.preheader.us ]
+ %mul354905.us = phi i64 [ %mul354904.us, %for.cond95.preheader.us ], [ %mul354907.us, %for.cond510.preheader.us ]
+ %sub283896.us = phi i64 [ 1, %for.cond95.preheader.us ], [ %sub283.us, %for.cond510.preheader.us ]
+ %conv96880.us = phi i64 [ 1, %for.cond95.preheader.us ], [ 0, %for.cond510.preheader.us ]
+ %.sroa.speculated829.us = tail call i8 @llvm.smin.i8(i8 %30, i8 0)
+ br label %for.body113.us
+
+for.body380.us: ; preds = %for.cond376.preheader.us, %for.inc505.us
+ %indvars.iv1018 = phi i64 [ 0, %for.cond376.preheader.us ], [ %indvars.iv.next1019, %for.inc505.us ]
+ %11 = phi i8 [ 0, %for.cond376.preheader.us ], [ %13, %for.inc505.us ]
+ %add413980.us = phi i16 [ %add413979.us, %for.cond376.preheader.us ], [ 0, %for.inc505.us ]
+ %12 = load i16, ptr %arrayidx384.us, align 2
+ store i16 %12, ptr %invariant.gep875.us, align 2
+ %add413.us17 = or i16 %add413980.us, 0
+ %arrayidx416.us = getelementptr i16, ptr %arr_13, i64 %indvars.iv1018
+ %conv419.us = select i1 %var_0, i32 36006, i32 -7680
+ store i32 %conv419.us, ptr @var_50, align 4
+ %tobool435.not.us = icmp eq i64 %mul, 0
+ br i1 %tobool435.not.us, label %for.inc505.us, label %if.then436.us
+
+if.then436.us: ; preds = %for.body380.us
+ %.sroa.speculated817.us = tail call i8 @llvm.smax.i8(i8 %11, i8 0)
+ %cond464.in.us = load i16, ptr %gep876.us, align 2
+ %tobool465.not.us = icmp eq i16 %cond464.in.us, 0
+ br i1 %tobool465.not.us, label %for.inc505.us, label %if.then466.us
+
+if.then466.us: ; preds = %if.then436.us
+ store i64 %conv35, ptr %arr_3, align 8
+ br label %for.inc505.us
+
+for.inc505.us: ; preds = %if.then466.us, %if.then436.us, %for.body380.us
+ %13 = phi i8 [ %11, %for.body380.us ], [ %.sroa.speculated817.us, %if.then466.us ], [ 0, %if.then436.us ]
+ %indvars.iv.next1019 = add i64 %indvars.iv1018, 1
+ %cmp378.us = icmp ult i64 %indvars.iv1018, 0
+ br i1 %cmp378.us, label %for.body380.us, label %for.cond510.preheader.us
+
+for.body194.us: ; preds = %if.end.us.7, %for.inc371.us
+ %indvars.iv = phi i64 [ 0, %if.end.us.7 ], [ %indvars.iv.next, %for.inc371.us ]
+ %mul287986.us = phi i16 [ %mul287985.us, %if.end.us.7 ], [ %mul287.us, %for.inc371.us ]
+ %14 = phi i8 [ %9, %if.end.us.7 ], [ %16, %for.inc371.us ]
+ %mul354906.us = phi i64 [ %mul354905.us, %if.end.us.7 ], [ %var_11, %for.inc371.us ]
+ %sub283897.us = phi i64 [ %sub283896.us, %if.end.us.7 ], [ 0, %for.inc371.us ]
+ %tobool221.not.us = icmp eq i32 1, 0
+ br i1 %tobool221.not.us, label %if.end239.us, label %if.then222.us
+
+if.then222.us: ; preds = %for.body194.us
+ %15 = load i32, ptr %arrayidx226.us, align 4
+ %conv227.us = trunc i32 %15 to i16
+ store i16 %conv227.us, ptr @var_32, align 2
+ %.sroa.speculated820.us = tail call i8 @llvm.smax.i8(i8 %14, i8 0)
+ br label %if.end239.us
+
+if.end239.us: ; preds = %if.then222.us, %for.body194.us
+ %16 = phi i8 [ %.sroa.speculated820.us, %if.then222.us ], [ 0, %for.body194.us ]
+ store i8 -107, ptr %arr_7, align 1
+ %tobool253.not.us = icmp eq i8 %0, 0
+ br i1 %tobool253.not.us, label %if.end282.us, label %if.then254.us
+
+if.then254.us: ; preds = %if.end239.us
+ %17 = load i16, ptr %arrayidx259.us, align 2
+ %tobool261.not.us = icmp eq i16 %17, 0
+ %conv268.us = select i1 %tobool261.not.us, i64 0, i64 %var_16
+ store i64 %conv268.us, ptr @var_35, align 8
+ %gep867.us = getelementptr [24 x [24 x i64]], ptr null, i64 %indvars.iv
+ store i16 %var_1, ptr %arr_6, align 2
+ br label %if.end282.us
+
+if.end282.us: ; preds = %if.then254.us, %if.end239.us
+ %sub283.us = or i64 %sub283897.us, -434259939
+ store i64 %sub283.us, ptr %arr_4, align 8
+ %mul287.us = mul i16 %mul287986.us, -18978
+ store i64 0, ptr @var_39, align 8
+ %18 = load i8, ptr %arrayidx321.us, align 1
+ %conv322.us = zext i8 %18 to i64
+ store i64 %conv322.us, ptr %arr_4, align 8
+ br i1 %tobool326.not, label %if.then327.us, label %for.inc371.us
+
+if.then327.us: ; preds = %if.end282.us
+ %tobool330.not.us = icmp eq i32 0, 0
+ br i1 %tobool330.not.us, label %cond.end345.us, label %cond.true331.us
+
+cond.true331.us: ; preds = %if.then327.us
+ %19 = load i8, ptr null, align 1
+ %20 = sext i8 %19 to i16
+ br label %cond.end345.us
+
+cond.end345.us: ; preds = %cond.true331.us, %if.then327.us
+ %cond346.us = phi i16 [ %20, %cond.true331.us ], [ 0, %if.then327.us ]
+ store i16 %cond346.us, ptr %arr_4, align 2
+ %mul354.us = mul i64 %mul354906.us, %not353
+ store i64 %mul354.us, ptr @var_46, align 8
+ br label %for.inc371.us
+
+for.inc371.us: ; preds = %cond.end345.us, %if.end282.us
+ %mul354907.us = phi i64 [ 1, %if.end282.us ], [ 0, %cond.end345.us ]
+ %indvars.iv.next = or i64 %indvars.iv, 1
+ br i1 %var_0, label %for.body194.us, label %for.cond376.preheader.us
+
+cond.true146.us: ; preds = %for.cond131.preheader.us
+ %21 = load i8, ptr %arrayidx149.us, align 1
+ %conv150.us = sext i8 %21 to i32
+ br label %cond.end154.us
+
+cond.end154.us: ; preds = %for.cond131.preheader.us, %cond.true146.us
+ %cond155.us = phi i32 [ %conv150.us, %cond.true146.us ], [ 0, %for.cond131.preheader.us ]
+ %shl.us = shl i32 %div.us, %cond155.us
+ %tobool157.not.us = icmp eq i32 %shl.us, 0
+ br i1 %tobool157.not.us, label %if.end.us, label %if.then.us
+
+if.then.us: ; preds = %cond.end154.us
+ store i32 0, ptr %arr_4, align 4
+ br label %if.end.us
+
+if.end.us: ; preds = %if.then.us, %cond.end154.us
+ %22 = phi i32 [ 0, %if.then.us ], [ %10, %cond.end154.us ]
+ store i8 %1, ptr %arr_4, align 1
+ call void @llvm.assume(i1 true)
+ %23 = load i8, ptr %arrayidx149.us, align 1
+ %conv150.us.2 = sext i8 %23 to i32
+ %shl.us.2 = shl i32 18984, %conv150.us.2
+ %tobool157.not.us.2 = icmp eq i32 %shl.us.2, 0
+ br i1 %tobool157.not.us.2, label %if.then.us.5, label %if.then.us.2
+
+if.then.us.2: ; preds = %if.end.us
+ %.sroa.speculated826.us.2 = tail call i32 @llvm.smin.i32(i32 %10, i32 0)
+ store i8 0, ptr %arr_4, align 1
+ br label %if.then.us.5
+
+if.then.us.5: ; preds = %if.then.us.2, %if.end.us
+ %24 = phi i32 [ 0, %if.then.us.2 ], [ %22, %if.end.us ]
+ %.sroa.speculated826.us.5 = tail call i32 @llvm.smin.i32(i32 %24, i32 1410036665)
+ br i1 %var_0, label %cond.end154.us.7, label %cond.true146.us.7
+
+cond.true146.us.7: ; preds = %if.then.us.5
+ %25 = load i8, ptr %arrayidx149.us, align 1
+ %conv150.us.7 = sext i8 %25 to i32
+ br label %cond.end154.us.7
+
+cond.end154.us.7: ; preds = %cond.true146.us.7, %if.then.us.5
+ %cond155.us.7 = phi i32 [ %conv150.us.7, %cond.true146.us.7 ], [ 0, %if.then.us.5 ]
+ %shl.us.7 = shl i32 18984, %cond155.us.7
+ %tobool157.not.us.7 = icmp eq i32 %shl.us.7, 0
+ br i1 %tobool157.not.us.7, label %if.end.us.7, label %if.then.us.7
+
+if.then.us.7: ; preds = %cond.end154.us.7
+ store i32 0, ptr %arr_3, align 4
+ br label %if.end.us.7
+
+if.end.us.7: ; preds = %if.then.us.7, %cond.end154.us.7
+ %26 = phi i32 [ 0, %if.then.us.7 ], [ %.sroa.speculated826.us.5, %cond.end154.us.7 ]
+ %arrayidx259.us = getelementptr i16, ptr %arrayidx257.us, i64 %conv96880.us
+ br label %for.body194.us
+
+for.body113.us: ; preds = %for.body113.us, %for.body99.us
+ br i1 %var_0, label %for.body113.us, label %for.cond131.preheader.us
+
+for.cond510.preheader.us: ; preds = %for.inc505.us
+ %cmp97.us = icmp slt i16 %add538, 0
+ br i1 %cmp97.us, label %for.body99.us, label %for.cond.cleanup98.us
+
+for.cond376.preheader.us: ; preds = %for.inc371.us
+ %arrayidx384.us = getelementptr i16, ptr null, i64 %conv96880.us
+ %gep876.us = getelementptr [24 x [24 x i16]], ptr %invariant.gep875.us, i64 %conv96880.us
+ br label %for.body380.us
+
+for.cond131.preheader.us: ; preds = %for.body113.us
+ store i8 %var_3, ptr %arr_4, align 1
+ %27 = load i16, ptr %gep884.us, align 2
+ %28 = mul i16 18984, %27
+ %div.us = zext i16 %28 to i32
+ %tobool145.not.us = icmp eq i8 0, 0
+ br i1 %tobool145.not.us, label %cond.end154.us, label %cond.true146.us
+
+for.cond95.preheader.us: ; preds = %for.cond.cleanup98.us, %for.body67.us
+ %indvars.iv1021 = phi i64 [ 1, %for.cond.cleanup98.us ], [ 0, %for.body67.us ]
+ %29 = phi i8 [ %16, %for.cond.cleanup98.us ], [ %4, %for.body67.us ]
+ %add413978.us = phi i16 [ %var_4, %for.cond.cleanup98.us ], [ %add413977.us, %for.body67.us ]
+ %30 = phi i8 [ %.sroa.speculated829.us, %for.cond.cleanup98.us ], [ %5, %for.body67.us ]
+ %mul354904.us = phi i64 [ 0, %for.cond.cleanup98.us ], [ %mul354903918.us, %for.body67.us ]
+ %gep884.us = getelementptr [24 x [24 x i16]], ptr %gep889.us, i64 %indvars.iv1021
+ %arrayidx321.us = getelementptr i8, ptr %arrayidx319.us, i64 %indvars.iv1021
+ %arrayidx257.us = getelementptr [24 x i16], ptr null, i64 %indvars.iv1021
+ br label %for.body99.us
+
+for.cond563.preheader: ; preds = %for.body41, %entry
+ ret void
+
+for.body41: ; preds = %for.body41.lr.ph
+ store i8 0, ptr %arr_12, align 1
+ store i8 0, ptr %arr_3, align 1
+ br label %for.cond563.preheader
+}
+
+attributes #0 = { nounwind "frame-pointer"="non-leaf" "target-cpu"="grace" }
+attributes #1 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+attributes #2 = { nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: write) }
diff --git a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
index 57a1e4c..ec92edb 100644
--- a/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
+++ b/llvm/test/CodeGen/AMDGPU/combine_andor_with_cmps.ll
@@ -3385,7 +3385,7 @@ declare half @llvm.canonicalize.f16(half)
declare <2 x half> @llvm.canonicalize.v2f16(<2 x half>)
attributes #0 = { nounwind "amdgpu-ieee"="false" }
-attributes #1 = { nounwind "unsafe-fp-math"="true" "no-nans-fp-math"="true" }
+attributes #1 = { nounwind "no-nans-fp-math"="true" }
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX11NONANS-FAKE16: {{.*}}
; GFX11NONANS-TRUE16: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/fdiv.f64.ll b/llvm/test/CodeGen/AMDGPU/fdiv.f64.ll
index acb32d4..11476a6 100644
--- a/llvm/test/CodeGen/AMDGPU/fdiv.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/fdiv.f64.ll
@@ -127,7 +127,7 @@ define amdgpu_kernel void @s_fdiv_v4f64(ptr addrspace(1) %out, <4 x double> %num
; GCN-LABEL: {{^}}div_fast_2_x_pat_f64:
; GCN: v_mul_f64 [[MUL:v\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0.5
; GCN: buffer_store_dwordx2 [[MUL]]
-define amdgpu_kernel void @div_fast_2_x_pat_f64(ptr addrspace(1) %out) #1 {
+define amdgpu_kernel void @div_fast_2_x_pat_f64(ptr addrspace(1) %out) #0 {
%x = load double, ptr addrspace(1) poison
%rcp = fdiv fast double %x, 2.0
store double %rcp, ptr addrspace(1) %out, align 4
@@ -139,7 +139,7 @@ define amdgpu_kernel void @div_fast_2_x_pat_f64(ptr addrspace(1) %out) #1 {
; GCN-DAG: v_mov_b32_e32 v[[K_HI:[0-9]+]], 0x3fb99999
; GCN: v_mul_f64 [[MUL:v\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, v[[[K_LO]]:[[K_HI]]]
; GCN: buffer_store_dwordx2 [[MUL]]
-define amdgpu_kernel void @div_fast_k_x_pat_f64(ptr addrspace(1) %out) #1 {
+define amdgpu_kernel void @div_fast_k_x_pat_f64(ptr addrspace(1) %out) #0 {
%x = load double, ptr addrspace(1) poison
%rcp = fdiv fast double %x, 10.0
store double %rcp, ptr addrspace(1) %out, align 4
@@ -151,7 +151,7 @@ define amdgpu_kernel void @div_fast_k_x_pat_f64(ptr addrspace(1) %out) #1 {
; GCN-DAG: v_mov_b32_e32 v[[K_HI:[0-9]+]], 0xbfb99999
; GCN: v_mul_f64 [[MUL:v\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, v[[[K_LO]]:[[K_HI]]]
; GCN: buffer_store_dwordx2 [[MUL]]
-define amdgpu_kernel void @div_fast_neg_k_x_pat_f64(ptr addrspace(1) %out) #1 {
+define amdgpu_kernel void @div_fast_neg_k_x_pat_f64(ptr addrspace(1) %out) #0 {
%x = load double, ptr addrspace(1) poison
%rcp = fdiv fast double %x, -10.0
store double %rcp, ptr addrspace(1) %out, align 4
@@ -159,4 +159,3 @@ define amdgpu_kernel void @div_fast_neg_k_x_pat_f64(ptr addrspace(1) %out) #1 {
}
attributes #0 = { nounwind }
-attributes #1 = { nounwind "unsafe-fp-math"="true" }
diff --git a/llvm/test/CodeGen/AMDGPU/fmad-formation-fmul-distribute-denormal-mode.ll b/llvm/test/CodeGen/AMDGPU/fmad-formation-fmul-distribute-denormal-mode.ll
index 92eb4a6..0a266bc 100644
--- a/llvm/test/CodeGen/AMDGPU/fmad-formation-fmul-distribute-denormal-mode.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmad-formation-fmul-distribute-denormal-mode.ll
@@ -284,4 +284,4 @@ define <2 x float> @unsafe_fast_fmul_fsub_ditribute_post_legalize(float %arg0, <
ret <2 x float> %tmp1
}
-attributes #0 = { "no-infs-fp-math"="true" "unsafe-fp-math"="true" }
+attributes #0 = { "no-infs-fp-math"="true" }
diff --git a/llvm/test/CodeGen/AMDGPU/fmed3.bf16.ll b/llvm/test/CodeGen/AMDGPU/fmed3.bf16.ll
index bc85dc2..3e513de 100644
--- a/llvm/test/CodeGen/AMDGPU/fmed3.bf16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmed3.bf16.ll
@@ -219,8 +219,8 @@ define <2 x bfloat> @v_test_fmed3_r_i_i_v2bf16_minimumnum_maximumnum(<2 x bfloat
}
attributes #0 = { nounwind readnone }
-attributes #1 = { nounwind "unsafe-fp-math"="false" "no-nans-fp-math"="false" }
-attributes #2 = { nounwind "unsafe-fp-math"="false" "no-nans-fp-math"="true" }
+attributes #1 = { nounwind "no-nans-fp-math"="false" }
+attributes #2 = { nounwind "no-nans-fp-math"="true" }
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX11: {{.*}}
; GFX11-SDAG: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/fmed3.ll b/llvm/test/CodeGen/AMDGPU/fmed3.ll
index 3145a27..60ac0b9 100644
--- a/llvm/test/CodeGen/AMDGPU/fmed3.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmed3.ll
@@ -8905,4 +8905,4 @@ declare half @llvm.minnum.f16(half, half) #0
declare half @llvm.maxnum.f16(half, half) #0
attributes #0 = { nounwind readnone }
-attributes #2 = { nounwind "unsafe-fp-math"="false" "no-nans-fp-math"="true" }
+attributes #2 = { nounwind "no-nans-fp-math"="true" }
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.legal.f16.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.legal.f16.ll
index d8bbda1..69d1ee3f 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.legal.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.legal.f16.ll
@@ -159,7 +159,7 @@ declare half @llvm.amdgcn.interp.p2.f16(float, float, i32, i32, i1, i32) #0
attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
attributes #1 = { nounwind readnone }
-attributes #2 = { nounwind "unsafe-fp-math"="true" }
+attributes #2 = { nounwind }
attributes #3 = { nounwind "no-signed-zeros-fp-math"="true" }
attributes #4 = { nounwind "amdgpu-ieee"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
index aaea4f7..b3202cb 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
@@ -8006,7 +8006,7 @@ declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #0
attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
attributes #1 = { nounwind readnone }
-attributes #2 = { nounwind "unsafe-fp-math"="true" }
+attributes #2 = { nounwind }
attributes #3 = { nounwind "no-signed-zeros-fp-math"="true" }
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GCN-NSZ: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/frem.ll b/llvm/test/CodeGen/AMDGPU/frem.ll
index 6f91222..d8cbdb1 100644
--- a/llvm/test/CodeGen/AMDGPU/frem.ll
+++ b/llvm/test/CodeGen/AMDGPU/frem.ll
@@ -2048,7 +2048,7 @@ define amdgpu_kernel void @unsafe_frem_f16(ptr addrspace(1) %out, ptr addrspace(
; GFX1200-FAKE16-NEXT: v_fmac_f16_e32 v1, v3, v2
; GFX1200-FAKE16-NEXT: global_store_b16 v0, v1, s[0:1]
; GFX1200-FAKE16-NEXT: s_endpgm
- ptr addrspace(1) %in2) #1 {
+ ptr addrspace(1) %in2) #0 {
%gep2 = getelementptr half, ptr addrspace(1) %in2, i32 4
%r0 = load half, ptr addrspace(1) %in1, align 4
%r1 = load half, ptr addrspace(1) %gep2, align 4
@@ -3417,7 +3417,7 @@ define amdgpu_kernel void @unsafe_frem_f32(ptr addrspace(1) %out, ptr addrspace(
; GFX1200-NEXT: v_fmac_f32_e32 v1, v3, v2
; GFX1200-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX1200-NEXT: s_endpgm
- ptr addrspace(1) %in2) #1 {
+ ptr addrspace(1) %in2) #0 {
%gep2 = getelementptr float, ptr addrspace(1) %in2, i32 4
%r0 = load float, ptr addrspace(1) %in1, align 4
%r1 = load float, ptr addrspace(1) %gep2, align 4
@@ -4821,7 +4821,7 @@ define amdgpu_kernel void @unsafe_frem_f64(ptr addrspace(1) %out, ptr addrspace(
; GFX1200-NEXT: v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
; GFX1200-NEXT: global_store_b64 v12, v[0:1], s[0:1]
; GFX1200-NEXT: s_endpgm
- ptr addrspace(1) %in2) #1 {
+ ptr addrspace(1) %in2) #0 {
%r0 = load double, ptr addrspace(1) %in1, align 8
%r1 = load double, ptr addrspace(1) %in2, align 8
%r2 = frem afn double %r0, %r1
@@ -18918,7 +18918,4 @@ define amdgpu_kernel void @frem_v2f64_const(ptr addrspace(1) %out) #0 {
-attributes #0 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
-attributes #1 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
-
-
+attributes #0 = { nounwind "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
diff --git a/llvm/test/CodeGen/AMDGPU/fsqrt.f64.ll b/llvm/test/CodeGen/AMDGPU/fsqrt.f64.ll
index 1b74ddf..9b97981 100644
--- a/llvm/test/CodeGen/AMDGPU/fsqrt.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/fsqrt.f64.ll
@@ -2870,7 +2870,7 @@ define double @v_sqrt_f64__enough_unsafe_attrs(double %x) #3 {
ret double %result
}
-define double @v_sqrt_f64__unsafe_attr(double %x) #4 {
+define double @v_sqrt_f64__unsafe_attr(double %x) {
; GFX6-SDAG-LABEL: v_sqrt_f64__unsafe_attr:
; GFX6-SDAG: ; %bb.0:
; GFX6-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
@@ -3449,7 +3449,6 @@ declare i32 @llvm.amdgcn.readfirstlane(i32) #1
attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
attributes #1 = { convergent nounwind willreturn memory(none) }
attributes #3 = { "no-nans-fp-math"="true" "no-infs-fp-math"="true" }
-attributes #4 = { "unsafe-fp-math"="true" }
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX6: {{.*}}
; GFX8: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/fsqrt.r600.ll b/llvm/test/CodeGen/AMDGPU/fsqrt.r600.ll
index 9f19bcb..c93c077 100644
--- a/llvm/test/CodeGen/AMDGPU/fsqrt.r600.ll
+++ b/llvm/test/CodeGen/AMDGPU/fsqrt.r600.ll
@@ -239,4 +239,4 @@ declare <2 x float> @llvm.sqrt.v2f32(<2 x float> %in) #0
declare <4 x float> @llvm.sqrt.v4f32(<4 x float> %in) #0
attributes #0 = { nounwind readnone }
-attributes #1 = { nounwind "unsafe-fp-math"="true" }
+attributes #1 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/inline-attr.ll b/llvm/test/CodeGen/AMDGPU/inline-attr.ll
index 4e93eca..c33b3344 100644
--- a/llvm/test/CodeGen/AMDGPU/inline-attr.ll
+++ b/llvm/test/CodeGen/AMDGPU/inline-attr.ll
@@ -36,18 +36,18 @@ entry:
ret void
}
-attributes #0 = { nounwind "uniform-work-group-size"="false" "unsafe-fp-math"="true"}
-attributes #1 = { nounwind "less-precise-fpmad"="true" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "unsafe-fp-math"="true" }
+attributes #0 = { nounwind "uniform-work-group-size"="false"}
+attributes #1 = { nounwind "less-precise-fpmad"="true" "no-infs-fp-math"="true" "no-nans-fp-math"="true" }
;.
-; UNSAFE: attributes #[[ATTR0]] = { nounwind "uniform-work-group-size"="false" "unsafe-fp-math"="true" }
-; UNSAFE: attributes #[[ATTR1]] = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "uniform-work-group-size"="false" "unsafe-fp-math"="true" }
+; UNSAFE: attributes #[[ATTR0]] = { nounwind "uniform-work-group-size"="false" }
+; UNSAFE: attributes #[[ATTR1]] = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "uniform-work-group-size"="false" }
;.
-; NONANS: attributes #[[ATTR0]] = { nounwind "no-nans-fp-math"="true" "uniform-work-group-size"="false" "unsafe-fp-math"="true" }
-; NONANS: attributes #[[ATTR1]] = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="true" "uniform-work-group-size"="false" "unsafe-fp-math"="true" }
+; NONANS: attributes #[[ATTR0]] = { nounwind "no-nans-fp-math"="true" "uniform-work-group-size"="false" }
+; NONANS: attributes #[[ATTR1]] = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="true" "uniform-work-group-size"="false" }
;.
-; NOINFS: attributes #[[ATTR0]] = { nounwind "no-infs-fp-math"="true" "uniform-work-group-size"="false" "unsafe-fp-math"="true" }
-; NOINFS: attributes #[[ATTR1]] = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="true" "no-nans-fp-math"="false" "uniform-work-group-size"="false" "unsafe-fp-math"="true" }
+; NOINFS: attributes #[[ATTR0]] = { nounwind "no-infs-fp-math"="true" "uniform-work-group-size"="false" }
+; NOINFS: attributes #[[ATTR1]] = { nounwind "less-precise-fpmad"="false" "no-infs-fp-math"="true" "no-nans-fp-math"="false" "uniform-work-group-size"="false" }
;.
; UNSAFE: [[META0]] = !{}
;.
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.add.min.max.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.add.min.max.ll
new file mode 100644
index 0000000..99421d4
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.add.min.max.ll
@@ -0,0 +1,191 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,GFX1250-SDAG %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,GFX1250-GISEL %s
+
+declare i32 @llvm.amdgcn.add.min.i32(i32, i32, i32, i1)
+declare i32 @llvm.amdgcn.add.max.i32(i32, i32, i32, i1)
+declare i32 @llvm.amdgcn.add.min.u32(i32, i32, i32, i1)
+declare i32 @llvm.amdgcn.add.max.u32(i32, i32, i32, i1)
+declare <2 x i16> @llvm.amdgcn.pk.add.min.i16(<2 x i16>, <2 x i16>, <2 x i16>, i1)
+declare <2 x i16> @llvm.amdgcn.pk.add.max.i16(<2 x i16>, <2 x i16>, <2 x i16>, i1)
+declare <2 x i16> @llvm.amdgcn.pk.add.min.u16(<2 x i16>, <2 x i16>, <2 x i16>, i1)
+declare <2 x i16> @llvm.amdgcn.pk.add.max.u16(<2 x i16>, <2 x i16>, <2 x i16>, i1)
+
+define i32 @test_add_min_i32_vvv(i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: test_add_min_i32_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_add_min_i32 v0, v0, v1, v2
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.add.min.i32(i32 %a, i32 %b, i32 %c, i1 0)
+ ret i32 %ret
+}
+
+define i32 @test_add_min_i32_ssi_clamp(i32 inreg %a, i32 inreg %b) {
+; GCN-LABEL: test_add_min_i32_ssi_clamp:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_add_min_i32 v0, s0, s1, 1 clamp
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.add.min.i32(i32 %a, i32 %b, i32 1, i1 1)
+ ret i32 %ret
+}
+
+define i32 @test_add_min_u32_vvv(i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: test_add_min_u32_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_add_min_u32 v0, v0, v1, v2
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.add.min.u32(i32 %a, i32 %b, i32 %c, i1 0)
+ ret i32 %ret
+}
+
+define i32 @test_add_min_u32_ssi_clamp(i32 inreg %a, i32 inreg %b) {
+; GCN-LABEL: test_add_min_u32_ssi_clamp:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_add_min_u32 v0, s0, s1, 1 clamp
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.add.min.u32(i32 %a, i32 %b, i32 1, i1 1)
+ ret i32 %ret
+}
+
+define i32 @test_add_max_i32_vvv(i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: test_add_max_i32_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_add_max_i32 v0, v0, v1, v2
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.add.max.i32(i32 %a, i32 %b, i32 %c, i1 0)
+ ret i32 %ret
+}
+
+define i32 @test_add_max_i32_ssi_clamp(i32 inreg %a, i32 inreg %b) {
+; GCN-LABEL: test_add_max_i32_ssi_clamp:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_add_max_i32 v0, s0, s1, 1 clamp
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.add.max.i32(i32 %a, i32 %b, i32 1, i1 1)
+ ret i32 %ret
+}
+
+define i32 @test_add_max_u32_vvv(i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: test_add_max_u32_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_add_max_u32 v0, v0, v1, v2
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.add.max.u32(i32 %a, i32 %b, i32 %c, i1 0)
+ ret i32 %ret
+}
+
+define i32 @test_add_max_u32_ssi_clamp(i32 inreg %a, i32 inreg %b) {
+; GCN-LABEL: test_add_max_u32_ssi_clamp:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_add_max_u32 v0, s0, s1, 1 clamp
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call i32 @llvm.amdgcn.add.max.u32(i32 %a, i32 %b, i32 1, i1 1)
+ ret i32 %ret
+}
+
+define <2 x i16> @test_add_min_i16_vvv(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) {
+; GCN-LABEL: test_add_min_i16_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_pk_add_min_i16 v0, v0, v1, v2
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.min.i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, i1 0)
+ ret <2 x i16> %ret
+}
+
+define <2 x i16> @test_add_min_i16_ssi_clamp(<2 x i16> inreg %a, <2 x i16> inreg %b) {
+; GCN-LABEL: test_add_min_i16_ssi_clamp:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_pk_add_min_i16 v0, s0, s1, 1 op_sel_hi:[1,1,0] clamp
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.min.i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> <i16 1, i16 1>, i1 1)
+ ret <2 x i16> %ret
+}
+
+define <2 x i16> @test_add_min_u16_vvv(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) {
+; GCN-LABEL: test_add_min_u16_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_pk_add_min_u16 v0, v0, v1, v2
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.min.u16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, i1 0)
+ ret <2 x i16> %ret
+}
+
+define <2 x i16> @test_add_min_u16_ssi_clamp(<2 x i16> inreg %a, <2 x i16> inreg %b) {
+; GCN-LABEL: test_add_min_u16_ssi_clamp:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_pk_add_min_u16 v0, s0, s1, 1 op_sel_hi:[1,1,0] clamp
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.min.u16(<2 x i16> %a, <2 x i16> %b, <2 x i16> <i16 1, i16 1>, i1 1)
+ ret <2 x i16> %ret
+}
+
+define <2 x i16> @test_add_max_i16_vvv(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) {
+; GCN-LABEL: test_add_max_i16_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_pk_add_max_i16 v0, v0, v1, v2
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.max.i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, i1 0)
+ ret <2 x i16> %ret
+}
+
+define <2 x i16> @test_add_max_i16_ssi_clamp(<2 x i16> inreg %a, <2 x i16> inreg %b) {
+; GCN-LABEL: test_add_max_i16_ssi_clamp:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_pk_add_max_i16 v0, s0, s1, 1 op_sel_hi:[1,1,0] clamp
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.max.i16(<2 x i16> %a, <2 x i16> %b, <2 x i16> <i16 1, i16 1>, i1 1)
+ ret <2 x i16> %ret
+}
+
+define <2 x i16> @test_add_max_u16_vvv(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c) {
+; GCN-LABEL: test_add_max_u16_vvv:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_pk_add_max_u16 v0, v0, v1, v2
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.max.u16(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, i1 0)
+ ret <2 x i16> %ret
+}
+
+define <2 x i16> @test_add_max_u16_ssi_clamp(<2 x i16> inreg %a, <2 x i16> inreg %b) {
+; GCN-LABEL: test_add_max_u16_ssi_clamp:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: v_pk_add_max_u16 v0, s0, s1, 1 op_sel_hi:[1,1,0] clamp
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ %ret = tail call <2 x i16> @llvm.amdgcn.pk.add.max.u16(<2 x i16> %a, <2 x i16> %b, <2 x i16> <i16 1, i16 1>, i1 1)
+ ret <2 x i16> %ret
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GFX1250-GISEL: {{.*}}
+; GFX1250-SDAG: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll
index 883db20..e30a586 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp2.ll
@@ -1485,7 +1485,7 @@ define float @v_exp2_f32_fast(float %in) {
ret float %result
}
-define float @v_exp2_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
+define float @v_exp2_f32_unsafe_math_attr(float %in) {
; SI-SDAG-LABEL: v_exp2_f32_unsafe_math_attr:
; SI-SDAG: ; %bb.0:
; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log2.ll b/llvm/test/CodeGen/AMDGPU/llvm.log2.ll
index 0854134..61a777f 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.log2.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log2.ll
@@ -1907,7 +1907,7 @@ define float @v_log2_f32_fast(float %in) {
ret float %result
}
-define float @v_log2_f32_unsafe_math_attr(float %in) "unsafe-fp-math"="true" {
+define float @v_log2_f32_unsafe_math_attr(float %in) {
; SI-SDAG-LABEL: v_log2_f32_unsafe_math_attr:
; SI-SDAG: ; %bb.0:
; SI-SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/minmax.ll b/llvm/test/CodeGen/AMDGPU/minmax.ll
index d578d2e..60570bd 100644
--- a/llvm/test/CodeGen/AMDGPU/minmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/minmax.ll
@@ -1296,4 +1296,4 @@ declare half @llvm.minnum.f16(half, half)
declare half @llvm.maxnum.f16(half, half)
declare float @llvm.minnum.f32(float, float)
declare float @llvm.maxnum.f32(float, float)
-attributes #0 = { nounwind "unsafe-fp-math"="false" "no-nans-fp-math"="true" }
+attributes #0 = { nounwind "no-nans-fp-math"="true" }
diff --git a/llvm/test/CodeGen/AMDGPU/stackguard.ll b/llvm/test/CodeGen/AMDGPU/stackguard.ll
new file mode 100644
index 0000000..393686f
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/stackguard.ll
@@ -0,0 +1,14 @@
+; RUN: not llc -global-isel=0 -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -filetype=null %s 2>&1 | FileCheck %s
+; RUN: not llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -filetype=null %s 2>&1 | FileCheck %s
+
+; FIXME: To actually support stackguard, need to fix intrinsic to
+; return pointer in any address space.
+
+; CHECK: error: unable to lower stackguard
+define i1 @test_stackguard(ptr %p1) {
+ %p2 = call ptr @llvm.stackguard()
+ %res = icmp ne ptr %p2, %p1
+ ret i1 %res
+}
+
+declare ptr @llvm.stackguard()
diff --git a/llvm/test/CodeGen/ARM/2014-05-14-DwarfEHCrash.ll b/llvm/test/CodeGen/ARM/2014-05-14-DwarfEHCrash.ll
index 96639ed..bfaf799 100644
--- a/llvm/test/CodeGen/ARM/2014-05-14-DwarfEHCrash.ll
+++ b/llvm/test/CodeGen/ARM/2014-05-14-DwarfEHCrash.ll
@@ -45,6 +45,6 @@ declare ptr @__cxa_begin_catch(ptr)
declare void @__cxa_end_catch()
-attributes #0 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #0 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="true" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/ARM/ARMLoadStoreDBG.mir b/llvm/test/CodeGen/ARM/ARMLoadStoreDBG.mir
index 812ac23..a18023c 100644
--- a/llvm/test/CodeGen/ARM/ARMLoadStoreDBG.mir
+++ b/llvm/test/CodeGen/ARM/ARMLoadStoreDBG.mir
@@ -31,8 +31,8 @@
; Function Attrs: nounwind readnone
declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #2
- attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
- attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
+ attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #2 = { nounwind readnone }
attributes #3 = { nounwind }
diff --git a/llvm/test/CodeGen/ARM/O3-pipeline.ll b/llvm/test/CodeGen/ARM/O3-pipeline.ll
index 9601a2e..2731148 100644
--- a/llvm/test/CodeGen/ARM/O3-pipeline.ll
+++ b/llvm/test/CodeGen/ARM/O3-pipeline.ll
@@ -166,6 +166,7 @@
; CHECK-NEXT: ARM Execution Domain Fix
; CHECK-NEXT: BreakFalseDeps
; CHECK-NEXT: ARM pseudo instruction expansion pass
+; CHECK-NEXT: Insert KCFI indirect call checks
; CHECK-NEXT: Thumb2 instruction size reduce pass
; CHECK-NEXT: MachineDominator Tree Construction
; CHECK-NEXT: Machine Natural Loop Construction
diff --git a/llvm/test/CodeGen/ARM/Windows/wineh-basic.ll b/llvm/test/CodeGen/ARM/Windows/wineh-basic.ll
index d0bdd66..e4dc92d 100644
--- a/llvm/test/CodeGen/ARM/Windows/wineh-basic.ll
+++ b/llvm/test/CodeGen/ARM/Windows/wineh-basic.ll
@@ -36,8 +36,8 @@ declare arm_aapcs_vfpcc i32 @__CxxFrameHandler3(...)
declare arm_aapcs_vfpcc void @__std_terminate() local_unnamed_addr
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a9" "target-features"="+dsp,+fp16,+neon,+strict-align,+vfp3" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a9" "target-features"="+dsp,+fp16,+neon,+strict-align,+vfp3" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a9" "target-features"="+dsp,+fp16,+neon,+strict-align,+vfp3" "use-soft-float"="false" }
+attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a9" "target-features"="+dsp,+fp16,+neon,+strict-align,+vfp3" "use-soft-float"="false" }
attributes #2 = { noreturn nounwind }
!llvm.module.flags = !{!0, !1}
diff --git a/llvm/test/CodeGen/ARM/byval_load_align.ll b/llvm/test/CodeGen/ARM/byval_load_align.ll
index c594bd3..5bb4fe7 100644
--- a/llvm/test/CodeGen/ARM/byval_load_align.ll
+++ b/llvm/test/CodeGen/ARM/byval_load_align.ll
@@ -22,6 +22,6 @@ entry:
declare void @Logger(i8 signext, ptr byval(%struct.ModuleID)) #1
-attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/ARM/call-graph-section-addrtaken.ll b/llvm/test/CodeGen/ARM/call-graph-section-addrtaken.ll
index 972a470..cabd43e 100644
--- a/llvm/test/CodeGen/ARM/call-graph-section-addrtaken.ll
+++ b/llvm/test/CodeGen/ARM/call-graph-section-addrtaken.ll
@@ -27,7 +27,7 @@ entry:
!1 = !{i64 0, !"_ZTSFivE.generalized"}
!2 = !{i64 0, !"_ZTSFviE.generalized"}
-; CHECK: .section .llvm.callgraph,"o",%progbits,.text
+; CHECK: .section .llvm.callgraph,"o",%llvm_call_graph,.text
;; Version
; CHECK-NEXT: .byte 0
;; Flags -- Potential indirect target so LSB is set to 1. Other bits are 0.
diff --git a/llvm/test/CodeGen/ARM/call-graph-section-assembly.ll b/llvm/test/CodeGen/ARM/call-graph-section-assembly.ll
index ec8d5b8..3d3974e 100644
--- a/llvm/test/CodeGen/ARM/call-graph-section-assembly.ll
+++ b/llvm/test/CodeGen/ARM/call-graph-section-assembly.ll
@@ -36,7 +36,7 @@ entry:
!4 = !{!5}
!5 = !{i64 0, !"_ZTSFPvS_E.generalized"}
-; CHECK: .section .llvm.callgraph,"o",%progbits,.text
+; CHECK: .section .llvm.callgraph,"o",%llvm_call_graph,.text
;; Version
; CHECK-NEXT: .byte 0
;; Flags
diff --git a/llvm/test/CodeGen/ARM/cfguard-module-flag.ll b/llvm/test/CodeGen/ARM/cfguard-module-flag.ll
index 3e8c9f4..bb3c04a 100644
--- a/llvm/test/CodeGen/ARM/cfguard-module-flag.ll
+++ b/llvm/test/CodeGen/ARM/cfguard-module-flag.ll
@@ -21,7 +21,7 @@ entry:
; CHECK-NOT: __guard_check_icall_fptr
; CHECK-NOT: __guard_dispatch_icall_fptr
}
-attributes #0 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a9" "target-features"="+armv7-a,+dsp,+fp16,+neon,+strict-align,+thumb-mode,+vfp3" "unsafe-fp-math"="false" "use-soft-float"="false"}
+attributes #0 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a9" "target-features"="+armv7-a,+dsp,+fp16,+neon,+strict-align,+thumb-mode,+vfp3" "use-soft-float"="false"}
!llvm.module.flags = !{!0}
!0 = !{i32 2, !"cfguard", i32 1}
diff --git a/llvm/test/CodeGen/ARM/clang-section.ll b/llvm/test/CodeGen/ARM/clang-section.ll
index 9277d90..9c32ab2 100644
--- a/llvm/test/CodeGen/ARM/clang-section.ll
+++ b/llvm/test/CodeGen/ARM/clang-section.ll
@@ -35,8 +35,8 @@ attributes #0 = { "bss-section"="my_bss.1" "data-section"="my_data.1" "rodata-se
attributes #1 = { "data-section"="my_data.1" "rodata-section"="my_rodata.1" }
attributes #2 = { "bss-section"="my_bss.2" "rodata-section"="my_rodata.1" }
attributes #3 = { "bss-section"="my_bss.2" "data-section"="my_data.2" "rodata-section"="my_rodata.2" }
-attributes #6 = { "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign,preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a9" "target-features"="+dsp,+fp16,+neon,+vfp3" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #7 = { noinline nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign,preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a9" "target-features"="+dsp,+fp16,+neon,+vfp3" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #6 = { "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign,preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a9" "target-features"="+dsp,+fp16,+neon,+vfp3" "use-soft-float"="false" }
+attributes #7 = { noinline nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign,preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a9" "target-features"="+dsp,+fp16,+neon,+vfp3" "use-soft-float"="false" }
!llvm.module.flags = !{!0, !1, !2, !3}
diff --git a/llvm/test/CodeGen/ARM/cmse-clear-float-bigend.mir b/llvm/test/CodeGen/ARM/cmse-clear-float-bigend.mir
index 47f4e1a..ae36da4 100644
--- a/llvm/test/CodeGen/ARM/cmse-clear-float-bigend.mir
+++ b/llvm/test/CodeGen/ARM/cmse-clear-float-bigend.mir
@@ -16,7 +16,7 @@
; Function Attrs: nounwind
declare void @llvm.stackprotector(ptr, ptr) #1
- attributes #0 = { "cmse_nonsecure_entry" nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+8msecext,+armv8-m.main,-d32,-fp64,+fp-armv8,+hwdiv,+thumb-mode,-crypto,-fullfp16,-neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { "cmse_nonsecure_entry" nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+8msecext,+armv8-m.main,-d32,-fp64,+fp-armv8,+hwdiv,+thumb-mode,-crypto,-fullfp16,-neon" "use-soft-float"="false" }
attributes #1 = { nounwind }
attributes #2 = { "cmse_nonsecure_call" nounwind }
diff --git a/llvm/test/CodeGen/ARM/coalesce-dbgvalue.ll b/llvm/test/CodeGen/ARM/coalesce-dbgvalue.ll
index 4d4853c..7960b79 100644
--- a/llvm/test/CodeGen/ARM/coalesce-dbgvalue.ll
+++ b/llvm/test/CodeGen/ARM/coalesce-dbgvalue.ll
@@ -72,8 +72,8 @@ declare i32 @fn3(...) #1
; Function Attrs: nounwind readnone
declare void @llvm.dbg.value(metadata, metadata, metadata) #2
-attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "use-soft-float"="false" }
attributes #2 = { nounwind readnone }
attributes #3 = { nounwind }
diff --git a/llvm/test/CodeGen/ARM/constantpool-promote-dbg.ll b/llvm/test/CodeGen/ARM/constantpool-promote-dbg.ll
index 246eeeb..4bc6c41 100644
--- a/llvm/test/CodeGen/ARM/constantpool-promote-dbg.ll
+++ b/llvm/test/CodeGen/ARM/constantpool-promote-dbg.ll
@@ -19,7 +19,7 @@ entry:
ret ptr getelementptr inbounds ([4 x i8], ptr @.str, i32 0, i32 1), !dbg !16
}
-attributes #0 = { minsize norecurse nounwind optsize readnone "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m3" "target-features"="+hwdiv,+soft-float,-crypto,-neon" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #0 = { minsize norecurse nounwind optsize readnone "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m3" "target-features"="+hwdiv,+soft-float,-crypto,-neon" "use-soft-float"="true" }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4, !5, !6}
diff --git a/llvm/test/CodeGen/ARM/constantpool-promote.ll b/llvm/test/CodeGen/ARM/constantpool-promote.ll
index c383b39..87f14ebf 100644
--- a/llvm/test/CodeGen/ARM/constantpool-promote.ll
+++ b/llvm/test/CodeGen/ARM/constantpool-promote.ll
@@ -200,8 +200,8 @@ declare void @d(ptr) #1
declare void @llvm.memcpy.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i1)
declare void @llvm.memmove.p0.p0.i32(ptr, ptr, i32, i1) local_unnamed_addr
-attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #2 = { nounwind }
!llvm.module.flags = !{!0, !1}
diff --git a/llvm/test/CodeGen/ARM/early-cfi-sections.ll b/llvm/test/CodeGen/ARM/early-cfi-sections.ll
index 72b8702..ef99ae5 100644
--- a/llvm/test/CodeGen/ARM/early-cfi-sections.ll
+++ b/llvm/test/CodeGen/ARM/early-cfi-sections.ll
@@ -13,7 +13,7 @@ entry:
ret void, !dbg !10
}
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="arm7tdmi" "target-features"="+soft-float,+strict-align,-crypto,-neon" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="arm7tdmi" "target-features"="+soft-float,+strict-align,-crypto,-neon" "use-soft-float"="true" }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4, !5, !6}
diff --git a/llvm/test/CodeGen/ARM/fp16-vld.ll b/llvm/test/CodeGen/ARM/fp16-vld.ll
index 549546e..778685c 100644
--- a/llvm/test/CodeGen/ARM/fp16-vld.ll
+++ b/llvm/test/CodeGen/ARM/fp16-vld.ll
@@ -43,4 +43,4 @@ byeblock:
ret void
}
-attributes #0 = { norecurse nounwind readonly "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "target-cpu"="generic" "target-features"="+armv8.2-a,+fullfp16,+strict-align,-thumb-mode" "unsafe-fp-math"="true" "use-soft-float"="false" }
+attributes #0 = { norecurse nounwind readonly "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "target-cpu"="generic" "target-features"="+armv8.2-a,+fullfp16,+strict-align,-thumb-mode" "use-soft-float"="false" }
diff --git a/llvm/test/CodeGen/ARM/global-merge-1.ll b/llvm/test/CodeGen/ARM/global-merge-1.ll
index 46e9d96..05719ae 100644
--- a/llvm/test/CodeGen/ARM/global-merge-1.ll
+++ b/llvm/test/CodeGen/ARM/global-merge-1.ll
@@ -74,9 +74,9 @@ define internal ptr @returnFoo() #2 {
ret ptr @foo
}
-attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind readnone ssp "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
+attributes #2 = { nounwind readnone ssp "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #3 = { nounwind }
!llvm.ident = !{!0}
diff --git a/llvm/test/CodeGen/ARM/isel-v8i32-crash.ll b/llvm/test/CodeGen/ARM/isel-v8i32-crash.ll
index 27534a6..bdd842a 100644
--- a/llvm/test/CodeGen/ARM/isel-v8i32-crash.ll
+++ b/llvm/test/CodeGen/ARM/isel-v8i32-crash.ll
@@ -21,4 +21,4 @@ entry:
ret void
}
-attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
diff --git a/llvm/test/CodeGen/ARM/kcfi-arm.ll b/llvm/test/CodeGen/ARM/kcfi-arm.ll
new file mode 100644
index 0000000..e3696cf
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/kcfi-arm.ll
@@ -0,0 +1,138 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=armv7-linux-gnueabi -verify-machineinstrs < %s | FileCheck %s --check-prefix=ASM
+; RUN: llc -mtriple=armv7-linux-gnueabi -verify-machineinstrs -stop-after=finalize-isel < %s | FileCheck %s --check-prefixes=MIR,ISEL
+; RUN: llc -mtriple=armv7-linux-gnueabi -verify-machineinstrs -stop-after=kcfi < %s | FileCheck %s --check-prefixes=MIR,KCFI
+
+; MIR checks for all functions (grouped here to prevent update_llc_test_checks.py from removing them)
+
+; MIR-LABEL: name: f1
+; MIR: body:
+
+; ISEL: BLX %0, csr_aapcs,{{.*}} cfi-type 12345678
+
+; KCFI: BUNDLE{{.*}} {
+; KCFI-NEXT: KCFI_CHECK_ARM $r0, 12345678
+; KCFI-NEXT: BLX killed $r0, csr_aapcs,{{.*}}
+; KCFI-NEXT: }
+
+; MIR-LABEL: name: f2
+; MIR: body:
+
+; ISEL: TCRETURNri %0, 0, csr_aapcs, implicit $sp, cfi-type 12345678
+
+; KCFI: BUNDLE{{.*}} {
+; KCFI-NEXT: KCFI_CHECK_ARM $r0, 12345678
+; KCFI-NEXT: TAILJMPr killed $r0, csr_aapcs, implicit $sp, implicit $sp
+; KCFI-NEXT: }
+
+; ASM: .long 12345678
+define void @f1(ptr noundef %x) !kcfi_type !1 {
+; ASM-LABEL: f1:
+; ASM: @ %bb.0:
+; ASM-NEXT: .save {r11, lr}
+; ASM-NEXT: push {r11, lr}
+; ASM-NEXT: bic r12, r0, #1
+; ASM-NEXT: ldr r12, [r12, #-4]
+; ASM-NEXT: eor r12, r12, #78
+; ASM-NEXT: eor r12, r12, #24832
+; ASM-NEXT: eor r12, r12, #12320768
+; ASM-NEXT: eors r12, r12, #0
+; ASM-NEXT: beq .Ltmp0
+; ASM-NEXT: udf #33760
+; ASM-NEXT: .Ltmp0:
+; ASM-NEXT: blx r0
+; ASM-NEXT: pop {r11, pc}
+
+ call void %x() [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+; Test with tail call
+define void @f2(ptr noundef %x) !kcfi_type !1 {
+; ASM-LABEL: f2:
+; ASM: @ %bb.0:
+; ASM-NEXT: bic r12, r0, #1
+; ASM-NEXT: ldr r12, [r12, #-4]
+; ASM-NEXT: eor r12, r12, #78
+; ASM-NEXT: eor r12, r12, #24832
+; ASM-NEXT: eor r12, r12, #12320768
+; ASM-NEXT: eors r12, r12, #0
+; ASM-NEXT: beq .Ltmp1
+; ASM-NEXT: udf #33760
+; ASM-NEXT: .Ltmp1:
+; ASM-NEXT: bx r0
+
+ tail call void %x() [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+; Test r3 spill/reload when target is r12 and r3 is a call argument.
+; With 5+ arguments (target + 4 args), r0-r3 are all used for arguments,
+; forcing r3 to be spilled when we need it as a scratch register.
+define void @f3_r3_spill(ptr noundef %target, i32 %a, i32 %b, i32 %c, i32 %d) !kcfi_type !1 {
+; ASM-LABEL: f3_r3_spill:
+; ASM: @ %bb.0:
+; ASM-NEXT: .save {r11, lr}
+; ASM-NEXT: push {r11, lr}
+; ASM-NEXT: mov lr, r3
+; ASM-NEXT: ldr r3, [sp, #8]
+; ASM-NEXT: mov r12, r0
+; ASM-NEXT: mov r0, r1
+; ASM-NEXT: mov r1, r2
+; ASM-NEXT: mov r2, lr
+; ASM-NEXT: stmdb sp!, {r3}
+; ASM-NEXT: bic r3, r12, #1
+; ASM-NEXT: ldr r3, [r3, #-4]
+; ASM-NEXT: eor r3, r3, #78
+; ASM-NEXT: eor r3, r3, #24832
+; ASM-NEXT: eor r3, r3, #12320768
+; ASM-NEXT: eors r3, r3, #0
+; ASM-NEXT: ldm sp!, {r3}
+; ASM-NEXT: beq .Ltmp2
+; ASM-NEXT: udf #33772
+; ASM-NEXT: .Ltmp2:
+; ASM-NEXT: blx r12
+; ASM-NEXT: pop {r11, pc}
+; Arguments: r0=%target, r1=%a, r2=%b, r3=%c, [sp]=%d
+; Call needs: r0=%a, r1=%b, r2=%c, r3=%d, target in r12
+; Compiler shuffles arguments into place, saving r3 (c) in lr, loading d from stack
+; r3 is live as 4th argument, so push it before KCFI check
+; Restore r3 immediately after comparison, before branch
+ call void %target(i32 %a, i32 %b, i32 %c, i32 %d) [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+; Test with 3 arguments - r3 not live as an argument; the target ends up in r3, so r12 is used as scratch without spilling
+define void @f4_r3_unused(ptr noundef %target, i32 %a, i32 %b) !kcfi_type !1 {
+; ASM-LABEL: f4_r3_unused:
+; ASM: @ %bb.0:
+; ASM-NEXT: .save {r11, lr}
+; ASM-NEXT: push {r11, lr}
+; ASM-NEXT: mov r3, r0
+; ASM-NEXT: mov r0, r1
+; ASM-NEXT: mov r1, r2
+; ASM-NEXT: bic r12, r3, #1
+; ASM-NEXT: ldr r12, [r12, #-4]
+; ASM-NEXT: eor r12, r12, #78
+; ASM-NEXT: eor r12, r12, #24832
+; ASM-NEXT: eor r12, r12, #12320768
+; ASM-NEXT: eors r12, r12, #0
+; ASM-NEXT: beq .Ltmp3
+; ASM-NEXT: udf #33763
+; ASM-NEXT: .Ltmp3:
+; ASM-NEXT: blx r3
+; ASM-NEXT: pop {r11, pc}
+; Only 3 arguments total, so r3 is not used as call argument
+; Compiler puts target→r3, a→r0, b→r1
+; r3 is the target, so we use r12 as scratch (no spill needed)
+ call void %target(i32 %a, i32 %b) [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+!llvm.module.flags = !{!0}
+!0 = !{i32 4, !"kcfi", i32 1}
+!1 = !{i32 12345678}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; ISEL: {{.*}}
+; KCFI: {{.*}}
+; MIR: {{.*}}
diff --git a/llvm/test/CodeGen/ARM/kcfi-cbz-range.ll b/llvm/test/CodeGen/ARM/kcfi-cbz-range.ll
new file mode 100644
index 0000000..8e71cae
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/kcfi-cbz-range.ll
@@ -0,0 +1,81 @@
+; RUN: llc -mtriple=thumbv7-linux-gnueabi -filetype=obj < %s
+; RUN: llc -mtriple=thumbv7-linux-gnueabi < %s | FileCheck %s
+
+; This test verifies that KCFI instrumentation doesn't cause "out of range
+; pc-relative fixup value" errors when generating object files.
+;
+; The test creates a scenario with enough KCFI-instrumented indirect calls
+; (~32 bytes each) that would push a cbz/cbnz instruction out of its 126-byte forward
+; range if the KCFI_CHECK pseudo-instruction size is not properly accounted for.
+;
+; Without the fix (KCFI_CHECK returns size 0):
+; - Backend thinks KCFI checks take no space
+; - Generates cbz to branch over the code
+; - During assembly, cbz target is >126 bytes away
+; - Assembly fails with "error: out of range pc-relative fixup value"
+;
+; With the fix (KCFI_CHECK returns size 32 for Thumb2):
+; - Backend correctly accounts for KCFI check expansion
+; - Avoids cbz or uses longer-range branch instructions
+; - Assembly succeeds, object file is generated
+
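+; As a back-of-the-envelope check (the per-check size here is an estimate,
+; not a measured value): 6 checks x ~32 bytes is roughly 192 bytes of expanded
+; KCFI code, well past the 126-byte forward reach of cbz/cbnz, so the
+; conditional branch in the entry block must be relaxed once the expansion
+; size is known.
+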
+declare void @external_function(i32)
+
+; Test WITHOUT KCFI: should generate cbz since calls are small
+; CHECK-LABEL: test_without_kcfi:
+; CHECK: cbz
+; CHECK-NOT: bic{{.*}}#1
+define i32 @test_without_kcfi(ptr %callback, i32 %x) {
+entry:
+ %cmp = icmp eq i32 %x, 0
+ br i1 %cmp, label %if_zero, label %if_nonzero
+
+if_nonzero:
+ ; Regular (non-KCFI) indirect calls - much smaller
+ call void %callback()
+ call void %callback()
+ call void %callback()
+ call void %callback()
+ call void %callback()
+ call void %callback()
+
+ call void @external_function(i32 %x)
+ %add1 = add i32 %x, 1
+ ret i32 %add1
+
+if_zero:
+ call void @external_function(i32 0)
+ ret i32 0
+}
+
+; Test WITH KCFI: should NOT generate cbz due to large KCFI checks
+; CHECK-LABEL: test_with_kcfi:
+; CHECK-NOT: cbz
+; CHECK: bic{{.*}}#1
+define i32 @test_with_kcfi(ptr %callback, i32 %x) !kcfi_type !1 {
+entry:
+ %cmp = icmp eq i32 %x, 0
+ br i1 %cmp, label %if_zero, label %if_nonzero
+
+if_nonzero:
+ ; Six KCFI-instrumented indirect calls (~192 bytes total, exceeds cbz range)
+ call void %callback() [ "kcfi"(i32 12345678) ]
+ call void %callback() [ "kcfi"(i32 12345678) ]
+ call void %callback() [ "kcfi"(i32 12345678) ]
+ call void %callback() [ "kcfi"(i32 12345678) ]
+ call void %callback() [ "kcfi"(i32 12345678) ]
+ call void %callback() [ "kcfi"(i32 12345678) ]
+
+ ; Regular call to prevent optimization
+ call void @external_function(i32 %x)
+ %add1 = add i32 %x, 1
+ ret i32 %add1
+
+if_zero:
+ call void @external_function(i32 0)
+ ret i32 0
+}
+
+!llvm.module.flags = !{!0}
+!0 = !{i32 4, !"kcfi", i32 1}
+!1 = !{i32 12345678}
diff --git a/llvm/test/CodeGen/ARM/kcfi-patchable-function-prefix.ll b/llvm/test/CodeGen/ARM/kcfi-patchable-function-prefix.ll
new file mode 100644
index 0000000..f8e0838
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/kcfi-patchable-function-prefix.ll
@@ -0,0 +1,99 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=armv7-linux-gnueabi -verify-machineinstrs < %s | FileCheck %s
+
+; CHECK: .p2align 2
+; CHECK-NOT: nop
+; CHECK: .long 12345678
+define void @f1(ptr noundef %x) !kcfi_type !1 {
+; CHECK-LABEL: f1:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bic r12, r0, #1
+; CHECK-NEXT: ldr r12, [r12, #-4]
+; CHECK-NEXT: eor r12, r12, #78
+; CHECK-NEXT: eor r12, r12, #24832
+; CHECK-NEXT: eor r12, r12, #12320768
+; CHECK-NEXT: eors r12, r12, #0
+; CHECK-NEXT: beq .Ltmp0
+; CHECK-NEXT: udf #33760
+; CHECK-NEXT: .Ltmp0:
+; CHECK-NEXT: blx r0
+; CHECK-NEXT: pop {r11, pc}
+ call void %x() [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+; CHECK: .p2align 2
+; CHECK-NOT: .long
+; CHECK-NOT: nop
+define void @f2(ptr noundef %x) {
+; CHECK-LABEL: f2:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bic r12, r0, #1
+; CHECK-NEXT: ldr r12, [r12, #-4]
+; CHECK-NEXT: eor r12, r12, #78
+; CHECK-NEXT: eor r12, r12, #24832
+; CHECK-NEXT: eor r12, r12, #12320768
+; CHECK-NEXT: eors r12, r12, #0
+; CHECK-NEXT: beq .Ltmp1
+; CHECK-NEXT: udf #33760
+; CHECK-NEXT: .Ltmp1:
+; CHECK-NEXT: blx r0
+; CHECK-NEXT: pop {r11, pc}
+ call void %x() [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+; CHECK: .p2align 2
+; CHECK: .long 12345678
+; CHECK-COUNT-11: nop
+define void @f3(ptr noundef %x) #0 !kcfi_type !1 {
+; CHECK-LABEL: f3:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bic r12, r0, #1
+; CHECK-NEXT: ldr r12, [r12, #-48]
+; CHECK-NEXT: eor r12, r12, #78
+; CHECK-NEXT: eor r12, r12, #24832
+; CHECK-NEXT: eor r12, r12, #12320768
+; CHECK-NEXT: eors r12, r12, #0
+; CHECK-NEXT: beq .Ltmp3
+; CHECK-NEXT: udf #33760
+; CHECK-NEXT: .Ltmp3:
+; CHECK-NEXT: blx r0
+; CHECK-NEXT: pop {r11, pc}
+ call void %x() [ "kcfi"(i32 12345678) ]
+ ret void
+}
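+
+; Note on the -48 offset above (derived from the checks; stated as a layout
+; assumption): the 11 prefix nops are 4 bytes each in ARM mode, so the type
+; hash lives at -(4 + 11 * 4) = -48 bytes from the function entry instead of
+; the usual -4.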
+
+; CHECK: .p2align 2
+; CHECK-COUNT-11: nop
+define void @f4(ptr noundef %x) #0 {
+; CHECK-LABEL: f4:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bic r12, r0, #1
+; CHECK-NEXT: ldr r12, [r12, #-48]
+; CHECK-NEXT: eor r12, r12, #78
+; CHECK-NEXT: eor r12, r12, #24832
+; CHECK-NEXT: eor r12, r12, #12320768
+; CHECK-NEXT: eors r12, r12, #0
+; CHECK-NEXT: beq .Ltmp5
+; CHECK-NEXT: udf #33760
+; CHECK-NEXT: .Ltmp5:
+; CHECK-NEXT: blx r0
+; CHECK-NEXT: pop {r11, pc}
+ call void %x() [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+attributes #0 = { "patchable-function-prefix"="11" }
+
+!llvm.module.flags = !{!0}
+!0 = !{i32 4, !"kcfi", i32 1}
+!1 = !{i32 12345678}
diff --git a/llvm/test/CodeGen/ARM/kcfi-thumb.ll b/llvm/test/CodeGen/ARM/kcfi-thumb.ll
new file mode 100644
index 0000000..7c02d830
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/kcfi-thumb.ll
@@ -0,0 +1,215 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=thumbv6m-none-eabi < %s | FileCheck %s
+
+; This test verifies that Thumb1 (ARMv6-M) generates correct code for backend KCFI.
+; Thumb1 uses the backend KCFI implementation with Thumb1-specific instructions.
+
+; Test function without KCFI annotation
+; CHECK-LABEL: .globl nosan
+; CHECK-NEXT: .p2align 1
+; CHECK-NEXT: .type nosan,%function
+; CHECK-NEXT: .code 16
+; CHECK-NEXT: .thumb_func
+define dso_local void @nosan() nounwind {
+; CHECK-LABEL: nosan:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: bx lr
+ ret void
+}
+
+; Test function with KCFI annotation - verifies type hash emission
+;; The alignment is at least 4 to avoid unaligned type hash loads when this
+;; instrumented function is indirectly called.
+; CHECK-LABEL: .globl target_func
+; CHECK-NEXT: .p2align 2
+; CHECK-NEXT: .type target_func,%function
+; CHECK-NEXT: .long 3170468932
+; CHECK-NEXT: .code 16
+; CHECK-NEXT: .thumb_func
+define void @target_func() !kcfi_type !1 {
+; CHECK-LABEL: target_func:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: bx lr
+ ret void
+}
+
+; Test indirect call with KCFI check using operand bundles
+; CHECK-LABEL: .globl f1
+; CHECK: .p2align 2
+; CHECK-NEXT: .type f1,%function
+; CHECK-NEXT: .long 3170468932
+; CHECK-NEXT: .code 16
+; CHECK-NEXT: .thumb_func
+define void @f1(ptr noundef %x) !kcfi_type !1 {
+; CHECK-LABEL: f1:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r7, lr}
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: movs r3, #1
+; CHECK-NEXT: mov r2, r0
+; CHECK-NEXT: bics r2, r3
+; CHECK-NEXT: subs r2, #4
+; CHECK-NEXT: ldr r2, [r2]
+; CHECK-NEXT: movs r3, #188
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #249
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #132
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #68
+; CHECK-NEXT: cmp r2, r3
+; CHECK-NEXT: beq .Ltmp0
+; CHECK-NEXT: bkpt #0
+; CHECK-NEXT: .Ltmp0:
+; CHECK-NEXT: blx r0
+; CHECK-NEXT: pop {r7, pc}
+ call void %x() [ "kcfi"(i32 -1124498364) ]
+ ret void
+}
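+
+; The constant built above is ((0xBC << 8 | 0xF9) << 8 | 0x84) << 8 | 0x44 =
+; 0xBCF98444 = 3170468932, i.e. the unsigned view of the i32 -1124498364 in
+; the "kcfi" bundle; it is assembled byte by byte because this Thumb1 target
+; has no single-instruction wide move.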
+
+; Test with tail call - backend KCFI supports tail calls
+define void @f2(ptr noundef %x) !kcfi_type !1 {
+; CHECK-LABEL: f2:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r7, lr}
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: movs r3, #1
+; CHECK-NEXT: mov r2, r0
+; CHECK-NEXT: bics r2, r3
+; CHECK-NEXT: subs r2, #4
+; CHECK-NEXT: ldr r2, [r2]
+; CHECK-NEXT: movs r3, #188
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #249
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #132
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #68
+; CHECK-NEXT: cmp r2, r3
+; CHECK-NEXT: beq .Ltmp1
+; CHECK-NEXT: bkpt #0
+; CHECK-NEXT: .Ltmp1:
+; CHECK-NEXT: blx r0
+; CHECK-NEXT: pop {r7, pc}
+ tail call void %x() [ "kcfi"(i32 -1124498364) ]
+ ret void
+}
+
+; Test with R2 live (3 arguments) - compiler shuffles args; R2 is pushed around the KCFI check
+define void @f3_r2_live(ptr noundef %x, i32 %a, i32 %b, i32 %c) !kcfi_type !1 {
+; CHECK-LABEL: f3_r2_live:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r4, lr}
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: mov r1, r2
+; CHECK-NEXT: mov r2, r3
+; CHECK-NEXT: push {r2}
+; CHECK-NEXT: movs r3, #1
+; CHECK-NEXT: mov r2, r4
+; CHECK-NEXT: bics r2, r3
+; CHECK-NEXT: subs r2, #4
+; CHECK-NEXT: ldr r2, [r2]
+; CHECK-NEXT: movs r3, #188
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #249
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #132
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #68
+; CHECK-NEXT: cmp r2, r3
+; CHECK-NEXT: pop {r2}
+; CHECK-NEXT: beq .Ltmp2
+; CHECK-NEXT: bkpt #0
+; CHECK-NEXT: .Ltmp2:
+; CHECK-NEXT: blx r4
+; CHECK-NEXT: pop {r4, pc}
+; Compiler shuffles: target→r4, c→r2, a→r0, b→r1
+; R2 is live (3rd arg), so we push it, then use R3 as temp and R2 as scratch
+ call void %x(i32 %a, i32 %b, i32 %c) [ "kcfi"(i32 -1124498364) ]
+ ret void
+}
+
+; Test with both R2 and R3 live (4 arguments) - compiler moves c/target to r5/r4, uses R3 as temp and R2 as scratch
+define void @f4_r2_r3_live(ptr noundef %x, i32 %a, i32 %b, i32 %c, i32 %d) !kcfi_type !1 {
+; CHECK-LABEL: f4_r2_r3_live:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r4, r5, r7, lr}
+; CHECK-NEXT: push {r4, r5, r7, lr}
+; CHECK-NEXT: mov r5, r3
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: ldr r3, [sp, #16]
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: mov r1, r2
+; CHECK-NEXT: mov r2, r5
+; CHECK-NEXT: push {r3}
+; CHECK-NEXT: push {r2}
+; CHECK-NEXT: movs r3, #1
+; CHECK-NEXT: mov r2, r4
+; CHECK-NEXT: bics r2, r3
+; CHECK-NEXT: subs r2, #4
+; CHECK-NEXT: ldr r2, [r2]
+; CHECK-NEXT: movs r3, #188
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #249
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #132
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #68
+; CHECK-NEXT: cmp r2, r3
+; CHECK-NEXT: pop {r2}
+; CHECK-NEXT: pop {r3}
+; CHECK-NEXT: beq .Ltmp3
+; CHECK-NEXT: bkpt #0
+; CHECK-NEXT: .Ltmp3:
+; CHECK-NEXT: blx r4
+; CHECK-NEXT: pop {r4, r5, r7, pc}
+; Compiler shuffles: r3→r5, target→r4, d→r3 (from stack), a→r0, b→r1, c→r2
+; It then pushes r3 (the d value) and r2, and uses R3 as temp and R2 as scratch
+ call void %x(i32 %a, i32 %b, i32 %c, i32 %d) [ "kcfi"(i32 -1124498364) ]
+ ret void
+}
+
+; Test where target ends up in R12, forcing R2 as scratch, with both R2 and R3 live
+; This uses inline asm to force target into R12, with 4 call arguments to make R2/R3 live
+define void @f5_r12_target_r2_r3_live(i32 %a, i32 %b, i32 %c, i32 %d) !kcfi_type !1 {
+; CHECK-LABEL: f5_r12_target_r2_r3_live:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r7, lr}
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: @APP
+; CHECK-NEXT: @NO_APP
+; CHECK-NEXT: push {r3}
+; CHECK-NEXT: push {r2}
+; CHECK-NEXT: movs r3, #1
+; CHECK-NEXT: mov r2, r12
+; CHECK-NEXT: bics r2, r3
+; CHECK-NEXT: subs r2, #4
+; CHECK-NEXT: ldr r2, [r2]
+; CHECK-NEXT: movs r3, #188
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #249
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #132
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #68
+; CHECK-NEXT: cmp r2, r3
+; CHECK-NEXT: pop {r2}
+; CHECK-NEXT: pop {r3}
+; CHECK-NEXT: beq .Ltmp4
+; CHECK-NEXT: bkpt #0
+; CHECK-NEXT: .Ltmp4:
+; CHECK-NEXT: blx r12
+; CHECK-NEXT: pop {r7, pc}
+; Use inline asm to get function pointer into R12
+; With 4 arguments (r0-r3), both R2 and R3 are live
+; Target in R12 means R2 is scratch, R3 is temp, and both need spilling
+ %target = call ptr asm "", "={r12}"()
+ call void %target(i32 %a, i32 %b, i32 %c, i32 %d) [ "kcfi"(i32 -1124498364) ]
+ ret void
+}
+
+!llvm.module.flags = !{!0}
+!0 = !{i32 4, !"kcfi", i32 1}
+!1 = !{i32 -1124498364}
diff --git a/llvm/test/CodeGen/ARM/kcfi-thumb2.ll b/llvm/test/CodeGen/ARM/kcfi-thumb2.ll
new file mode 100644
index 0000000..f319d98
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/kcfi-thumb2.ll
@@ -0,0 +1,163 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=thumbv7-linux-gnueabi -verify-machineinstrs < %s | FileCheck %s --check-prefix=ASM
+; RUN: llc -mtriple=thumbv7-linux-gnueabi -verify-machineinstrs -stop-after=finalize-isel < %s | FileCheck %s --check-prefixes=MIR,ISEL
+; RUN: llc -mtriple=thumbv7-linux-gnueabi -verify-machineinstrs -stop-after=kcfi < %s | FileCheck %s --check-prefixes=MIR,KCFI
+
+; MIR checks for all functions (grouped here to prevent update_llc_test_checks.py from removing them)
+
+; MIR-LABEL: name: f1
+; MIR: body:
+
+; ISEL: tBLXr 14 /* CC::al */, $noreg, %0, csr_aapcs,{{.*}} cfi-type 12345678
+
+; KCFI: BUNDLE{{.*}} {
+; KCFI-NEXT: KCFI_CHECK_Thumb2 $r0, 12345678
+; KCFI-NEXT: tBLXr 14 /* CC::al */, $noreg, {{(killed )?}}$r0, csr_aapcs,{{.*}}
+; KCFI-NEXT: }
+
+; MIR-LABEL: name: f2
+; MIR: body:
+
+; ISEL: TCRETURNri %0, 0, csr_aapcs, implicit $sp, cfi-type 12345678
+
+; KCFI: BUNDLE{{.*}} {
+; KCFI-NEXT: KCFI_CHECK_Thumb2 $r0, 12345678
+; KCFI-NEXT: tTAILJMPr {{(killed )?}}$r0, csr_aapcs, implicit $sp, implicit $sp
+; KCFI-NEXT: }
+
+; Test function without KCFI annotation
+; ASM-LABEL: .globl nosan
+; ASM-NEXT: .p2align 1
+; ASM-NEXT: .type nosan,%function
+; ASM-NEXT: .code 16
+; ASM-NEXT: .thumb_func
+define dso_local void @nosan() nounwind {
+; ASM-LABEL: nosan:
+; ASM: @ %bb.0:
+; ASM-NEXT: bx lr
+ ret void
+}
+
+; Test function with KCFI annotation - verifies type hash emission
+;; The alignment is at least 4 to avoid unaligned type hash loads when this
+;; instrumented function is indirectly called.
+; ASM-LABEL: .globl target_func
+; ASM-NEXT: .p2align 2
+; ASM-NEXT: .type target_func,%function
+; ASM-NEXT: .long 12345678
+; ASM-NEXT: .code 16
+; ASM-NEXT: .thumb_func
+define void @target_func() !kcfi_type !1 {
+; ASM-LABEL: target_func:
+; ASM: @ %bb.0:
+; ASM-NEXT: bx lr
+ ret void
+}
+
+; Test indirect call with KCFI check
+; ASM: .long 12345678
+define void @f1(ptr noundef %x) !kcfi_type !1 {
+; ASM-LABEL: f1:
+; ASM: @ %bb.0:
+; ASM-NEXT: .save {r7, lr}
+; ASM-NEXT: push {r7, lr}
+; ASM-NEXT: bic r12, r0, #1
+; ASM-NEXT: ldr r12, [r12, #-4]
+; ASM-NEXT: eor r12, r12, #78
+; ASM-NEXT: eor r12, r12, #24832
+; ASM-NEXT: eor r12, r12, #12320768
+; ASM-NEXT: eors r12, r12, #0
+; ASM-NEXT: beq.w .Ltmp0
+; ASM-NEXT: udf #128
+; ASM-NEXT: .Ltmp0:
+; ASM-NEXT: blx r0
+; ASM-NEXT: pop {r7, pc}
+
+ call void %x() [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+; Test with tail call
+define void @f2(ptr noundef %x) !kcfi_type !1 {
+; ASM-LABEL: f2:
+; ASM: @ %bb.0:
+; ASM-NEXT: bic r12, r0, #1
+; ASM-NEXT: ldr r12, [r12, #-4]
+; ASM-NEXT: eor r12, r12, #78
+; ASM-NEXT: eor r12, r12, #24832
+; ASM-NEXT: eor r12, r12, #12320768
+; ASM-NEXT: eors r12, r12, #0
+; ASM-NEXT: beq.w .Ltmp1
+; ASM-NEXT: udf #128
+; ASM-NEXT: .Ltmp1:
+; ASM-NEXT: bx r0
+
+ tail call void %x() [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+; Test r3 spill/reload when target is r12 and r3 is a call argument (Thumb2)
+define void @f3_r3_spill(ptr noundef %target, i32 %a, i32 %b, i32 %c, i32 %d) !kcfi_type !1 {
+; ASM-LABEL: f3_r3_spill:
+; ASM: @ %bb.0:
+; ASM-NEXT: .save {r7, lr}
+; ASM-NEXT: push {r7, lr}
+; ASM-NEXT: mov lr, r3
+; ASM-NEXT: ldr r3, [sp, #8]
+; ASM-NEXT: mov r12, r0
+; ASM-NEXT: mov r0, r1
+; ASM-NEXT: mov r1, r2
+; ASM-NEXT: mov r2, lr
+; ASM-NEXT: push {r3}
+; ASM-NEXT: bic r3, r12, #1
+; ASM-NEXT: ldr r3, [r3, #-4]
+; ASM-NEXT: eor r3, r3, #78
+; ASM-NEXT: eor r3, r3, #24832
+; ASM-NEXT: eor r3, r3, #12320768
+; ASM-NEXT: eors r3, r3, #0
+; ASM-NEXT: pop {r3}
+; ASM-NEXT: beq.w .Ltmp2
+; ASM-NEXT: udf #140
+; ASM-NEXT: .Ltmp2:
+; ASM-NEXT: blx r12
+; ASM-NEXT: pop {r7, pc}
+; Arguments: r0=%target, r1=%a, r2=%b, r3=%c, [sp+8]=%d
+; Call needs: r0=%a, r1=%b, r2=%c, r3=%d, target in r12
+; r3 is live as 4th argument, so push it before KCFI check
+ call void %target(i32 %a, i32 %b, i32 %c, i32 %d) [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+; Test with 3 arguments - r3 not live as an argument; the target ends up in r3, so r12 is used as scratch
+define void @f4_r3_unused(ptr noundef %target, i32 %a, i32 %b) !kcfi_type !1 {
+; ASM-LABEL: f4_r3_unused:
+; ASM: @ %bb.0:
+; ASM-NEXT: .save {r7, lr}
+; ASM-NEXT: push {r7, lr}
+; ASM-NEXT: mov r3, r0
+; ASM-NEXT: mov r0, r1
+; ASM-NEXT: mov r1, r2
+; ASM-NEXT: bic r12, r3, #1
+; ASM-NEXT: ldr r12, [r12, #-4]
+; ASM-NEXT: eor r12, r12, #78
+; ASM-NEXT: eor r12, r12, #24832
+; ASM-NEXT: eor r12, r12, #12320768
+; ASM-NEXT: eors r12, r12, #0
+; ASM-NEXT: beq.w .Ltmp3
+; ASM-NEXT: udf #131
+; ASM-NEXT: .Ltmp3:
+; ASM-NEXT: blx r3
+; ASM-NEXT: pop {r7, pc}
+; Only 3 arguments total, so r3 is not used as call argument
+; Target might be in r3, using r12 as scratch (no spill needed)
+ call void %target(i32 %a, i32 %b) [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+!llvm.module.flags = !{!0}
+!0 = !{i32 4, !"kcfi", i32 1}
+!1 = !{i32 12345678}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; ISEL: {{.*}}
+; KCFI: {{.*}}
+; MIR: {{.*}}
diff --git a/llvm/test/CodeGen/ARM/kcfi.ll b/llvm/test/CodeGen/ARM/kcfi.ll
deleted file mode 100644
index 9e16468..0000000
--- a/llvm/test/CodeGen/ARM/kcfi.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc -mtriple=thumbv6m-none-eabi < %s | FileCheck %s
-
-; CHECK-LABEL: .globl nosan
-; CHECK-NEXT: .p2align 1
-; CHECK-NEXT: .type nosan,%function
-; CHECK-NEXT: .code 16
-; CHECK-NEXT: .thumb_func
-; CHECK-NEXT: nosan:
-define dso_local void @nosan() nounwind {
- ret void
-}
-
-;; The alignment is at least 4 to avoid unaligned type hash loads when this
-;; instrumented function is indirectly called.
-; CHECK-LABEL: .globl f1
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: .type f1,%function
-; CHECK-NEXT: .long 3170468932
-; CHECK-NEXT: .code 16
-; CHECK-NEXT: .thumb_func
-; CHECK-NEXT: f1:
-define void @f1(ptr noundef %x) !kcfi_type !1 {
- ret void
-}
-
-!llvm.module.flags = !{!0}
-!0 = !{i32 4, !"kcfi", i32 1}
-!1 = !{i32 -1124498364}
diff --git a/llvm/test/CodeGen/ARM/out-of-registers.ll b/llvm/test/CodeGen/ARM/out-of-registers.ll
index c6488f1..8da2069 100644
--- a/llvm/test/CodeGen/ARM/out-of-registers.ll
+++ b/llvm/test/CodeGen/ARM/out-of-registers.ll
@@ -32,7 +32,7 @@ declare { <4 x float>, <4 x float>, <4 x float>, <4 x float> } @llvm.arm.neon.vl
; Function Attrs: nounwind
-attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "unsafe-fp-math"="true" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #1 = { nounwind }
attributes #2 = { nounwind readonly }
diff --git a/llvm/test/CodeGen/ARM/relax-per-target-feature.ll b/llvm/test/CodeGen/ARM/relax-per-target-feature.ll
index 71db294..99ed6f3 100644
--- a/llvm/test/CodeGen/ARM/relax-per-target-feature.ll
+++ b/llvm/test/CodeGen/ARM/relax-per-target-feature.ll
@@ -30,5 +30,5 @@ entry:
attributes #0 = { nounwind "disable-tail-calls"="false" "target-cpu"="cortex-a53" "target-features"="+crypto,+fp-armv8,+neon,+soft-float-abi,+strict-align,+thumb-mode,-crc,-dotprod,-dsp,-hwdiv,-hwdiv-arm,-ras" "use-soft-float"="true" }
-attributes #2 = { nounwind "disable-tail-calls"="false" "target-cpu"="arm7tdmi" "target-features"="+strict-align,+thumb-mode,-crc,-dotprod,-dsp,-hwdiv,-hwdiv-arm,-ras" "unsafe-fp-math"="false" "use-soft-float"="true" }
+attributes #2 = { nounwind "disable-tail-calls"="false" "target-cpu"="arm7tdmi" "target-features"="+strict-align,+thumb-mode,-crc,-dotprod,-dsp,-hwdiv,-hwdiv-arm,-ras" "use-soft-float"="true" }
attributes #3 = { nounwind }
diff --git a/llvm/test/CodeGen/ARM/softfp-constant-comparison.ll b/llvm/test/CodeGen/ARM/softfp-constant-comparison.ll
index 76df93b..2aa7611 100644
--- a/llvm/test/CodeGen/ARM/softfp-constant-comparison.ll
+++ b/llvm/test/CodeGen/ARM/softfp-constant-comparison.ll
@@ -32,4 +32,4 @@ land.end: ; preds = %land.rhs, %entry
ret void
}
-attributes #0 = { noinline nounwind optnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign,preserve-sign" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m4" "target-features"="+armv7e-m,+dsp,+fp16,+hwdiv,+thumb-mode,+vfp2sp,+vfp3d16sp,+vfp4d16sp,-aes,-crc,-crypto,-dotprod,-fp16fml,-fullfp16,-hwdiv-arm,-lob,-mve,-mve.fp,-ras,-sb,-sha2" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { noinline nounwind optnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign,preserve-sign" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m4" "target-features"="+armv7e-m,+dsp,+fp16,+hwdiv,+thumb-mode,+vfp2sp,+vfp3d16sp,+vfp4d16sp,-aes,-crc,-crypto,-dotprod,-fp16fml,-fullfp16,-hwdiv-arm,-lob,-mve,-mve.fp,-ras,-sb,-sha2" "use-soft-float"="false" }
diff --git a/llvm/test/CodeGen/ARM/stack-protector-bmovpcb_call.ll b/llvm/test/CodeGen/ARM/stack-protector-bmovpcb_call.ll
index 6f2cb42..2cf6d29 100644
--- a/llvm/test/CodeGen/ARM/stack-protector-bmovpcb_call.ll
+++ b/llvm/test/CodeGen/ARM/stack-protector-bmovpcb_call.ll
@@ -25,7 +25,7 @@ declare void @llvm.memcpy.p0.p0.i32(ptr nocapture, ptr nocapture readonly, i32,
; Function Attrs: nounwind optsize
declare i32 @printf(ptr nocapture readonly, ...) #2
-attributes #0 = { nounwind optsize ssp "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind optsize ssp "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #1 = { nounwind }
-attributes #2 = { nounwind optsize "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind optsize "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #3 = { nounwind optsize }
diff --git a/llvm/test/CodeGen/ARM/stack_guard_remat.ll b/llvm/test/CodeGen/ARM/stack_guard_remat.ll
index 983ef13..0930ccc 100644
--- a/llvm/test/CodeGen/ARM/stack_guard_remat.ll
+++ b/llvm/test/CodeGen/ARM/stack_guard_remat.ll
@@ -68,7 +68,7 @@ declare void @foo3(ptr)
; Function Attrs: nounwind
declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
-attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
;--- pic-flag.ll
!llvm.module.flags = !{!0}
diff --git a/llvm/test/CodeGen/ARM/struct-byval-frame-index.ll b/llvm/test/CodeGen/ARM/struct-byval-frame-index.ll
index 24df0d3..868dc03 100644
--- a/llvm/test/CodeGen/ARM/struct-byval-frame-index.ll
+++ b/llvm/test/CodeGen/ARM/struct-byval-frame-index.ll
@@ -34,4 +34,4 @@ entry:
; Function Attrs: nounwind
declare void @RestoreMVBlock8x8(i32, i32, ptr byval(%structN) nocapture, i32) #1
-attributes #1 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/CodeGen/ARM/subtarget-align.ll b/llvm/test/CodeGen/ARM/subtarget-align.ll
index a24b487..f87e21f 100644
--- a/llvm/test/CodeGen/ARM/subtarget-align.ll
+++ b/llvm/test/CodeGen/ARM/subtarget-align.ll
@@ -18,7 +18,7 @@ entry:
ret i32 0
}
-attributes #0 = { "target-cpu"="generic" "target-features"="+armv7-a,+dsp,+neon,+vfp3,-thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { "target-cpu"="generic" "target-features"="+armv7-a,+dsp,+neon,+vfp3,-thumb-mode" "use-soft-float"="false" }
attributes #1 = { "target-cpu"="arm7tdmi" "target-features"="+armv4t" "use-soft-float"="true" }
diff --git a/llvm/test/CodeGen/ARM/unschedule-first-call.ll b/llvm/test/CodeGen/ARM/unschedule-first-call.ll
index e0bb787..ad422f7 100644
--- a/llvm/test/CodeGen/ARM/unschedule-first-call.ll
+++ b/llvm/test/CodeGen/ARM/unschedule-first-call.ll
@@ -128,7 +128,7 @@ declare { i64, i1 } @llvm.sadd.with.overflow.i64(i64, i64) #1
; Function Attrs: nounwind readnone
declare { i64, i1 } @llvm.ssub.with.overflow.i64(i64, i64) #1
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "polly-optimized" "stack-protector-buffer-size"="8" "target-cpu"="arm1176jzf-s" "target-features"="+dsp,+strict-align,+vfp2" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "polly-optimized" "stack-protector-buffer-size"="8" "target-cpu"="arm1176jzf-s" "target-features"="+dsp,+strict-align,+vfp2" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
!llvm.ident = !{!0}
diff --git a/llvm/test/CodeGen/ARM/vector-spilling.ll b/llvm/test/CodeGen/ARM/vector-spilling.ll
index 5dc20a8..8d1339844 100644
--- a/llvm/test/CodeGen/ARM/vector-spilling.ll
+++ b/llvm/test/CodeGen/ARM/vector-spilling.ll
@@ -30,4 +30,4 @@ entry:
declare void @foo(<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>)
-attributes #0 = { noredzone "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { noredzone "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/CodeGen/ARM/vldm-sched-a9.ll b/llvm/test/CodeGen/ARM/vldm-sched-a9.ll
index 892b261..4e36711 100644
--- a/llvm/test/CodeGen/ARM/vldm-sched-a9.ll
+++ b/llvm/test/CodeGen/ARM/vldm-sched-a9.ll
@@ -132,4 +132,4 @@ entry:
declare void @capture(ptr, ptr)
-attributes #0 = { noredzone "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { noredzone "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/CodeGen/DirectX/CBufferAccess/unused.ll b/llvm/test/CodeGen/DirectX/CBufferAccess/unused.ll
new file mode 100644
index 0000000..6f1bbd0
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/CBufferAccess/unused.ll
@@ -0,0 +1,13 @@
+; RUN: opt -S -dxil-cbuffer-access -mtriple=dxil--shadermodel6.3-library %s | FileCheck %s
+; Check that we correctly ignore cbuffers that were nulled out by optimizations.
+
+%__cblayout_CB = type <{ float }>
+@CB.cb = local_unnamed_addr global target("dx.CBuffer", target("dx.Layout", %__cblayout_CB, 4, 0)) poison
+@x = external local_unnamed_addr addrspace(2) global float, align 4
+
+; CHECK-NOT: !hlsl.cbs =
+!hlsl.cbs = !{!0, !1, !2}
+
+!0 = !{ptr @CB.cb, ptr addrspace(2) @x}
+!1 = !{ptr @CB.cb, null}
+!2 = !{null, null}
diff --git a/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll b/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll
index 245f764..7149cdb 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/shuffle-as-permute-and-shuffle.ll
@@ -32,9 +32,7 @@ define <16 x i16> @shuffle_v16i16(<16 x i16> %a) {
; CHECK: # %bb.0:
; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI2_0)
; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI2_0)
-; CHECK-NEXT: xvpermi.d $xr2, $xr0, 78
-; CHECK-NEXT: xvshuf.w $xr1, $xr2, $xr0
-; CHECK-NEXT: xvori.b $xr0, $xr1, 0
+; CHECK-NEXT: xvperm.w $xr0, $xr0, $xr1
; CHECK-NEXT: ret
%shuffle = shufflevector <16 x i16> %a, <16 x i16> poison, <16 x i32> <i32 8, i32 9, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
ret <16 x i16> %shuffle
@@ -55,9 +53,7 @@ define <16 x i16> @shuffle_v16i16_same_lane(<16 x i16> %a) {
define <8 x i32> @shuffle_v8i32(<8 x i32> %a) {
; CHECK-LABEL: shuffle_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI4_0)
-; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI4_0)
-; CHECK-NEXT: xvperm.w $xr0, $xr0, $xr1
+; CHECK-NEXT: xvpermi.d $xr0, $xr0, 226
; CHECK-NEXT: ret
%shuffle = shufflevector <8 x i32> %a, <8 x i32> poison, <8 x i32> <i32 4, i32 5, i32 0, i32 1, i32 4, i32 5, i32 6, i32 7>
ret <8 x i32> %shuffle
@@ -93,9 +89,7 @@ define <4 x i64> @shuffle_v4i64_same_lane(<4 x i64> %a) {
define <8 x float> @shuffle_v8f32(<8 x float> %a) {
; CHECK-LABEL: shuffle_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: pcalau12i $a0, %pc_hi20(.LCPI8_0)
-; CHECK-NEXT: xvld $xr1, $a0, %pc_lo12(.LCPI8_0)
-; CHECK-NEXT: xvperm.w $xr0, $xr0, $xr1
+; CHECK-NEXT: xvpermi.d $xr0, $xr0, 226
; CHECK-NEXT: ret
%shuffle = shufflevector <8 x float> %a, <8 x float> poison, <8 x i32> <i32 4, i32 5, i32 0, i32 1, i32 4, i32 5, i32 6, i32 7>
ret <8 x float> %shuffle
diff --git a/llvm/test/CodeGen/MSP430/libcalls.ll b/llvm/test/CodeGen/MSP430/libcalls.ll
index 5d3755c..d1bafea 100644
--- a/llvm/test/CodeGen/MSP430/libcalls.ll
+++ b/llvm/test/CodeGen/MSP430/libcalls.ll
@@ -639,4 +639,18 @@ entry:
ret i32 %shr
}
+define i64 @test__mspabi_divull(i64 %a, i64 %b) #0 {
+; CHECK-LABEL: test__mspabi_divull:
+; CHECK: call #__mspabi_divull
+ %result = udiv i64 %a, %b
+ ret i64 %result
+}
+
+define i64 @test__mspabi_remull(i64 %a, i64 %b) #0 {
+; CHECK-LABEL: test__mspabi_remull:
+; CHECK: call #__mspabi_remull
+ %result = urem i64 %a, %b
+ ret i64 %result
+}
+
attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/store-fp-zero-to-x0.ll b/llvm/test/CodeGen/RISCV/GlobalISel/store-fp-zero-to-x0.ll
new file mode 100644
index 0000000..bc79c6f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/store-fp-zero-to-x0.ll
@@ -0,0 +1,320 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=riscv32 -mattr=+f,+zfh < %s \
+; RUN: | FileCheck %s --check-prefix=RV32F
+; RUN: llc -global-isel -mtriple=riscv32 -mattr=+d,+zfh < %s \
+; RUN: | FileCheck %s --check-prefix=RV32D
+; RUN: llc -global-isel -mtriple=riscv64 -mattr=+f,+zfh < %s \
+; RUN: | FileCheck %s --check-prefix=RV64F
+; RUN: llc -global-isel -mtriple=riscv64 -mattr=+d,+zfh < %s \
+; RUN: | FileCheck %s --check-prefix=RV64D
+
+define void @zero_f16(ptr %i) {
+; RV32F-LABEL: zero_f16:
+; RV32F: # %bb.0: # %entry
+; RV32F-NEXT: sh zero, 0(a0)
+; RV32F-NEXT: ret
+;
+; RV32D-LABEL: zero_f16:
+; RV32D: # %bb.0: # %entry
+; RV32D-NEXT: sh zero, 0(a0)
+; RV32D-NEXT: ret
+;
+; RV64F-LABEL: zero_f16:
+; RV64F: # %bb.0: # %entry
+; RV64F-NEXT: sh zero, 0(a0)
+; RV64F-NEXT: ret
+;
+; RV64D-LABEL: zero_f16:
+; RV64D: # %bb.0: # %entry
+; RV64D-NEXT: sh zero, 0(a0)
+; RV64D-NEXT: ret
+entry:
+ store half 0.0, ptr %i, align 4
+ ret void
+}
+
+define void @zero_bf16(ptr %i) {
+; RV32F-LABEL: zero_bf16:
+; RV32F: # %bb.0: # %entry
+; RV32F-NEXT: sh zero, 0(a0)
+; RV32F-NEXT: ret
+;
+; RV32D-LABEL: zero_bf16:
+; RV32D: # %bb.0: # %entry
+; RV32D-NEXT: sh zero, 0(a0)
+; RV32D-NEXT: ret
+;
+; RV64F-LABEL: zero_bf16:
+; RV64F: # %bb.0: # %entry
+; RV64F-NEXT: sh zero, 0(a0)
+; RV64F-NEXT: ret
+;
+; RV64D-LABEL: zero_bf16:
+; RV64D: # %bb.0: # %entry
+; RV64D-NEXT: sh zero, 0(a0)
+; RV64D-NEXT: ret
+entry:
+ store bfloat 0.0, ptr %i, align 4
+ ret void
+}
+
+define void @zero_f32(ptr %i) {
+; RV32F-LABEL: zero_f32:
+; RV32F: # %bb.0: # %entry
+; RV32F-NEXT: sw zero, 0(a0)
+; RV32F-NEXT: ret
+;
+; RV32D-LABEL: zero_f32:
+; RV32D: # %bb.0: # %entry
+; RV32D-NEXT: sw zero, 0(a0)
+; RV32D-NEXT: ret
+;
+; RV64F-LABEL: zero_f32:
+; RV64F: # %bb.0: # %entry
+; RV64F-NEXT: sw zero, 0(a0)
+; RV64F-NEXT: ret
+;
+; RV64D-LABEL: zero_f32:
+; RV64D: # %bb.0: # %entry
+; RV64D-NEXT: sw zero, 0(a0)
+; RV64D-NEXT: ret
+entry:
+ store float 0.0, ptr %i, align 4
+ ret void
+}
+
+
+define void @zero_f64(ptr %i) {
+; RV32F-LABEL: zero_f64:
+; RV32F: # %bb.0: # %entry
+; RV32F-NEXT: lui a1, %hi(.LCPI3_0)
+; RV32F-NEXT: addi a1, a1, %lo(.LCPI3_0)
+; RV32F-NEXT: lw a2, 0(a1)
+; RV32F-NEXT: lw a1, 4(a1)
+; RV32F-NEXT: sw a2, 0(a0)
+; RV32F-NEXT: sw a1, 4(a0)
+; RV32F-NEXT: ret
+;
+; RV32D-LABEL: zero_f64:
+; RV32D: # %bb.0: # %entry
+; RV32D-NEXT: fcvt.d.w fa5, zero
+; RV32D-NEXT: fsd fa5, 0(a0)
+; RV32D-NEXT: ret
+;
+; RV64F-LABEL: zero_f64:
+; RV64F: # %bb.0: # %entry
+; RV64F-NEXT: sd zero, 0(a0)
+; RV64F-NEXT: ret
+;
+; RV64D-LABEL: zero_f64:
+; RV64D: # %bb.0: # %entry
+; RV64D-NEXT: sd zero, 0(a0)
+; RV64D-NEXT: ret
+entry:
+ store double 0.0, ptr %i, align 8
+ ret void
+}
+
+define void @zero_v1f32(ptr %i) {
+; RV32F-LABEL: zero_v1f32:
+; RV32F: # %bb.0: # %entry
+; RV32F-NEXT: sw zero, 0(a0)
+; RV32F-NEXT: ret
+;
+; RV32D-LABEL: zero_v1f32:
+; RV32D: # %bb.0: # %entry
+; RV32D-NEXT: sw zero, 0(a0)
+; RV32D-NEXT: ret
+;
+; RV64F-LABEL: zero_v1f32:
+; RV64F: # %bb.0: # %entry
+; RV64F-NEXT: sw zero, 0(a0)
+; RV64F-NEXT: ret
+;
+; RV64D-LABEL: zero_v1f32:
+; RV64D: # %bb.0: # %entry
+; RV64D-NEXT: sw zero, 0(a0)
+; RV64D-NEXT: ret
+entry:
+ store <1 x float> <float 0.0>, ptr %i, align 8
+ ret void
+}
+
+define void @zero_v2f32(ptr %i) {
+; RV32F-LABEL: zero_v2f32:
+; RV32F: # %bb.0: # %entry
+; RV32F-NEXT: sw zero, 0(a0)
+; RV32F-NEXT: sw zero, 4(a0)
+; RV32F-NEXT: ret
+;
+; RV32D-LABEL: zero_v2f32:
+; RV32D: # %bb.0: # %entry
+; RV32D-NEXT: sw zero, 0(a0)
+; RV32D-NEXT: sw zero, 4(a0)
+; RV32D-NEXT: ret
+;
+; RV64F-LABEL: zero_v2f32:
+; RV64F: # %bb.0: # %entry
+; RV64F-NEXT: sw zero, 0(a0)
+; RV64F-NEXT: sw zero, 4(a0)
+; RV64F-NEXT: ret
+;
+; RV64D-LABEL: zero_v2f32:
+; RV64D: # %bb.0: # %entry
+; RV64D-NEXT: sw zero, 0(a0)
+; RV64D-NEXT: sw zero, 4(a0)
+; RV64D-NEXT: ret
+entry:
+ store <2 x float> <float 0.0, float 0.0>, ptr %i, align 8
+ ret void
+}
+
+define void @zero_v4f32(ptr %i) {
+; RV32F-LABEL: zero_v4f32:
+; RV32F: # %bb.0: # %entry
+; RV32F-NEXT: sw zero, 0(a0)
+; RV32F-NEXT: sw zero, 4(a0)
+; RV32F-NEXT: sw zero, 8(a0)
+; RV32F-NEXT: sw zero, 12(a0)
+; RV32F-NEXT: ret
+;
+; RV32D-LABEL: zero_v4f32:
+; RV32D: # %bb.0: # %entry
+; RV32D-NEXT: sw zero, 0(a0)
+; RV32D-NEXT: sw zero, 4(a0)
+; RV32D-NEXT: sw zero, 8(a0)
+; RV32D-NEXT: sw zero, 12(a0)
+; RV32D-NEXT: ret
+;
+; RV64F-LABEL: zero_v4f32:
+; RV64F: # %bb.0: # %entry
+; RV64F-NEXT: sw zero, 0(a0)
+; RV64F-NEXT: sw zero, 4(a0)
+; RV64F-NEXT: sw zero, 8(a0)
+; RV64F-NEXT: sw zero, 12(a0)
+; RV64F-NEXT: ret
+;
+; RV64D-LABEL: zero_v4f32:
+; RV64D: # %bb.0: # %entry
+; RV64D-NEXT: sw zero, 0(a0)
+; RV64D-NEXT: sw zero, 4(a0)
+; RV64D-NEXT: sw zero, 8(a0)
+; RV64D-NEXT: sw zero, 12(a0)
+; RV64D-NEXT: ret
+entry:
+ store <4 x float> <float 0.0, float 0.0, float 0.0, float 0.0>, ptr %i, align 8
+ ret void
+}
+
+define void @zero_v1f64(ptr %i) {
+; RV32F-LABEL: zero_v1f64:
+; RV32F: # %bb.0: # %entry
+; RV32F-NEXT: lui a1, %hi(.LCPI7_0)
+; RV32F-NEXT: addi a1, a1, %lo(.LCPI7_0)
+; RV32F-NEXT: lw a2, 0(a1)
+; RV32F-NEXT: lw a1, 4(a1)
+; RV32F-NEXT: sw a2, 0(a0)
+; RV32F-NEXT: sw a1, 4(a0)
+; RV32F-NEXT: ret
+;
+; RV32D-LABEL: zero_v1f64:
+; RV32D: # %bb.0: # %entry
+; RV32D-NEXT: fcvt.d.w fa5, zero
+; RV32D-NEXT: fsd fa5, 0(a0)
+; RV32D-NEXT: ret
+;
+; RV64F-LABEL: zero_v1f64:
+; RV64F: # %bb.0: # %entry
+; RV64F-NEXT: sd zero, 0(a0)
+; RV64F-NEXT: ret
+;
+; RV64D-LABEL: zero_v1f64:
+; RV64D: # %bb.0: # %entry
+; RV64D-NEXT: sd zero, 0(a0)
+; RV64D-NEXT: ret
+entry:
+ store <1 x double> <double 0.0>, ptr %i, align 8
+ ret void
+}
+
+define void @zero_v2f64(ptr %i) {
+; RV32F-LABEL: zero_v2f64:
+; RV32F: # %bb.0: # %entry
+; RV32F-NEXT: lui a1, %hi(.LCPI8_0)
+; RV32F-NEXT: addi a1, a1, %lo(.LCPI8_0)
+; RV32F-NEXT: lw a2, 0(a1)
+; RV32F-NEXT: lw a1, 4(a1)
+; RV32F-NEXT: sw a2, 0(a0)
+; RV32F-NEXT: sw a1, 4(a0)
+; RV32F-NEXT: sw a2, 8(a0)
+; RV32F-NEXT: sw a1, 12(a0)
+; RV32F-NEXT: ret
+;
+; RV32D-LABEL: zero_v2f64:
+; RV32D: # %bb.0: # %entry
+; RV32D-NEXT: fcvt.d.w fa5, zero
+; RV32D-NEXT: fsd fa5, 0(a0)
+; RV32D-NEXT: fsd fa5, 8(a0)
+; RV32D-NEXT: ret
+;
+; RV64F-LABEL: zero_v2f64:
+; RV64F: # %bb.0: # %entry
+; RV64F-NEXT: sd zero, 0(a0)
+; RV64F-NEXT: sd zero, 8(a0)
+; RV64F-NEXT: ret
+;
+; RV64D-LABEL: zero_v2f64:
+; RV64D: # %bb.0: # %entry
+; RV64D-NEXT: sd zero, 0(a0)
+; RV64D-NEXT: sd zero, 8(a0)
+; RV64D-NEXT: ret
+entry:
+ store <2 x double> <double 0.0, double 0.0>, ptr %i, align 8
+ ret void
+}
+
+define void @zero_v4f64(ptr %i) {
+; RV32F-LABEL: zero_v4f64:
+; RV32F: # %bb.0: # %entry
+; RV32F-NEXT: lui a1, %hi(.LCPI9_0)
+; RV32F-NEXT: addi a1, a1, %lo(.LCPI9_0)
+; RV32F-NEXT: lw a2, 0(a1)
+; RV32F-NEXT: lw a1, 4(a1)
+; RV32F-NEXT: sw a2, 0(a0)
+; RV32F-NEXT: sw a1, 4(a0)
+; RV32F-NEXT: sw a2, 8(a0)
+; RV32F-NEXT: sw a1, 12(a0)
+; RV32F-NEXT: sw a2, 16(a0)
+; RV32F-NEXT: sw a1, 20(a0)
+; RV32F-NEXT: sw a2, 24(a0)
+; RV32F-NEXT: sw a1, 28(a0)
+; RV32F-NEXT: ret
+;
+; RV32D-LABEL: zero_v4f64:
+; RV32D: # %bb.0: # %entry
+; RV32D-NEXT: fcvt.d.w fa5, zero
+; RV32D-NEXT: fsd fa5, 0(a0)
+; RV32D-NEXT: fsd fa5, 8(a0)
+; RV32D-NEXT: fsd fa5, 16(a0)
+; RV32D-NEXT: fsd fa5, 24(a0)
+; RV32D-NEXT: ret
+;
+; RV64F-LABEL: zero_v4f64:
+; RV64F: # %bb.0: # %entry
+; RV64F-NEXT: sd zero, 0(a0)
+; RV64F-NEXT: sd zero, 8(a0)
+; RV64F-NEXT: sd zero, 16(a0)
+; RV64F-NEXT: sd zero, 24(a0)
+; RV64F-NEXT: ret
+;
+; RV64D-LABEL: zero_v4f64:
+; RV64D: # %bb.0: # %entry
+; RV64D-NEXT: sd zero, 0(a0)
+; RV64D-NEXT: sd zero, 8(a0)
+; RV64D-NEXT: sd zero, 16(a0)
+; RV64D-NEXT: sd zero, 24(a0)
+; RV64D-NEXT: ret
+entry:
+ store <4 x double> <double 0.0, double 0.0, double 0.0, double 0.0>, ptr %i, align 8
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll
index 061b2b0..abd00b6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll
@@ -11,33 +11,80 @@
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+zvfh,+experimental-zvfbfa,+v \
+; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFBFA
define <vscale x 1 x bfloat> @vfadd_vv_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb) {
-; CHECK-LABEL: vfadd_vv_nxv1bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9
-; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v9, v10
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv1bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v9, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv1bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv1bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 1 x bfloat> %va, %vb
ret <vscale x 1 x bfloat> %vc
}
define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16(<vscale x 1 x bfloat> %va, bfloat %b) {
-; CHECK-LABEL: vfadd_vf_nxv1bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vfadd.vf v9, v9, fa5
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv1bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT: vfadd.vf v9, v9, fa5
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv1bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vf v9, v9, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v9, v9, fa5
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 1 x bfloat> %head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer
%vc = fadd <vscale x 1 x bfloat> %va, %splat
@@ -45,31 +92,75 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16(<vscale x 1 x bfloat> %va, bfloa
}
define <vscale x 2 x bfloat> @vfadd_vv_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb) {
-; CHECK-LABEL: vfadd_vv_nxv2bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9
-; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v9, v10
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv2bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v9, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv2bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv2bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 2 x bfloat> %va, %vb
ret <vscale x 2 x bfloat> %vc
}
define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16(<vscale x 2 x bfloat> %va, bfloat %b) {
-; CHECK-LABEL: vfadd_vf_nxv2bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfadd.vf v9, v9, fa5
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv2bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT: vfadd.vf v9, v9, fa5
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv2bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfadd.vf v9, v9, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv2bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v9, v9, fa5
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 2 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 2 x bfloat> %head, <vscale x 2 x bfloat> poison, <vscale x 2 x i32> zeroinitializer
%vc = fadd <vscale x 2 x bfloat> %va, %splat
@@ -77,31 +168,75 @@ define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16(<vscale x 2 x bfloat> %va, bfloa
}
define <vscale x 4 x bfloat> @vfadd_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb) {
-; CHECK-LABEL: vfadd_vv_nxv4bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9
-; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vfadd.vv v10, v12, v10
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv4bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFH-NEXT: vfadd.vv v10, v12, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv4bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v10, v12, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv4bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v10, v12, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 4 x bfloat> %va, %vb
ret <vscale x 4 x bfloat> %vc
}
define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16(<vscale x 4 x bfloat> %va, bfloat %b) {
-; CHECK-LABEL: vfadd_vf_nxv4bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vfadd.vf v10, v10, fa5
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv4bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFH-NEXT: vfadd.vf v10, v10, fa5
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv4bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vf v10, v10, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv4bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v10, v10, fa5
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 4 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 4 x bfloat> %head, <vscale x 4 x bfloat> poison, <vscale x 4 x i32> zeroinitializer
%vc = fadd <vscale x 4 x bfloat> %va, %splat
@@ -109,31 +244,75 @@ define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16(<vscale x 4 x bfloat> %va, bfloa
}
define <vscale x 8 x bfloat> @vfadd_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
-; CHECK-LABEL: vfadd_vv_nxv8bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v10
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfadd.vv v12, v16, v12
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv8bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v10
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFH-NEXT: vfadd.vv v12, v16, v12
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv8bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v12, v16, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv8bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v12, v16, v12
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 8 x bfloat> %va, %vb
ret <vscale x 8 x bfloat> %vc
}
define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat %b) {
-; CHECK-LABEL: vfadd_vf_nxv8bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfadd.vf v12, v12, fa5
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv8bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFH-NEXT: vfadd.vf v12, v12, fa5
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv8bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfadd.vf v12, v12, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv8bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v12, v12, fa5
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
%vc = fadd <vscale x 8 x bfloat> %va, %splat
@@ -141,16 +320,38 @@ define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16(<vscale x 8 x bfloat> %va, bfloa
}
define <vscale x 8 x bfloat> @vfadd_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat %b) {
-; CHECK-LABEL: vfadd_fv_nxv8bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfadd.vf v12, v12, fa5
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_fv_nxv8bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFH-NEXT: vfadd.vf v12, v12, fa5
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_fv_nxv8bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfadd.vf v12, v12, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_fv_nxv8bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v12, v12, fa5
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
%vc = fadd <vscale x 8 x bfloat> %splat, %va
@@ -158,31 +359,75 @@ define <vscale x 8 x bfloat> @vfadd_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloa
}
define <vscale x 16 x bfloat> @vfadd_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb) {
-; CHECK-LABEL: vfadd_vv_nxv16bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v24, v16
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv16bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v24, v16
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv16bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv16bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 16 x bfloat> %va, %vb
ret <vscale x 16 x bfloat> %vc
}
define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16(<vscale x 16 x bfloat> %va, bfloat %b) {
-; CHECK-LABEL: vfadd_vf_nxv16bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fcvt.s.bf16 fa5, fa0
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vf v16, v16, fa5
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv16bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vf v16, v16, fa5
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv16bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vf v16, v16, fa5
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv16bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fcvt.s.bf16 fa5, fa0
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v16, v16, fa5
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 16 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 16 x bfloat> %head, <vscale x 16 x bfloat> poison, <vscale x 16 x i32> zeroinitializer
%vc = fadd <vscale x 16 x bfloat> %va, %splat
@@ -190,78 +435,216 @@ define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16(<vscale x 16 x bfloat> %va, bf
}
define <vscale x 32 x bfloat> @vfadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb) {
-; CHECK-LABEL: vfadd_vv_nxv32bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v16
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vfwcvtbf16.f.f.v v0, v8
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v20
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12
-; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v0, v0, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v0
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v16, v24
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv32bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: addi sp, sp, -16
+; ZVFH-NEXT: .cfi_def_cfa_offset 16
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 3
+; ZVFH-NEXT: sub sp, sp, a0
+; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v16
+; ZVFH-NEXT: addi a0, sp, 16
+; ZVFH-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v0, v8
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v20
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12
+; ZVFH-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v0, v0, v8
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v0
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v16, v24
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 3
+; ZVFH-NEXT: add sp, sp, a0
+; ZVFH-NEXT: .cfi_def_cfa sp, 16
+; ZVFH-NEXT: addi sp, sp, 16
+; ZVFH-NEXT: .cfi_def_cfa_offset 0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv32bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: sub sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v16
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v0, v8
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v20
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v0, v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v0
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN-NEXT: addi sp, sp, 16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv32bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: sub sp, sp, a0
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFBFA-NEXT: addi a0, sp, 16
+; ZVFBFA-NEXT: vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vfwcvt.f.f.v v0, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFBFA-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v0, v0, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v0
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 32 x bfloat> %va, %vb
ret <vscale x 32 x bfloat> %vc
}
define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bfloat %b) {
-; CHECK-LABEL: vfadd_vf_nxv32bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: fmv.x.h a0, fa0
-; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12
-; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
-; CHECK-NEXT: vmv.v.x v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v0, v8
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v0, v8, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v0
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v24, v16
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv32bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: addi sp, sp, -16
+; ZVFH-NEXT: .cfi_def_cfa_offset 16
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 3
+; ZVFH-NEXT: sub sp, sp, a0
+; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFH-NEXT: fmv.x.h a0, fa0
+; ZVFH-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFH-NEXT: addi a1, sp, 16
+; ZVFH-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v12
+; ZVFH-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; ZVFH-NEXT: vmv.v.x v8, a0
+; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v0, v8
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12
+; ZVFH-NEXT: addi a0, sp, 16
+; ZVFH-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v0, v8, v0
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v0
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v24, v16
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 3
+; ZVFH-NEXT: add sp, sp, a0
+; ZVFH-NEXT: .cfi_def_cfa sp, 16
+; ZVFH-NEXT: addi sp, sp, 16
+; ZVFH-NEXT: .cfi_def_cfa_offset 0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv32bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: sub sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: fmv.x.h a0, fa0
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFHMIN-NEXT: addi a1, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v12
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v8, a0
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v0, v8
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v0, v8, v0
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v0
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN-NEXT: addi sp, sp, 16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv32bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: sub sp, sp, a0
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFBFA-NEXT: fmv.x.h a0, fa0
+; ZVFBFA-NEXT: vsetvli a1, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT: addi a1, sp, 16
+; ZVFBFA-NEXT: vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v12
+; ZVFBFA-NEXT: vsetvli a1, zero, e16alt, m8, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v8, a0
+; ZVFBFA-NEXT: vsetvli a0, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v0, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFBFA-NEXT: addi a0, sp, 16
+; ZVFBFA-NEXT: vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v0, v8, v0
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v0
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0
%splat = shufflevector <vscale x 32 x bfloat> %head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
%vc = fadd <vscale x 32 x bfloat> %va, %splat
@@ -285,6 +668,12 @@ define <vscale x 1 x half> @vfadd_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv1f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v8, v8, v9
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 1 x half> %va, %vb
ret <vscale x 1 x half> %vc
}
@@ -306,6 +695,12 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v8, v8, fa0
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
%vc = fadd <vscale x 1 x half> %va, %splat
@@ -329,6 +724,12 @@ define <vscale x 2 x half> @vfadd_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv2f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v8, v8, v9
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 2 x half> %va, %vb
ret <vscale x 2 x half> %vc
}
@@ -350,6 +751,12 @@ define <vscale x 2 x half> @vfadd_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv2f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v8, v8, fa0
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 2 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
%vc = fadd <vscale x 2 x half> %va, %splat
@@ -373,6 +780,12 @@ define <vscale x 4 x half> @vfadd_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv4f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v8, v8, v9
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 4 x half> %va, %vb
ret <vscale x 4 x half> %vc
}
@@ -394,6 +807,12 @@ define <vscale x 4 x half> @vfadd_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv4f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v8, v8, fa0
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%vc = fadd <vscale x 4 x half> %va, %splat
@@ -417,6 +836,12 @@ define <vscale x 8 x half> @vfadd_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv8f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v8, v8, v10
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 8 x half> %va, %vb
ret <vscale x 8 x half> %vc
}
@@ -438,6 +863,12 @@ define <vscale x 8 x half> @vfadd_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv8f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v8, v8, fa0
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%vc = fadd <vscale x 8 x half> %va, %splat
@@ -461,6 +892,12 @@ define <vscale x 8 x half> @vfadd_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_fv_nxv8f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v8, v8, fa0
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%vc = fadd <vscale x 8 x half> %splat, %va
@@ -484,6 +921,12 @@ define <vscale x 16 x half> @vfadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv16f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v8, v8, v12
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 16 x half> %va, %vb
ret <vscale x 16 x half> %vc
}
@@ -505,6 +948,12 @@ define <vscale x 16 x half> @vfadd_vf_nxv16f16(<vscale x 16 x half> %va, half %b
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv16f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v8, v8, fa0
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 16 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
%vc = fadd <vscale x 16 x half> %va, %splat
@@ -549,6 +998,12 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv32f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v8, v8, v16
+; ZVFBFA-NEXT: ret
%vc = fadd <vscale x 32 x half> %va, %vb
ret <vscale x 32 x half> %vc
}
@@ -596,6 +1051,12 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv32f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli a0, zero, e16, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vf v8, v8, fa0
+; ZVFBFA-NEXT: ret
%head = insertelement <vscale x 32 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
%vc = fadd <vscale x 32 x half> %va, %splat
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
index 32e3d6b..633a201 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
@@ -11,52 +11,125 @@
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
; RUN: --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+zvfhmin,+experimental-zvfbfa,+v \
+; RUN: -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
+; RUN: --check-prefixes=CHECK,ZVFBFA
declare <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat>, <vscale x 1 x bfloat>, <vscale x 1 x i1>, i32)
define <vscale x 1 x bfloat> @vfadd_vv_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv1bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v9, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv1bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v9, v10, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv1bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv1bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
%v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %b, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x bfloat> %v
}
define <vscale x 1 x bfloat> @vfadd_vv_nxv1bf16_unmasked(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv1bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9
-; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v9, v10
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv1bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v9, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv1bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv1bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret <vscale x 1 x bfloat> %v
}
define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16(<vscale x 1 x bfloat> %va, bfloat %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv1bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v10, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv1bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vmv.v.x v9, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v10, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv1bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v9, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v10, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v10, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 1 x bfloat> %elt.head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb, <vscale x 1 x i1> %m, i32 %evl)
@@ -64,18 +137,44 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16(<vscale x 1 x bfloat> %va, bfloa
}
define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_commute(<vscale x 1 x bfloat> %va, bfloat %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv1bf16_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v8, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv1bf16_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vmv.v.x v9, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v8, v10, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv1bf16_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v9, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v8, v10, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1bf16_commute:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v8, v10, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 1 x bfloat> %elt.head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %vb, <vscale x 1 x bfloat> %va, <vscale x 1 x i1> %m, i32 %evl)
@@ -83,18 +182,44 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_commute(<vscale x 1 x bfloat> %v
}
define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_unmasked(<vscale x 1 x bfloat> %va, bfloat %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv1bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv1bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vmv.v.x v9, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v9
+; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v10, v8
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv1bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v9, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v10, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 1 x bfloat> %elt.head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -102,18 +227,44 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_unmasked(<vscale x 1 x bfloat> %
}
define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_unmasked_commute(<vscale x 1 x bfloat> %va, bfloat %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv1bf16_unmasked_commute:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v8, v10
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv1bf16_unmasked_commute:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT: vmv.v.x v9, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v9
+; ZVFH-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v8, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv1bf16_unmasked_commute:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v9, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v8, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1bf16_unmasked_commute:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v8, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 1 x bfloat> %elt.head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %vb, <vscale x 1 x bfloat> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -123,48 +274,118 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_unmasked_commute(<vscale x 1 x b
declare <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x i1>, i32)
define <vscale x 2 x bfloat> @vfadd_vv_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv2bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v9, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv2bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v9, v10, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv2bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv2bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
%v = call <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %b, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x bfloat> %v
}
define <vscale x 2 x bfloat> @vfadd_vv_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv2bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9
-; CHECK-NEXT: vfwcvtbf16.f.f.v v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v9, v10
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv2bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v9, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv2bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v9, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv2bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%v = call <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x bfloat> %v
}
define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16(<vscale x 2 x bfloat> %va, bfloat %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv2bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v10, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv2bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT: vmv.v.x v9, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v10, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv2bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v9, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v10, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv2bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v10, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 2 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 2 x bfloat> %elt.head, <vscale x 2 x bfloat> poison, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> %m, i32 %evl)
@@ -172,18 +393,44 @@ define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16(<vscale x 2 x bfloat> %va, bfloa
}
define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, bfloat %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv2bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv2bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT: vmv.v.x v9, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v9
+; ZVFH-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT: vfadd.vv v9, v10, v8
+; ZVFH-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv2bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v9, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v9, v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv2bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v10, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 2 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 2 x bfloat> %elt.head, <vscale x 2 x bfloat> poison, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
@@ -193,48 +440,118 @@ define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16_unmasked(<vscale x 2 x bfloat> %
declare <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x i1>, i32)
define <vscale x 4 x bfloat> @vfadd_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv4bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vfadd.vv v10, v12, v10, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv4bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFH-NEXT: vfadd.vv v10, v12, v10, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v10, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv4bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v10, v12, v10, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv4bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v10, v12, v10, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10, v0.t
+; ZVFBFA-NEXT: ret
%v = call <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %b, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x bfloat> %v
}
define <vscale x 4 x bfloat> @vfadd_vv_nxv4bf16_unmasked(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv4bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v9
-; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vfadd.vv v10, v12, v10
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv4bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFH-NEXT: vfadd.vv v10, v12, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv4bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v9
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v10, v12, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv4bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v10, v12, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10
+; ZVFBFA-NEXT: ret
%v = call <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
ret <vscale x 4 x bfloat> %v
}
define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16(<vscale x 4 x bfloat> %va, bfloat %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv4bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v12, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vfadd.vv v10, v10, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv4bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vmv.v.x v12, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v12, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFH-NEXT: vfadd.vv v10, v10, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v10, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv4bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v12, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v10, v10, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv4bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v12, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v10, v10, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 4 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 4 x bfloat> %elt.head, <vscale x 4 x bfloat> poison, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb, <vscale x 4 x i1> %m, i32 %evl)
@@ -242,18 +559,44 @@ define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16(<vscale x 4 x bfloat> %va, bfloa
}
define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16_unmasked(<vscale x 4 x bfloat> %va, bfloat %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv4bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v12, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v12
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vfadd.vv v10, v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv4bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT: vmv.v.x v12, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v12
+; ZVFH-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFH-NEXT: vfadd.vv v10, v10, v8
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v10
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv4bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v12, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v10, v10, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v10
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv4bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v12, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v12
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v10, v10, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 4 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 4 x bfloat> %elt.head, <vscale x 4 x bfloat> poison, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
@@ -263,48 +606,118 @@ define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16_unmasked(<vscale x 4 x bfloat> %
declare <vscale x 8 x bfloat> @llvm.vp.fadd.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x i1>, i32)
define <vscale x 8 x bfloat> @vfadd_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv8bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v10, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfadd.vv v12, v16, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv8bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v10, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFH-NEXT: vfadd.vv v12, v16, v12, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv8bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v10, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v12, v16, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv8bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v10, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v12, v16, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12, v0.t
+; ZVFBFA-NEXT: ret
%v = call <vscale x 8 x bfloat> @llvm.vp.fadd.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %b, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x bfloat> %v
}
define <vscale x 8 x bfloat> @vfadd_vv_nxv8bf16_unmasked(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv8bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v10
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfadd.vv v12, v16, v12
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv8bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v10
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFH-NEXT: vfadd.vv v12, v16, v12
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv8bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v10
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v12, v16, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv8bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v12, v16, v12
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12
+; ZVFBFA-NEXT: ret
%v = call <vscale x 8 x bfloat> @llvm.vp.fadd.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x bfloat> %v
}
define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv8bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmv.v.x v16, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfadd.vv v12, v12, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv8bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vmv.v.x v16, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v16, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFH-NEXT: vfadd.vv v12, v12, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv8bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v16, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v12, v12, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv8bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v16, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v16, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v12, v12, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 8 x bfloat> %elt.head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x bfloat> @llvm.vp.fadd.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb, <vscale x 8 x i1> %m, i32 %evl)
@@ -312,18 +725,44 @@ define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16(<vscale x 8 x bfloat> %va, bfloa
}
define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16_unmasked(<vscale x 8 x bfloat> %va, bfloat %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv8bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vmv.v.x v16, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v16
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfadd.vv v12, v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv8bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT: vmv.v.x v16, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v16
+; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFH-NEXT: vfadd.vv v12, v12, v8
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv8bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v16, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v12, v12, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv8bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v16, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v12, v12, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 8 x bfloat> %elt.head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x bfloat> @llvm.vp.fadd.nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
@@ -333,48 +772,118 @@ define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16_unmasked(<vscale x 8 x bfloat> %
declare <vscale x 16 x bfloat> @llvm.vp.fadd.nxv16bf16(<vscale x 16 x bfloat>, <vscale x 16 x bfloat>, <vscale x 16 x i1>, i32)
define <vscale x 16 x bfloat> @vfadd_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv16bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v24, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv16bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv16bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv16bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFBFA-NEXT: ret
%v = call <vscale x 16 x bfloat> @llvm.vp.fadd.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %b, <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x bfloat> %v
}
define <vscale x 16 x bfloat> @vfadd_vv_nxv16bf16_unmasked(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv16bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v24, v16
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv16bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v24, v16
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv16bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv16bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: ret
%v = call <vscale x 16 x bfloat> @llvm.vp.fadd.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
ret <vscale x 16 x bfloat> %v
}
define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16(<vscale x 16 x bfloat> %va, bfloat %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv16bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv.v.x v24, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v16, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv16bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT: vmv.v.x v24, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v24, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v16, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv16bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v24, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v16, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv16bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v24, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 16 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 16 x bfloat> %elt.head, <vscale x 16 x bfloat> poison, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x bfloat> @llvm.vp.fadd.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb, <vscale x 16 x i1> %m, i32 %evl)
@@ -382,18 +891,44 @@ define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16(<vscale x 16 x bfloat> %va, bf
}
define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16_unmasked(<vscale x 16 x bfloat> %va, bfloat %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv16bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv.v.x v24, a1
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
-; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v24
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v16, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv16bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT: vmv.v.x v24, a1
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v24
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v16, v8
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv16bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vmv.v.x v24, a1
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v24
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v16, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv16bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v24, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v24
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 16 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 16 x bfloat> %elt.head, <vscale x 16 x bfloat> poison, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x bfloat> @llvm.vp.fadd.nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
@@ -403,173 +938,493 @@ define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16_unmasked(<vscale x 16 x bfloat
declare <vscale x 32 x bfloat> @llvm.vp.fadd.nxv32bf16(<vscale x 32 x bfloat>, <vscale x 32 x bfloat>, <vscale x 32 x i1>, i32)
define <vscale x 32 x bfloat> @vfadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv32bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vmv1r.v v7, v0
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a1, a2, 1
-; CHECK-NEXT: srli a2, a2, 2
-; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: vslidedown.vx v0, v0, a2
-; CHECK-NEXT: sltu a2, a0, a3
-; CHECK-NEXT: addi a2, a2, -1
-; CHECK-NEXT: and a2, a2, a3
-; CHECK-NEXT: addi a3, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v16, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB22_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB22_2:
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v24, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v24, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv32bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: addi sp, sp, -16
+; ZVFH-NEXT: .cfi_def_cfa_offset 16
+; ZVFH-NEXT: csrr a1, vlenb
+; ZVFH-NEXT: slli a1, a1, 3
+; ZVFH-NEXT: sub sp, sp, a1
+; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFH-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; ZVFH-NEXT: vmv1r.v v7, v0
+; ZVFH-NEXT: csrr a2, vlenb
+; ZVFH-NEXT: slli a1, a2, 1
+; ZVFH-NEXT: srli a2, a2, 2
+; ZVFH-NEXT: sub a3, a0, a1
+; ZVFH-NEXT: vslidedown.vx v0, v0, a2
+; ZVFH-NEXT: sltu a2, a0, a3
+; ZVFH-NEXT: addi a2, a2, -1
+; ZVFH-NEXT: and a2, a2, a3
+; ZVFH-NEXT: addi a3, sp, 16
+; ZVFH-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFH-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
+; ZVFH-NEXT: bltu a0, a1, .LBB22_2
+; ZVFH-NEXT: # %bb.1:
+; ZVFH-NEXT: mv a0, a1
+; ZVFH-NEXT: .LBB22_2:
+; ZVFH-NEXT: vmv1r.v v0, v7
+; ZVFH-NEXT: addi a1, sp, 16
+; ZVFH-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
+; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v24, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 3
+; ZVFH-NEXT: add sp, sp, a0
+; ZVFH-NEXT: .cfi_def_cfa sp, 16
+; ZVFH-NEXT: addi sp, sp, 16
+; ZVFH-NEXT: .cfi_def_cfa_offset 0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv32bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 3
+; ZVFHMIN-NEXT: sub sp, sp, a1
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: slli a1, a2, 1
+; ZVFHMIN-NEXT: srli a2, a2, 2
+; ZVFHMIN-NEXT: sub a3, a0, a1
+; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
+; ZVFHMIN-NEXT: sltu a2, a0, a3
+; ZVFHMIN-NEXT: addi a2, a2, -1
+; ZVFHMIN-NEXT: and a2, a2, a3
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
+; ZVFHMIN-NEXT: bltu a0, a1, .LBB22_2
+; ZVFHMIN-NEXT: # %bb.1:
+; ZVFHMIN-NEXT: mv a0, a1
+; ZVFHMIN-NEXT: .LBB22_2:
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: addi a1, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v24, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN-NEXT: addi sp, sp, 16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv32bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a1, vlenb
+; ZVFBFA-NEXT: slli a1, a1, 3
+; ZVFBFA-NEXT: sub sp, sp, a1
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFBFA-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; ZVFBFA-NEXT: vmv1r.v v7, v0
+; ZVFBFA-NEXT: csrr a2, vlenb
+; ZVFBFA-NEXT: slli a1, a2, 1
+; ZVFBFA-NEXT: srli a2, a2, 2
+; ZVFBFA-NEXT: sub a3, a0, a1
+; ZVFBFA-NEXT: vslidedown.vx v0, v0, a2
+; ZVFBFA-NEXT: sltu a2, a0, a3
+; ZVFBFA-NEXT: addi a2, a2, -1
+; ZVFBFA-NEXT: and a2, a2, a3
+; ZVFBFA-NEXT: addi a3, sp, 16
+; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vsetvli zero, a2, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFBFA-NEXT: bltu a0, a1, .LBB22_2
+; ZVFBFA-NEXT: # %bb.1:
+; ZVFBFA-NEXT: mv a0, a1
+; ZVFBFA-NEXT: .LBB22_2:
+; ZVFBFA-NEXT: vmv1r.v v0, v7
+; ZVFBFA-NEXT: addi a1, sp, 16
+; ZVFBFA-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v24, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
%v = call <vscale x 32 x bfloat> @llvm.vp.fadd.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %b, <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x bfloat> %v
}
define <vscale x 32 x bfloat> @vfadd_vv_nxv32bf16_unmasked(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv32bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
-; CHECK-NEXT: vmset.m v24
-; CHECK-NEXT: slli a1, a2, 1
-; CHECK-NEXT: srli a2, a2, 2
-; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v24, a2
-; CHECK-NEXT: sltu a2, a0, a3
-; CHECK-NEXT: addi a2, a2, -1
-; CHECK-NEXT: and a2, a2, a3
-; CHECK-NEXT: addi a3, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v16, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB23_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB23_2:
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v24
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v24, v16
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vv_nxv32bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: addi sp, sp, -16
+; ZVFH-NEXT: .cfi_def_cfa_offset 16
+; ZVFH-NEXT: csrr a1, vlenb
+; ZVFH-NEXT: slli a1, a1, 3
+; ZVFH-NEXT: sub sp, sp, a1
+; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFH-NEXT: csrr a2, vlenb
+; ZVFH-NEXT: vsetvli a1, zero, e8, m4, ta, ma
+; ZVFH-NEXT: vmset.m v24
+; ZVFH-NEXT: slli a1, a2, 1
+; ZVFH-NEXT: srli a2, a2, 2
+; ZVFH-NEXT: sub a3, a0, a1
+; ZVFH-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFH-NEXT: vslidedown.vx v0, v24, a2
+; ZVFH-NEXT: sltu a2, a0, a3
+; ZVFH-NEXT: addi a2, a2, -1
+; ZVFH-NEXT: and a2, a2, a3
+; ZVFH-NEXT: addi a3, sp, 16
+; ZVFH-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFH-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
+; ZVFH-NEXT: bltu a0, a1, .LBB23_2
+; ZVFH-NEXT: # %bb.1:
+; ZVFH-NEXT: mv a0, a1
+; ZVFH-NEXT: .LBB23_2:
+; ZVFH-NEXT: addi a1, sp, 16
+; ZVFH-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
+; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v24
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v8
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v24, v16
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 3
+; ZVFH-NEXT: add sp, sp, a0
+; ZVFH-NEXT: .cfi_def_cfa sp, 16
+; ZVFH-NEXT: addi sp, sp, 16
+; ZVFH-NEXT: .cfi_def_cfa_offset 0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv32bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 3
+; ZVFHMIN-NEXT: sub sp, sp, a1
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
+; ZVFHMIN-NEXT: vmset.m v24
+; ZVFHMIN-NEXT: slli a1, a2, 1
+; ZVFHMIN-NEXT: srli a2, a2, 2
+; ZVFHMIN-NEXT: sub a3, a0, a1
+; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
+; ZVFHMIN-NEXT: sltu a2, a0, a3
+; ZVFHMIN-NEXT: addi a2, a2, -1
+; ZVFHMIN-NEXT: and a2, a2, a3
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
+; ZVFHMIN-NEXT: bltu a0, a1, .LBB23_2
+; ZVFHMIN-NEXT: # %bb.1:
+; ZVFHMIN-NEXT: mv a0, a1
+; ZVFHMIN-NEXT: .LBB23_2:
+; ZVFHMIN-NEXT: addi a1, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v24
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN-NEXT: addi sp, sp, 16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv32bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a1, vlenb
+; ZVFBFA-NEXT: slli a1, a1, 3
+; ZVFBFA-NEXT: sub sp, sp, a1
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFBFA-NEXT: csrr a2, vlenb
+; ZVFBFA-NEXT: vsetvli a1, zero, e8, m4, ta, ma
+; ZVFBFA-NEXT: vmset.m v24
+; ZVFBFA-NEXT: slli a1, a2, 1
+; ZVFBFA-NEXT: srli a2, a2, 2
+; ZVFBFA-NEXT: sub a3, a0, a1
+; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFBFA-NEXT: vslidedown.vx v0, v24, a2
+; ZVFBFA-NEXT: sltu a2, a0, a3
+; ZVFBFA-NEXT: addi a2, a2, -1
+; ZVFBFA-NEXT: and a2, a2, a3
+; ZVFBFA-NEXT: addi a3, sp, 16
+; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vsetvli zero, a2, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFBFA-NEXT: bltu a0, a1, .LBB23_2
+; ZVFBFA-NEXT: # %bb.1:
+; ZVFBFA-NEXT: mv a0, a1
+; ZVFBFA-NEXT: .LBB23_2:
+; ZVFBFA-NEXT: addi a1, sp, 16
+; ZVFBFA-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
%v = call <vscale x 32 x bfloat> @llvm.vp.fadd.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
ret <vscale x 32 x bfloat> %v
}
define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bfloat %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv32bf16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v7, v0
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: vmv.v.x v24, a1
-; CHECK-NEXT: slli a1, a2, 1
-; CHECK-NEXT: srli a2, a2, 2
-; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v0, a2
-; CHECK-NEXT: sltu a2, a0, a3
-; CHECK-NEXT: addi a2, a2, -1
-; CHECK-NEXT: and a2, a2, a3
-; CHECK-NEXT: csrr a3, vlenb
-; CHECK-NEXT: slli a3, a3, 3
-; CHECK-NEXT: add a3, sp, a3
-; CHECK-NEXT: addi a3, a3, 16
-; CHECK-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v28, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v24, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB24_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB24_2:
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v16, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v16, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv32bf16:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: addi sp, sp, -16
+; ZVFH-NEXT: .cfi_def_cfa_offset 16
+; ZVFH-NEXT: csrr a1, vlenb
+; ZVFH-NEXT: slli a1, a1, 4
+; ZVFH-NEXT: sub sp, sp, a1
+; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; ZVFH-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; ZVFH-NEXT: vmv1r.v v7, v0
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: csrr a2, vlenb
+; ZVFH-NEXT: vmv.v.x v24, a1
+; ZVFH-NEXT: slli a1, a2, 1
+; ZVFH-NEXT: srli a2, a2, 2
+; ZVFH-NEXT: sub a3, a0, a1
+; ZVFH-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFH-NEXT: vslidedown.vx v0, v0, a2
+; ZVFH-NEXT: sltu a2, a0, a3
+; ZVFH-NEXT: addi a2, a2, -1
+; ZVFH-NEXT: and a2, a2, a3
+; ZVFH-NEXT: csrr a3, vlenb
+; ZVFH-NEXT: slli a3, a3, 3
+; ZVFH-NEXT: add a3, sp, a3
+; ZVFH-NEXT: addi a3, a3, 16
+; ZVFH-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill
+; ZVFH-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v28, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v12, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
+; ZVFH-NEXT: bltu a0, a1, .LBB24_2
+; ZVFH-NEXT: # %bb.1:
+; ZVFH-NEXT: mv a0, a1
+; ZVFH-NEXT: .LBB24_2:
+; ZVFH-NEXT: vmv1r.v v0, v7
+; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t
+; ZVFH-NEXT: addi a0, sp, 16
+; ZVFH-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 3
+; ZVFH-NEXT: add a0, sp, a0
+; ZVFH-NEXT: addi a0, a0, 16
+; ZVFH-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v16, v0.t
+; ZVFH-NEXT: addi a0, sp, 16
+; ZVFH-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 4
+; ZVFH-NEXT: add sp, sp, a0
+; ZVFH-NEXT: .cfi_def_cfa sp, 16
+; ZVFH-NEXT: addi sp, sp, 16
+; ZVFH-NEXT: .cfi_def_cfa_offset 0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv32bf16:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 4
+; ZVFHMIN-NEXT: sub sp, sp, a1
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: vmv.v.x v24, a1
+; ZVFHMIN-NEXT: slli a1, a2, 1
+; ZVFHMIN-NEXT: srli a2, a2, 2
+; ZVFHMIN-NEXT: sub a3, a0, a1
+; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
+; ZVFHMIN-NEXT: sltu a2, a0, a3
+; ZVFHMIN-NEXT: addi a2, a2, -1
+; ZVFHMIN-NEXT: and a2, a2, a3
+; ZVFHMIN-NEXT: csrr a3, vlenb
+; ZVFHMIN-NEXT: slli a3, a3, 3
+; ZVFHMIN-NEXT: add a3, sp, a3
+; ZVFHMIN-NEXT: addi a3, a3, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v28, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
+; ZVFHMIN-NEXT: bltu a0, a1, .LBB24_2
+; ZVFHMIN-NEXT: # %bb.1:
+; ZVFHMIN-NEXT: mv a0, a1
+; ZVFHMIN-NEXT: .LBB24_2:
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v16, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN-NEXT: addi sp, sp, 16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv32bf16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a1, vlenb
+; ZVFBFA-NEXT: slli a1, a1, 4
+; ZVFBFA-NEXT: sub sp, sp, a1
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; ZVFBFA-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; ZVFBFA-NEXT: vmv1r.v v7, v0
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: csrr a2, vlenb
+; ZVFBFA-NEXT: vmv.v.x v24, a1
+; ZVFBFA-NEXT: slli a1, a2, 1
+; ZVFBFA-NEXT: srli a2, a2, 2
+; ZVFBFA-NEXT: sub a3, a0, a1
+; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFBFA-NEXT: vslidedown.vx v0, v0, a2
+; ZVFBFA-NEXT: sltu a2, a0, a3
+; ZVFBFA-NEXT: addi a2, a2, -1
+; ZVFBFA-NEXT: and a2, a2, a3
+; ZVFBFA-NEXT: csrr a3, vlenb
+; ZVFBFA-NEXT: slli a3, a3, 3
+; ZVFBFA-NEXT: add a3, sp, a3
+; ZVFBFA-NEXT: addi a3, a3, 16
+; ZVFBFA-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vsetvli zero, a2, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v28, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFBFA-NEXT: bltu a0, a1, .LBB24_2
+; ZVFBFA-NEXT: # %bb.1:
+; ZVFBFA-NEXT: mv a0, a1
+; ZVFBFA-NEXT: .LBB24_2:
+; ZVFBFA-NEXT: vmv1r.v v0, v7
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFBFA-NEXT: addi a0, sp, 16
+; ZVFBFA-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add a0, sp, a0
+; ZVFBFA-NEXT: addi a0, a0, 16
+; ZVFBFA-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v16, v0.t
+; ZVFBFA-NEXT: addi a0, sp, 16
+; ZVFBFA-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 4
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 32 x bfloat> %elt.head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
%v = call <vscale x 32 x bfloat> @llvm.vp.fadd.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb, <vscale x 32 x i1> %m, i32 %evl)
@@ -577,56 +1432,158 @@ define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bf
}
define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16_unmasked(<vscale x 32 x bfloat> %va, bfloat %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv32bf16_unmasked:
-; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: fmv.x.h a1, fa0
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: vsetvli a3, zero, e16, m8, ta, ma
-; CHECK-NEXT: vmset.m v24
-; CHECK-NEXT: vmv.v.x v16, a1
-; CHECK-NEXT: slli a1, a2, 1
-; CHECK-NEXT: srli a2, a2, 2
-; CHECK-NEXT: sub a3, a0, a1
-; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v24, a2
-; CHECK-NEXT: sltu a2, a0, a3
-; CHECK-NEXT: addi a2, a2, -1
-; CHECK-NEXT: and a2, a2, a3
-; CHECK-NEXT: addi a3, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
-; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v16, v24, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
-; CHECK-NEXT: bltu a0, a1, .LBB25_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a1
-; CHECK-NEXT: .LBB25_2:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v0
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vfadd.vv v16, v16, v24
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: .cfi_def_cfa sp, 16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: .cfi_def_cfa_offset 0
-; CHECK-NEXT: ret
+; ZVFH-LABEL: vfadd_vf_nxv32bf16_unmasked:
+; ZVFH: # %bb.0:
+; ZVFH-NEXT: addi sp, sp, -16
+; ZVFH-NEXT: .cfi_def_cfa_offset 16
+; ZVFH-NEXT: csrr a1, vlenb
+; ZVFH-NEXT: slli a1, a1, 3
+; ZVFH-NEXT: sub sp, sp, a1
+; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFH-NEXT: fmv.x.h a1, fa0
+; ZVFH-NEXT: csrr a2, vlenb
+; ZVFH-NEXT: vsetvli a3, zero, e16, m8, ta, ma
+; ZVFH-NEXT: vmset.m v24
+; ZVFH-NEXT: vmv.v.x v16, a1
+; ZVFH-NEXT: slli a1, a2, 1
+; ZVFH-NEXT: srli a2, a2, 2
+; ZVFH-NEXT: sub a3, a0, a1
+; ZVFH-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFH-NEXT: vslidedown.vx v0, v24, a2
+; ZVFH-NEXT: sltu a2, a0, a3
+; ZVFH-NEXT: addi a2, a2, -1
+; ZVFH-NEXT: and a2, a2, a3
+; ZVFH-NEXT: addi a3, sp, 16
+; ZVFH-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFH-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
+; ZVFH-NEXT: bltu a0, a1, .LBB25_2
+; ZVFH-NEXT: # %bb.1:
+; ZVFH-NEXT: mv a0, a1
+; ZVFH-NEXT: .LBB25_2:
+; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFH-NEXT: addi a0, sp, 16
+; ZVFH-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
+; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v0
+; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT: vfadd.vv v16, v16, v24
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 3
+; ZVFH-NEXT: add sp, sp, a0
+; ZVFH-NEXT: .cfi_def_cfa sp, 16
+; ZVFH-NEXT: addi sp, sp, 16
+; ZVFH-NEXT: .cfi_def_cfa_offset 0
+; ZVFH-NEXT: ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv32bf16_unmasked:
+; ZVFHMIN: # %bb.0:
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 3
+; ZVFHMIN-NEXT: sub sp, sp, a1
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT: fmv.x.h a1, fa0
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmset.m v24
+; ZVFHMIN-NEXT: vmv.v.x v16, a1
+; ZVFHMIN-NEXT: slli a1, a2, 1
+; ZVFHMIN-NEXT: srli a2, a2, 2
+; ZVFHMIN-NEXT: sub a3, a0, a1
+; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
+; ZVFHMIN-NEXT: sltu a2, a0, a3
+; ZVFHMIN-NEXT: addi a2, a2, -1
+; ZVFHMIN-NEXT: and a2, a2, a3
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t
+; ZVFHMIN-NEXT: bltu a0, a1, .LBB25_2
+; ZVFHMIN-NEXT: # %bb.1:
+; ZVFHMIN-NEXT: mv a0, a1
+; ZVFHMIN-NEXT: .LBB25_2:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v0
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
+; ZVFHMIN-NEXT: addi sp, sp, 16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
+; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv32bf16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a1, vlenb
+; ZVFBFA-NEXT: slli a1, a1, 3
+; ZVFBFA-NEXT: sub sp, sp, a1
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFBFA-NEXT: fmv.x.h a1, fa0
+; ZVFBFA-NEXT: csrr a2, vlenb
+; ZVFBFA-NEXT: vsetvli a3, zero, e16, m8, ta, ma
+; ZVFBFA-NEXT: vmset.m v24
+; ZVFBFA-NEXT: vmv.v.x v16, a1
+; ZVFBFA-NEXT: slli a1, a2, 1
+; ZVFBFA-NEXT: srli a2, a2, 2
+; ZVFBFA-NEXT: sub a3, a0, a1
+; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFBFA-NEXT: vslidedown.vx v0, v24, a2
+; ZVFBFA-NEXT: sltu a2, a0, a3
+; ZVFBFA-NEXT: addi a2, a2, -1
+; ZVFBFA-NEXT: and a2, a2, a3
+; ZVFBFA-NEXT: addi a3, sp, 16
+; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vsetvli zero, a2, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFBFA-NEXT: bltu a0, a1, .LBB25_2
+; ZVFBFA-NEXT: # %bb.1:
+; ZVFBFA-NEXT: mv a0, a1
+; ZVFBFA-NEXT: .LBB25_2:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT: addi a0, sp, 16
+; ZVFBFA-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v0
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0
%vb = shufflevector <vscale x 32 x bfloat> %elt.head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
%v = call <vscale x 32 x bfloat> @llvm.vp.fadd.nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
@@ -651,6 +1608,17 @@ define <vscale x 1 x half> @vfadd_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv1f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
%v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x half> %v
}
@@ -672,6 +1640,17 @@ define <vscale x 1 x half> @vfadd_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv1f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
ret <vscale x 1 x half> %v
}
@@ -695,6 +1674,19 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v10, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
@@ -720,6 +1712,19 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16_commute(<vscale x 1 x half> %va, ha
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1f16_commute:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v8, v10, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x i1> %m, i32 %evl)
@@ -745,6 +1750,19 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, h
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v10, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -770,6 +1788,19 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16_unmasked_commute(<vscale x 1 x half
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1f16_unmasked_commute:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v8, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
%v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x half> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -795,6 +1826,17 @@ define <vscale x 2 x half> @vfadd_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv2f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x half> %v
}
@@ -816,6 +1858,17 @@ define <vscale x 2 x half> @vfadd_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv2f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x half> %v
}
@@ -839,6 +1892,19 @@ define <vscale x 2 x half> @vfadd_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv2f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v10, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
@@ -864,6 +1930,19 @@ define <vscale x 2 x half> @vfadd_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, h
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv2f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v10, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
%v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
@@ -889,6 +1968,17 @@ define <vscale x 4 x half> @vfadd_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv4f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v10, v12, v10, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10, v0.t
+; ZVFBFA-NEXT: ret
%v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x half> %v
}
@@ -910,6 +2000,17 @@ define <vscale x 4 x half> @vfadd_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv4f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v10, v12, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10
+; ZVFBFA-NEXT: ret
%v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
ret <vscale x 4 x half> %v
}
@@ -933,6 +2034,19 @@ define <vscale x 4 x half> @vfadd_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv4f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v12, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v10, v10, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
@@ -958,6 +2072,19 @@ define <vscale x 4 x half> @vfadd_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, h
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv4f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v12, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v12
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v10, v10, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
%v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 true), i32 %evl)
@@ -983,6 +2110,17 @@ define <vscale x 8 x half> @vfadd_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv8f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v10, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v12, v16, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12, v0.t
+; ZVFBFA-NEXT: ret
%v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x half> %v
}
@@ -1004,6 +2142,17 @@ define <vscale x 8 x half> @vfadd_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv8f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v12, v16, v12
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12
+; ZVFBFA-NEXT: ret
%v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 true), i32 %evl)
ret <vscale x 8 x half> %v
}
@@ -1027,6 +2176,19 @@ define <vscale x 8 x half> @vfadd_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv8f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v16, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v16, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v12, v12, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
@@ -1052,6 +2214,19 @@ define <vscale x 8 x half> @vfadd_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, h
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv8f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v16, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v12, v12, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
%v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 true), i32 %evl)
@@ -1077,6 +2252,17 @@ define <vscale x 16 x half> @vfadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv16f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFBFA-NEXT: ret
%v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x half> %v
}
@@ -1098,6 +2284,17 @@ define <vscale x 16 x half> @vfadd_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv16f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: ret
%v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 true), i32 %evl)
ret <vscale x 16 x half> %v
}
@@ -1121,6 +2318,19 @@ define <vscale x 16 x half> @vfadd_vf_nxv16f16(<vscale x 16 x half> %va, half %b
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16, v0.t
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv16f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v24, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
@@ -1146,6 +2356,19 @@ define <vscale x 16 x half> @vfadd_vf_nxv16f16_unmasked(<vscale x 16 x half> %va
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv16f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v24, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v24
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
%v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 true), i32 %evl)
@@ -1209,6 +2432,55 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv32f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a1, vlenb
+; ZVFBFA-NEXT: slli a1, a1, 3
+; ZVFBFA-NEXT: sub sp, sp, a1
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFBFA-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; ZVFBFA-NEXT: vmv1r.v v7, v0
+; ZVFBFA-NEXT: csrr a2, vlenb
+; ZVFBFA-NEXT: slli a1, a2, 1
+; ZVFBFA-NEXT: srli a2, a2, 2
+; ZVFBFA-NEXT: sub a3, a0, a1
+; ZVFBFA-NEXT: vslidedown.vx v0, v0, a2
+; ZVFBFA-NEXT: sltu a2, a0, a3
+; ZVFBFA-NEXT: addi a2, a2, -1
+; ZVFBFA-NEXT: and a2, a2, a3
+; ZVFBFA-NEXT: addi a3, sp, 16
+; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFBFA-NEXT: bltu a0, a1, .LBB48_2
+; ZVFBFA-NEXT: # %bb.1:
+; ZVFBFA-NEXT: mv a0, a1
+; ZVFBFA-NEXT: .LBB48_2:
+; ZVFBFA-NEXT: vmv1r.v v0, v7
+; ZVFBFA-NEXT: addi a1, sp, 16
+; ZVFBFA-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v24, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x half> %v
}
@@ -1268,6 +2540,55 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv32f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a1, vlenb
+; ZVFBFA-NEXT: slli a1, a1, 3
+; ZVFBFA-NEXT: sub sp, sp, a1
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFBFA-NEXT: csrr a2, vlenb
+; ZVFBFA-NEXT: vsetvli a1, zero, e8, m4, ta, ma
+; ZVFBFA-NEXT: vmset.m v24
+; ZVFBFA-NEXT: slli a1, a2, 1
+; ZVFBFA-NEXT: srli a2, a2, 2
+; ZVFBFA-NEXT: sub a3, a0, a1
+; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFBFA-NEXT: vslidedown.vx v0, v24, a2
+; ZVFBFA-NEXT: sltu a2, a0, a3
+; ZVFBFA-NEXT: addi a2, a2, -1
+; ZVFBFA-NEXT: and a2, a2, a3
+; ZVFBFA-NEXT: addi a3, sp, 16
+; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFBFA-NEXT: bltu a0, a1, .LBB49_2
+; ZVFBFA-NEXT: # %bb.1:
+; ZVFBFA-NEXT: mv a0, a1
+; ZVFBFA-NEXT: .LBB49_2:
+; ZVFBFA-NEXT: addi a1, sp, 16
+; ZVFBFA-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> splat (i1 true), i32 %evl)
ret <vscale x 32 x half> %v
}
@@ -1340,6 +2661,68 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv32f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a1, vlenb
+; ZVFBFA-NEXT: slli a1, a1, 4
+; ZVFBFA-NEXT: sub sp, sp, a1
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; ZVFBFA-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; ZVFBFA-NEXT: vmv1r.v v7, v0
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: csrr a2, vlenb
+; ZVFBFA-NEXT: vmv.v.x v24, a1
+; ZVFBFA-NEXT: slli a1, a2, 1
+; ZVFBFA-NEXT: srli a2, a2, 2
+; ZVFBFA-NEXT: sub a3, a0, a1
+; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFBFA-NEXT: vslidedown.vx v0, v0, a2
+; ZVFBFA-NEXT: sltu a2, a0, a3
+; ZVFBFA-NEXT: addi a2, a2, -1
+; ZVFBFA-NEXT: and a2, a2, a3
+; ZVFBFA-NEXT: csrr a3, vlenb
+; ZVFBFA-NEXT: slli a3, a3, 3
+; ZVFBFA-NEXT: add a3, sp, a3
+; ZVFBFA-NEXT: addi a3, a3, 16
+; ZVFBFA-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v28, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFBFA-NEXT: bltu a0, a1, .LBB50_2
+; ZVFBFA-NEXT: # %bb.1:
+; ZVFBFA-NEXT: mv a0, a1
+; ZVFBFA-NEXT: .LBB50_2:
+; ZVFBFA-NEXT: vmv1r.v v0, v7
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFBFA-NEXT: addi a0, sp, 16
+; ZVFBFA-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add a0, sp, a0
+; ZVFBFA-NEXT: addi a0, a0, 16
+; ZVFBFA-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v16, v0.t
+; ZVFBFA-NEXT: addi a0, sp, 16
+; ZVFBFA-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 4
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
%v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
@@ -1403,6 +2786,57 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv32f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a1, vlenb
+; ZVFBFA-NEXT: slli a1, a1, 3
+; ZVFBFA-NEXT: sub sp, sp, a1
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: csrr a2, vlenb
+; ZVFBFA-NEXT: vsetvli a3, zero, e16, m8, ta, ma
+; ZVFBFA-NEXT: vmset.m v24
+; ZVFBFA-NEXT: vmv.v.x v16, a1
+; ZVFBFA-NEXT: slli a1, a2, 1
+; ZVFBFA-NEXT: srli a2, a2, 2
+; ZVFBFA-NEXT: sub a3, a0, a1
+; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFBFA-NEXT: vslidedown.vx v0, v24, a2
+; ZVFBFA-NEXT: sltu a2, a0, a3
+; ZVFBFA-NEXT: addi a2, a2, -1
+; ZVFBFA-NEXT: and a2, a2, a3
+; ZVFBFA-NEXT: addi a3, sp, 16
+; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFBFA-NEXT: bltu a0, a1, .LBB51_2
+; ZVFBFA-NEXT: # %bb.1:
+; ZVFBFA-NEXT: mv a0, a1
+; ZVFBFA-NEXT: .LBB51_2:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT: addi a0, sp, 16
+; ZVFBFA-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v0
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
%v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> splat (i1 true), i32 %evl)
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_shader_clock/shader_clock.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_shader_clock/shader_clock.ll
index bd07ba1..eb4cf76 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_shader_clock/shader_clock.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_shader_clock/shader_clock.ll
@@ -20,6 +20,9 @@
; CHECK: OpReadClockKHR [[v2uint]] [[uint_1]]
; CHECK: OpReadClockKHR [[v2uint]] [[uint_2]]
; CHECK: OpReadClockKHR [[v2uint]] [[uint_3]]
+; CHECK: OpReadClockKHR [[ulong]] [[uint_1]]
+; CHECK: OpReadClockKHR [[ulong]] [[uint_2]]
+; CHECK: OpReadClockKHR [[ulong]] [[uint_3]]
define dso_local spir_kernel void @test_clocks(ptr addrspace(1) nocapture noundef writeonly align 8 %out64, ptr addrspace(1) nocapture noundef writeonly align 8 %outv2) {
entry:
@@ -39,6 +42,9 @@ entry:
%call9 = tail call spir_func <2 x i32> @_Z25clock_read_hilo_sub_groupv()
%arrayidx10 = getelementptr inbounds i8, ptr addrspace(1) %outv2, i32 16
store <2 x i32> %call9, ptr addrspace(1) %arrayidx10, align 8
+ %call10 = call spir_func i64 @_Z27__spirv_ReadClockKHR_Rulongi(i32 1)
+ %call11 = call spir_func i64 @_Z27__spirv_ReadClockKHR_Rulongi(i32 2)
+ %call12 = call spir_func i64 @_Z27__spirv_ReadClockKHR_Rulongi(i32 3)
ret void
}
@@ -59,3 +65,6 @@ declare spir_func <2 x i32> @_Z26clock_read_hilo_work_groupv() local_unnamed_add
; Function Attrs: convergent nounwind
declare spir_func <2 x i32> @_Z25clock_read_hilo_sub_groupv() local_unnamed_addr
+
+; Function Attrs: nounwind
+declare spir_func i64 @_Z27__spirv_ReadClockKHR_Rulongi(i32)
diff --git a/llvm/test/CodeGen/Thumb/PR17309.ll b/llvm/test/CodeGen/Thumb/PR17309.ll
index b548499..4da25ca 100644
--- a/llvm/test/CodeGen/Thumb/PR17309.ll
+++ b/llvm/test/CodeGen/Thumb/PR17309.ll
@@ -48,7 +48,7 @@ declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #1
declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #1
-attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "frame-pointer"="non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "frame-pointer"="non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #1 = { nounwind }
-attributes #2 = { optsize "less-precise-fpmad"="false" "frame-pointer"="non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { optsize "less-precise-fpmad"="false" "frame-pointer"="non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #3 = { nounwind optsize }
diff --git a/llvm/test/CodeGen/Thumb/fastcc.ll b/llvm/test/CodeGen/Thumb/fastcc.ll
index be356d8..000e20a 100644
--- a/llvm/test/CodeGen/Thumb/fastcc.ll
+++ b/llvm/test/CodeGen/Thumb/fastcc.ll
@@ -29,7 +29,7 @@ for.body193: ; preds = %for.body193, %for.e
br label %for.body193
}
-attributes #0 = { optsize "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { optsize "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
!llvm.ident = !{!0}
diff --git a/llvm/test/CodeGen/Thumb/ldm-merge-call.ll b/llvm/test/CodeGen/Thumb/ldm-merge-call.ll
index 700b207..33c4346 100644
--- a/llvm/test/CodeGen/Thumb/ldm-merge-call.ll
+++ b/llvm/test/CodeGen/Thumb/ldm-merge-call.ll
@@ -19,6 +19,6 @@ entry:
; Function Attrs: optsize
declare i32 @bar(i32, i32, i32, i32) #1
-attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { optsize "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
+attributes #1 = { optsize "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #2 = { nounwind optsize }
diff --git a/llvm/test/CodeGen/Thumb/stack_guard_remat.ll b/llvm/test/CodeGen/Thumb/stack_guard_remat.ll
index cc14239..82314be 100644
--- a/llvm/test/CodeGen/Thumb/stack_guard_remat.ll
+++ b/llvm/test/CodeGen/Thumb/stack_guard_remat.ll
@@ -50,4 +50,4 @@ declare void @foo3(ptr)
; Function Attrs: nounwind
declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
-attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
diff --git a/llvm/test/CodeGen/Thumb/stm-merge.ll b/llvm/test/CodeGen/Thumb/stm-merge.ll
index 837c2f6..426210a 100644
--- a/llvm/test/CodeGen/Thumb/stm-merge.ll
+++ b/llvm/test/CodeGen/Thumb/stm-merge.ll
@@ -38,4 +38,4 @@ for.end8: ; preds = %for.body5
ret void
}
-attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind optsize "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vpt-block-debug.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vpt-block-debug.mir
index 4e2a275..00f0a1c 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vpt-block-debug.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/vpt-block-debug.mir
@@ -118,7 +118,7 @@
declare i32 @llvm.start.loop.iterations.i32(i32)
declare i32 @llvm.loop.decrement.reg.i32(i32, i32)
- attributes #0 = { nofree nounwind "frame-pointer"="all" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m55" "target-features"="+armv8.1-m.main,+cdecp0,+dsp,+fp-armv8d16,+fp-armv8d16sp,+fp16,+fp64,+fullfp16,+hwdiv,+lob,+mve,+mve.fp,+ras,+strict-align,+thumb-mode,+vfp2,+vfp2sp,+vfp3d16,+vfp3d16sp,+vfp4d16,+vfp4d16sp,-aes,-bf16,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-fp-armv8,-fp-armv8sp,-fp16fml,-hwdiv-arm,-i8mm,-neon,-sb,-sha2,-vfp3,-vfp3sp,-vfp4,-vfp4sp" "unsafe-fp-math"="true" }
+ attributes #0 = { nofree nounwind "frame-pointer"="all" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m55" "target-features"="+armv8.1-m.main,+cdecp0,+dsp,+fp-armv8d16,+fp-armv8d16sp,+fp16,+fp64,+fullfp16,+hwdiv,+lob,+mve,+mve.fp,+ras,+strict-align,+thumb-mode,+vfp2,+vfp2sp,+vfp3d16,+vfp3d16sp,+vfp4d16,+vfp4d16sp,-aes,-bf16,-cdecp1,-cdecp2,-cdecp3,-cdecp4,-cdecp5,-cdecp6,-cdecp7,-crc,-crypto,-d32,-dotprod,-fp-armv8,-fp-armv8sp,-fp16fml,-hwdiv-arm,-i8mm,-neon,-sb,-sha2,-vfp3,-vfp3sp,-vfp4,-vfp4sp" }
!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, retainedTypes: !2, splitDebugInlining: false, nameTableKind: None)
!1 = !DIFile(filename: "tmp.c", directory: "")
diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-1-pred.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-1-pred.mir
index 4e817ba..e48a038 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-1-pred.mir
+++ b/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-1-pred.mir
@@ -13,7 +13,7 @@
ret <4 x float> %inactive1
}
- attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-2-preds.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-2-preds.mir
index 6b5cbce..b8657c2 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-2-preds.mir
+++ b/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-2-preds.mir
@@ -16,7 +16,7 @@
declare <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, i32) #1
- attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-ctrl-flow.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-ctrl-flow.mir
index 91ccf3b..68a38a4 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-ctrl-flow.mir
+++ b/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-ctrl-flow.mir
@@ -19,7 +19,7 @@
declare <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, i32) #1
- attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-non-consecutive-ins.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-non-consecutive-ins.mir
index 1f19ed9..caa7b17 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-non-consecutive-ins.mir
+++ b/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks-non-consecutive-ins.mir
@@ -17,7 +17,7 @@
declare <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, i32) #1
- attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks.mir
index 4f75b01..2f07485 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks.mir
+++ b/llvm/test/CodeGen/Thumb2/mve-vpt-2-blocks.mir
@@ -18,7 +18,7 @@
declare <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, i32) #1
- attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-3-blocks-kill-vpr.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-3-blocks-kill-vpr.mir
index c268388..f6b64a0 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vpt-3-blocks-kill-vpr.mir
+++ b/llvm/test/CodeGen/Thumb2/mve-vpt-3-blocks-kill-vpr.mir
@@ -17,7 +17,7 @@
declare <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, i32) #1
- attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-block-1-ins.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-block-1-ins.mir
index 1e9e0e3..d086566 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vpt-block-1-ins.mir
+++ b/llvm/test/CodeGen/Thumb2/mve-vpt-block-1-ins.mir
@@ -14,7 +14,7 @@
declare <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, i32) #1
- attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-block-2-ins.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-block-2-ins.mir
index cb73cdf..5436882 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vpt-block-2-ins.mir
+++ b/llvm/test/CodeGen/Thumb2/mve-vpt-block-2-ins.mir
@@ -15,7 +15,7 @@
declare <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, i32) #1
- attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-block-4-ins.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-block-4-ins.mir
index 62d7640d..435836d 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vpt-block-4-ins.mir
+++ b/llvm/test/CodeGen/Thumb2/mve-vpt-block-4-ins.mir
@@ -17,7 +17,7 @@
declare <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, i32) #1
- attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-block-elses.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-block-elses.mir
index 130c7f4..dc195dd 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vpt-block-elses.mir
+++ b/llvm/test/CodeGen/Thumb2/mve-vpt-block-elses.mir
@@ -17,7 +17,7 @@
declare <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, i32) #1
- attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "frame-pointer"="none" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-block-fold-vcmp.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-block-fold-vcmp.mir
index 0ffed2e..ee2e58f 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vpt-block-fold-vcmp.mir
+++ b/llvm/test/CodeGen/Thumb2/mve-vpt-block-fold-vcmp.mir
@@ -18,7 +18,7 @@
declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>) #2
declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32 immarg, <4 x i1>) #3
- attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+fp-armv8d16sp,+fp16,+fpregs,+fullfp16,+hwdiv,+lob,+mve.fp,+ras,+strict-align,+thumb-mode,+vfp2sp,+vfp3d16sp,+vfp4d16sp" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+fp-armv8d16sp,+fp16,+fpregs,+fullfp16,+hwdiv,+lob,+mve.fp,+ras,+strict-align,+thumb-mode,+vfp2sp,+vfp3d16sp,+vfp4d16sp" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
attributes #2 = { argmemonly nounwind readonly willreturn }
attributes #3 = { argmemonly nounwind willreturn }
diff --git a/llvm/test/CodeGen/Thumb2/mve-vpt-block-optnone.mir b/llvm/test/CodeGen/Thumb2/mve-vpt-block-optnone.mir
index 695a8d8..ba21068 100644
--- a/llvm/test/CodeGen/Thumb2/mve-vpt-block-optnone.mir
+++ b/llvm/test/CodeGen/Thumb2/mve-vpt-block-optnone.mir
@@ -14,7 +14,7 @@
declare <4 x float> @llvm.arm.mve.vminnm.m.v4f32.v4f32.v4f32.v4f32.i32(<4 x float>, <4 x float>, <4 x float>, i32) #1
- attributes #0 = { noinline optnone nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "no-frame-pointer-elim"="false" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { noinline optnone nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "no-frame-pointer-elim"="false" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+armv8.1-m.main,+hwdiv,+mve.fp,+ras,+thumb-mode" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind }
diff --git a/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-4.ll b/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-4.ll
index 8777d51..db779de 100644
--- a/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-4.ll
+++ b/llvm/test/CodeGen/Thumb2/pacbti-m-outliner-4.ll
@@ -179,7 +179,7 @@ return: ; preds = %entry, %if.end
; CHECK-NOT: aut
; CHECK: b _Z1hii
-attributes #0 = { minsize noinline optsize "sign-return-address"="non-leaf" "denormal-fp-math"="preserve-sign,preserve-sign" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m3" "target-features"="+armv7-m,+hwdiv,+thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { minsize noinline optsize "sign-return-address"="non-leaf" "denormal-fp-math"="preserve-sign,preserve-sign" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m3" "target-features"="+armv7-m,+hwdiv,+thumb-mode" "use-soft-float"="false" }
attributes #1 = { nounwind "sign-return-address"="non-leaf" }
attributes #2 = { noreturn "sign-return-address"="non-leaf" }
diff --git a/llvm/test/CodeGen/Thumb2/stack_guard_remat.ll b/llvm/test/CodeGen/Thumb2/stack_guard_remat.ll
index 4a93c2c..0ee075c 100644
--- a/llvm/test/CodeGen/Thumb2/stack_guard_remat.ll
+++ b/llvm/test/CodeGen/Thumb2/stack_guard_remat.ll
@@ -38,7 +38,7 @@ declare void @foo3(ptr)
; Function Attrs: nounwind
declare void @llvm.lifetime.end.p0(i64, ptr nocapture)
-attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
!llvm.module.flags = !{!0}
!0 = !{i32 7, !"PIC Level", i32 2}
diff --git a/llvm/test/CodeGen/Thumb2/t2sizereduction.mir b/llvm/test/CodeGen/Thumb2/t2sizereduction.mir
index 48b75ed5..f5eb642 100644
--- a/llvm/test/CodeGen/Thumb2/t2sizereduction.mir
+++ b/llvm/test/CodeGen/Thumb2/t2sizereduction.mir
@@ -29,7 +29,7 @@
br i1 %exitcond, label %for.cond.cleanup, label %for.body
}
- attributes #0 = { norecurse nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m7" "target-features"="-d32,+dsp,+fp-armv8,-fp64,+hwdiv,+strict-align,+thumb-mode,-crc,-dotprod,-hwdiv-arm,-ras" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #0 = { norecurse nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m7" "target-features"="-d32,+dsp,+fp-armv8,-fp64,+hwdiv,+strict-align,+thumb-mode,-crc,-dotprod,-hwdiv-arm,-ras" "use-soft-float"="false" }
...
---
diff --git a/llvm/test/CodeGen/WebAssembly/memory-interleave.ll b/llvm/test/CodeGen/WebAssembly/memory-interleave.ll
index 104ec31..5eb49fd 100644
--- a/llvm/test/CodeGen/WebAssembly/memory-interleave.ll
+++ b/llvm/test/CodeGen/WebAssembly/memory-interleave.ll
@@ -2103,10 +2103,7 @@ for.body: ; preds = %entry, %for.body
; CHECK-LABEL: four_floats_same_op:
; CHECK: loop
-; CHECK: v128.load
-; CHECK: v128.load
-; CHECK: f32x4.mul
-; CHECK: v128.store
+; CHECK-NOT: v128.load
define hidden void @four_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
%cmp45.not = icmp eq i32 %N, 0
diff --git a/llvm/test/CodeGen/WebAssembly/simd-relaxed-fmax.ll b/llvm/test/CodeGen/WebAssembly/simd-relaxed-fmax.ll
index 45f4ddd..f224a0d 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-relaxed-fmax.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-relaxed-fmax.ll
@@ -54,6 +54,250 @@ define <2 x double> @test_minimumnum_f64x2(<2 x double> %a, <2 x double> %b) {
ret <2 x double> %result
}
+define <4 x float> @test_pmax_v4f32_olt(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_pmax_v4f32_olt:
+; CHECK: .functype test_pmax_v4f32_olt (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f32x4.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp olt <4 x float> %x, %y
+ %a = select <4 x i1> %c, <4 x float> %y, <4 x float> %x
+ ret <4 x float> %a
+}
+
+define <4 x float> @test_pmax_v4f32_ole(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_pmax_v4f32_ole:
+; CHECK: .functype test_pmax_v4f32_ole (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f32x4.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp ole <4 x float> %x, %y
+ %a = select <4 x i1> %c, <4 x float> %y, <4 x float> %x
+ ret <4 x float> %a
+}
+
+define <4 x float> @test_pmax_v4f32_ogt(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_pmax_v4f32_ogt:
+; CHECK: .functype test_pmax_v4f32_ogt (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f32x4.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp ogt <4 x float> %y, %x
+ %a = select <4 x i1> %c, <4 x float> %y, <4 x float> %x
+ ret <4 x float> %a
+}
+
+define <4 x float> @test_pmax_v4f32_oge(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_pmax_v4f32_oge:
+; CHECK: .functype test_pmax_v4f32_oge (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f32x4.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp oge <4 x float> %y, %x
+ %a = select <4 x i1> %c, <4 x float> %y, <4 x float> %x
+ ret <4 x float> %a
+}
+
+; For setlt
+define <4 x float> @pmax_v4f32_fast_olt(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: pmax_v4f32_fast_olt:
+; CHECK: .functype pmax_v4f32_fast_olt (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f32x4.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp fast olt <4 x float> %x, %y
+ %a = select <4 x i1> %c, <4 x float> %y, <4 x float> %x
+ ret <4 x float> %a
+}
+
+; For setle
+define <4 x float> @test_pmax_v4f32_fast_ole(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_pmax_v4f32_fast_ole:
+; CHECK: .functype test_pmax_v4f32_fast_ole (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f32x4.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp fast ole <4 x float> %x, %y
+ %a = select <4 x i1> %c, <4 x float> %y, <4 x float> %x
+ ret <4 x float> %a
+}
+
+; For setgt
+define <4 x float> @test_pmax_v4f32_fast_ogt(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_pmax_v4f32_fast_ogt:
+; CHECK: .functype test_pmax_v4f32_fast_ogt (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f32x4.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp fast ogt <4 x float> %x, %y
+ %a = select <4 x i1> %c, <4 x float> %x, <4 x float> %y
+ ret <4 x float> %a
+}
+
+; For setge
+define <4 x float> @test_pmax_v4f32_fast_oge(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_pmax_v4f32_fast_oge:
+; CHECK: .functype test_pmax_v4f32_fast_oge (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f32x4.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp fast oge <4 x float> %x, %y
+ %a = select <4 x i1> %c, <4 x float> %x, <4 x float> %y
+ ret <4 x float> %a
+}
+
+define <4 x i32> @test_pmax_int_v4f32(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: test_pmax_int_v4f32:
+; CHECK: .functype test_pmax_int_v4f32 (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: f32x4.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %fx = bitcast <4 x i32> %x to <4 x float>
+ %fy = bitcast <4 x i32> %y to <4 x float>
+ %c = fcmp olt <4 x float> %fy, %fx
+ %a = select <4 x i1> %c, <4 x i32> %x, <4 x i32> %y
+ ret <4 x i32> %a
+}
+
+define <2 x double> @test_pmax_v2f64_olt(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_pmax_v2f64_olt:
+; CHECK: .functype test_pmax_v2f64_olt (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f64x2.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp olt <2 x double> %x, %y
+ %a = select <2 x i1> %c, <2 x double> %y, <2 x double> %x
+ ret <2 x double> %a
+}
+
+define <2 x double> @test_pmax_v2f64_ole(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_pmax_v2f64_ole:
+; CHECK: .functype test_pmax_v2f64_ole (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f64x2.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp ole <2 x double> %x, %y
+ %a = select <2 x i1> %c, <2 x double> %y, <2 x double> %x
+ ret <2 x double> %a
+}
+
+define <2 x double> @test_pmax_v2f64_ogt(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_pmax_v2f64_ogt:
+; CHECK: .functype test_pmax_v2f64_ogt (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: f64x2.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp ogt <2 x double> %x, %y
+ %a = select <2 x i1> %c, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %a
+}
+define <2 x double> @test_pmax_v2f64_oge(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_pmax_v2f64_oge:
+; CHECK: .functype test_pmax_v2f64_oge (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: f64x2.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp oge <2 x double> %x, %y
+ %a = select <2 x i1> %c, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %a
+}
+
+; For setlt
+define <2 x double> @pmax_v2f64_fast_olt(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: pmax_v2f64_fast_olt:
+; CHECK: .functype pmax_v2f64_fast_olt (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f64x2.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp fast olt <2 x double> %x, %y
+ %a = select <2 x i1> %c, <2 x double> %y, <2 x double> %x
+ ret <2 x double> %a
+}
+
+; For setle
+define <2 x double> @test_pmax_v2f64_fast_ole(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_pmax_v2f64_fast_ole:
+; CHECK: .functype test_pmax_v2f64_fast_ole (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f64x2.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp fast ole <2 x double> %x, %y
+ %a = select <2 x i1> %c, <2 x double> %y, <2 x double> %x
+ ret <2 x double> %a
+}
+; For setgt
+define <2 x double> @test_pmax_v2f64_fast_ogt(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_pmax_v2f64_fast_ogt:
+; CHECK: .functype test_pmax_v2f64_fast_ogt (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f64x2.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp fast ogt <2 x double> %x, %y
+ %a = select <2 x i1> %c, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %a
+}
+
+; For setge
+define <2 x double> @test_pmax_v2f64_fast_oge(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_pmax_v2f64_fast_oge:
+; CHECK: .functype test_pmax_v2f64_fast_oge (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f64x2.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp fast oge <2 x double> %x, %y
+ %a = select <2 x i1> %c, <2 x double> %x, <2 x double> %y
+ ret <2 x double> %a
+}
+
+define <2 x i64> @test_pmax_int_v2f64(<2 x i64> %x, <2 x i64> %y) {
+; CHECK-LABEL: test_pmax_int_v2f64:
+; CHECK: .functype test_pmax_int_v2f64 (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: f64x2.relaxed_max
+; CHECK-NEXT: # fallthrough-return
+ %fx = bitcast <2 x i64> %x to <2 x double>
+ %fy = bitcast <2 x i64> %y to <2 x double>
+ %c = fcmp olt <2 x double> %fy, %fx
+ %a = select <2 x i1> %c, <2 x i64> %x, <2 x i64> %y
+ ret <2 x i64> %a
+}
+
declare <4 x float> @llvm.maxnum.v4f32(<4 x float>, <4 x float>)
declare <4 x float> @llvm.maximumnum.v4f32(<4 x float>, <4 x float>)
declare <2 x double> @llvm.maxnum.v2f64(<2 x double>, <2 x double>)
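; A minimal sketch (illustrative, not from the patch) of the pattern the pmax
; tests above exercise: a "pseudo-max" select, which the WebAssembly backend may
; lower to f32x4.relaxed_max / f64x2.relaxed_max because the relaxed instructions
; are permitted to return either operand for NaN and +/-0.0 inputs.
;
;   %c = fcmp olt <4 x float> %x, %y
;   %m = select <4 x i1> %c, <4 x float> %y, <4 x float> %x   ; pseudo-max
;
; The ole/ogt/oge variants swap the compare direction or the select operands
; accordingly; the `fast` variants additionally carry fast-math flags.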
diff --git a/llvm/test/CodeGen/WebAssembly/simd-relaxed-fmin.ll b/llvm/test/CodeGen/WebAssembly/simd-relaxed-fmin.ll
index f3eec02..4604465 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-relaxed-fmin.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-relaxed-fmin.ll
@@ -53,6 +53,252 @@ define <2 x double> @test_minimumnum_f64x2(<2 x double> %a, <2 x double> %b) {
ret <2 x double> %result
}
+define <4 x float> @test_pmin_v4f32_olt(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_pmin_v4f32_olt:
+; CHECK: .functype test_pmin_v4f32_olt (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f32x4.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp olt <4 x float> %y, %x
+ %a = select <4 x i1> %c, <4 x float> %y, <4 x float> %x
+ ret <4 x float> %a
+}
+
+define <4 x float> @test_pmin_v4f32_ole(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_pmin_v4f32_ole:
+; CHECK: .functype test_pmin_v4f32_ole (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f32x4.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp ole <4 x float> %y, %x
+ %a = select <4 x i1> %c, <4 x float> %y, <4 x float> %x
+ ret <4 x float> %a
+}
+
+define <4 x float> @test_pmin_v4f32_ogt(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_pmin_v4f32_ogt:
+; CHECK: .functype test_pmin_v4f32_ogt (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f32x4.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp ogt <4 x float> %x, %y
+ %a = select <4 x i1> %c, <4 x float> %y, <4 x float> %x
+ ret <4 x float> %a
+}
+
+define <4 x float> @test_pmin_v4f32_oge(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_pmin_v4f32_oge:
+; CHECK: .functype test_pmin_v4f32_oge (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f32x4.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp oge <4 x float> %x, %y
+ %a = select <4 x i1> %c, <4 x float> %y, <4 x float> %x
+ ret <4 x float> %a
+}
+
+; For setlt
+define <4 x float> @pmin_v4f32_fast_olt(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: pmin_v4f32_fast_olt:
+; CHECK: .functype pmin_v4f32_fast_olt (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: f32x4.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp fast olt <4 x float> %y, %x
+ %a = select <4 x i1> %c, <4 x float> %y, <4 x float> %x
+ ret <4 x float> %a
+}
+
+; For setle
+define <4 x float> @test_pmin_v4f32_fast_ole(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_pmin_v4f32_fast_ole:
+; CHECK: .functype test_pmin_v4f32_fast_ole (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: f32x4.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp fast ole <4 x float> %y, %x
+ %a = select <4 x i1> %c, <4 x float> %y, <4 x float> %x
+ ret <4 x float> %a
+}
+
+; For setgt
+define <4 x float> @test_pmin_v4f32_fast_ogt(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_pmin_v4f32_fast_ogt:
+; CHECK: .functype test_pmin_v4f32_fast_ogt (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f32x4.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp fast ogt <4 x float> %x, %y
+ %a = select <4 x i1> %c, <4 x float> %y, <4 x float> %x
+ ret <4 x float> %a
+}
+
+; For setge
+define <4 x float> @test_pmin_v4f32_fast_oge(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: test_pmin_v4f32_fast_oge:
+; CHECK: .functype test_pmin_v4f32_fast_oge (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f32x4.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp fast oge <4 x float> %x, %y
+ %a = select <4 x i1> %c, <4 x float> %y, <4 x float> %x
+ ret <4 x float> %a
+}
+
+define <4 x i32> @test_pmin_int_v4f32(<4 x i32> %x, <4 x i32> %y) {
+; CHECK-LABEL: test_pmin_int_v4f32:
+; CHECK: .functype test_pmin_int_v4f32 (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f32x4.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %fx = bitcast <4 x i32> %x to <4 x float>
+ %fy = bitcast <4 x i32> %y to <4 x float>
+ %c = fcmp olt <4 x float> %fy, %fx
+ %a = select <4 x i1> %c, <4 x i32> %y, <4 x i32> %x
+ ret <4 x i32> %a
+}
+
+define <2 x double> @test_pmin_v2f64_olt(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_pmin_v2f64_olt:
+; CHECK: .functype test_pmin_v2f64_olt (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f64x2.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp olt <2 x double> %y, %x
+ %a = select <2 x i1> %c, <2 x double> %y, <2 x double> %x
+ ret <2 x double> %a
+}
+
+define <2 x double> @test_pmin_v2f64_ole(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_pmin_v2f64_ole:
+; CHECK: .functype test_pmin_v2f64_ole (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f64x2.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp ole <2 x double> %y, %x
+ %a = select <2 x i1> %c, <2 x double> %y, <2 x double> %x
+ ret <2 x double> %a
+}
+
+define <2 x double> @test_pmin_v2f64_ogt(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_pmin_v2f64_ogt:
+; CHECK: .functype test_pmin_v2f64_ogt (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f64x2.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp ogt <2 x double> %x, %y
+ %a = select <2 x i1> %c, <2 x double> %y, <2 x double> %x
+ ret <2 x double> %a
+}
+
+define <2 x double> @test_pmin_v2f64_oge(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_pmin_v2f64_oge:
+; CHECK: .functype test_pmin_v2f64_oge (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f64x2.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp oge <2 x double> %x, %y
+ %a = select <2 x i1> %c, <2 x double> %y, <2 x double> %x
+ ret <2 x double> %a
+}
+
+; For setlt
+define <2 x double> @pmin_v2f64_fast_olt(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: pmin_v2f64_fast_olt:
+; CHECK: .functype pmin_v2f64_fast_olt (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: f64x2.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp fast olt <2 x double> %y, %x
+ %a = select <2 x i1> %c, <2 x double> %y, <2 x double> %x
+ ret <2 x double> %a
+}
+
+; For setle
+define <2 x double> @test_pmin_v2f64_fast_ole(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_pmin_v2f64_fast_ole:
+; CHECK: .functype test_pmin_v2f64_fast_ole (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: f64x2.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp fast ole <2 x double> %y, %x
+ %a = select <2 x i1> %c, <2 x double> %y, <2 x double> %x
+ ret <2 x double> %a
+}
+
+; For setgt
+define <2 x double> @test_pmin_v2f64_fast_ogt(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_pmin_v2f64_fast_ogt:
+; CHECK: .functype test_pmin_v2f64_fast_ogt (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f64x2.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp fast ogt <2 x double> %x, %y
+ %a = select <2 x i1> %c, <2 x double> %y, <2 x double> %x
+ ret <2 x double> %a
+}
+
+; For setge
+define <2 x double> @test_pmin_v2f64_fast_oge(<2 x double> %x, <2 x double> %y) {
+; CHECK-LABEL: test_pmin_v2f64_fast_oge:
+; CHECK: .functype test_pmin_v2f64_fast_oge (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f64x2.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %c = fcmp fast oge <2 x double> %x, %y
+ %a = select <2 x i1> %c, <2 x double> %y, <2 x double> %x
+ ret <2 x double> %a
+}
+
+define <2 x i64> @test_pmin_int_v2f64(<2 x i64> %x, <2 x i64> %y) {
+; CHECK-LABEL: test_pmin_int_v2f64:
+; CHECK: .functype test_pmin_int_v2f64 (v128, v128) -> (v128)
+; CHECK-NEXT: # %bb.0:
+; CHECK-NEXT: local.get 0
+; CHECK-NEXT: local.get 1
+; CHECK-NEXT: f64x2.relaxed_min
+; CHECK-NEXT: # fallthrough-return
+ %fx = bitcast <2 x i64> %x to <2 x double>
+ %fy = bitcast <2 x i64> %y to <2 x double>
+ %c = fcmp olt <2 x double> %fy, %fx
+ %a = select <2 x i1> %c, <2 x i64> %y, <2 x i64> %x
+ ret <2 x i64> %a
+}
+
declare <4 x float> @llvm.minnum.v4f32(<4 x float>, <4 x float>)
declare <4 x float> @llvm.fminimumnum.v4f32(<4 x float>, <4 x float>)
declare <2 x double> @llvm.minnum.v2f64(<2 x double>, <2 x double>)
diff --git a/llvm/test/CodeGen/X86/atomic-load-store.ll b/llvm/test/CodeGen/X86/atomic-load-store.ll
index 45277ce..9fab8b9 100644
--- a/llvm/test/CodeGen/X86/atomic-load-store.ll
+++ b/llvm/test/CodeGen/X86/atomic-load-store.ll
@@ -1,12 +1,12 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64 | FileCheck %s --check-prefixes=CHECK,CHECK-O3
-; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK,CHECK-O3
-; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,CHECK-O3
-; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,CHECK-O3
+; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK,CHECK-SSE-O3
+; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,CHECK-AVX-O3
+; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,CHECK-AVX-O3
; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64 | FileCheck %s --check-prefixes=CHECK,CHECK-O0
-; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK,CHECK-O0
-; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,CHECK-O0
-; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,CHECK-O0
+; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=CHECK,CHECK-SSE-O0
+; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=CHECK,CHECK-AVX-O0
+; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs -O0 -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,CHECK-AVX-O0
define void @test1(ptr %ptr, i32 %val1) {
; CHECK-LABEL: test1:
@@ -34,6 +34,355 @@ define i32 @test3(ptr %ptr) {
%val = load atomic i32, ptr %ptr seq_cst, align 4
ret i32 %val
}
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; CHECK-O0: {{.*}}
-; CHECK-O3: {{.*}}
+
+define <1 x i32> @atomic_vec1_i32(ptr %x) {
+; CHECK-LABEL: atomic_vec1_i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl (%rdi), %eax
+; CHECK-NEXT: retq
+ %ret = load atomic <1 x i32>, ptr %x acquire, align 4
+ ret <1 x i32> %ret
+}
+
+define <1 x i8> @atomic_vec1_i8(ptr %x) {
+; CHECK-O3-LABEL: atomic_vec1_i8:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: movzbl (%rdi), %eax
+; CHECK-O3-NEXT: retq
+;
+; CHECK-SSE-O3-LABEL: atomic_vec1_i8:
+; CHECK-SSE-O3: # %bb.0:
+; CHECK-SSE-O3-NEXT: movzbl (%rdi), %eax
+; CHECK-SSE-O3-NEXT: retq
+;
+; CHECK-AVX-O3-LABEL: atomic_vec1_i8:
+; CHECK-AVX-O3: # %bb.0:
+; CHECK-AVX-O3-NEXT: movzbl (%rdi), %eax
+; CHECK-AVX-O3-NEXT: retq
+;
+; CHECK-O0-LABEL: atomic_vec1_i8:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: movb (%rdi), %al
+; CHECK-O0-NEXT: retq
+;
+; CHECK-SSE-O0-LABEL: atomic_vec1_i8:
+; CHECK-SSE-O0: # %bb.0:
+; CHECK-SSE-O0-NEXT: movb (%rdi), %al
+; CHECK-SSE-O0-NEXT: retq
+;
+; CHECK-AVX-O0-LABEL: atomic_vec1_i8:
+; CHECK-AVX-O0: # %bb.0:
+; CHECK-AVX-O0-NEXT: movb (%rdi), %al
+; CHECK-AVX-O0-NEXT: retq
+ %ret = load atomic <1 x i8>, ptr %x acquire, align 1
+ ret <1 x i8> %ret
+}
+
+define <1 x i16> @atomic_vec1_i16(ptr %x) {
+; CHECK-O3-LABEL: atomic_vec1_i16:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-O3-NEXT: retq
+;
+; CHECK-SSE-O3-LABEL: atomic_vec1_i16:
+; CHECK-SSE-O3: # %bb.0:
+; CHECK-SSE-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-SSE-O3-NEXT: retq
+;
+; CHECK-AVX-O3-LABEL: atomic_vec1_i16:
+; CHECK-AVX-O3: # %bb.0:
+; CHECK-AVX-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-AVX-O3-NEXT: retq
+;
+; CHECK-O0-LABEL: atomic_vec1_i16:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: movw (%rdi), %ax
+; CHECK-O0-NEXT: retq
+;
+; CHECK-SSE-O0-LABEL: atomic_vec1_i16:
+; CHECK-SSE-O0: # %bb.0:
+; CHECK-SSE-O0-NEXT: movw (%rdi), %ax
+; CHECK-SSE-O0-NEXT: retq
+;
+; CHECK-AVX-O0-LABEL: atomic_vec1_i16:
+; CHECK-AVX-O0: # %bb.0:
+; CHECK-AVX-O0-NEXT: movw (%rdi), %ax
+; CHECK-AVX-O0-NEXT: retq
+ %ret = load atomic <1 x i16>, ptr %x acquire, align 2
+ ret <1 x i16> %ret
+}
+
+define <1 x i32> @atomic_vec1_i8_zext(ptr %x) {
+; CHECK-O3-LABEL: atomic_vec1_i8_zext:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: movzbl (%rdi), %eax
+; CHECK-O3-NEXT: movzbl %al, %eax
+; CHECK-O3-NEXT: retq
+;
+; CHECK-SSE-O3-LABEL: atomic_vec1_i8_zext:
+; CHECK-SSE-O3: # %bb.0:
+; CHECK-SSE-O3-NEXT: movzbl (%rdi), %eax
+; CHECK-SSE-O3-NEXT: movzbl %al, %eax
+; CHECK-SSE-O3-NEXT: retq
+;
+; CHECK-AVX-O3-LABEL: atomic_vec1_i8_zext:
+; CHECK-AVX-O3: # %bb.0:
+; CHECK-AVX-O3-NEXT: movzbl (%rdi), %eax
+; CHECK-AVX-O3-NEXT: movzbl %al, %eax
+; CHECK-AVX-O3-NEXT: retq
+;
+; CHECK-O0-LABEL: atomic_vec1_i8_zext:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: movb (%rdi), %al
+; CHECK-O0-NEXT: movzbl %al, %eax
+; CHECK-O0-NEXT: retq
+;
+; CHECK-SSE-O0-LABEL: atomic_vec1_i8_zext:
+; CHECK-SSE-O0: # %bb.0:
+; CHECK-SSE-O0-NEXT: movb (%rdi), %al
+; CHECK-SSE-O0-NEXT: movzbl %al, %eax
+; CHECK-SSE-O0-NEXT: retq
+;
+; CHECK-AVX-O0-LABEL: atomic_vec1_i8_zext:
+; CHECK-AVX-O0: # %bb.0:
+; CHECK-AVX-O0-NEXT: movb (%rdi), %al
+; CHECK-AVX-O0-NEXT: movzbl %al, %eax
+; CHECK-AVX-O0-NEXT: retq
+ %ret = load atomic <1 x i8>, ptr %x acquire, align 1
+ %zret = zext <1 x i8> %ret to <1 x i32>
+ ret <1 x i32> %zret
+}
+
+define <1 x i64> @atomic_vec1_i16_sext(ptr %x) {
+; CHECK-O3-LABEL: atomic_vec1_i16_sext:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-O3-NEXT: movswq %ax, %rax
+; CHECK-O3-NEXT: retq
+;
+; CHECK-SSE-O3-LABEL: atomic_vec1_i16_sext:
+; CHECK-SSE-O3: # %bb.0:
+; CHECK-SSE-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-SSE-O3-NEXT: movswq %ax, %rax
+; CHECK-SSE-O3-NEXT: retq
+;
+; CHECK-AVX-O3-LABEL: atomic_vec1_i16_sext:
+; CHECK-AVX-O3: # %bb.0:
+; CHECK-AVX-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-AVX-O3-NEXT: movswq %ax, %rax
+; CHECK-AVX-O3-NEXT: retq
+;
+; CHECK-O0-LABEL: atomic_vec1_i16_sext:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: movw (%rdi), %ax
+; CHECK-O0-NEXT: movswq %ax, %rax
+; CHECK-O0-NEXT: retq
+;
+; CHECK-SSE-O0-LABEL: atomic_vec1_i16_sext:
+; CHECK-SSE-O0: # %bb.0:
+; CHECK-SSE-O0-NEXT: movw (%rdi), %ax
+; CHECK-SSE-O0-NEXT: movswq %ax, %rax
+; CHECK-SSE-O0-NEXT: retq
+;
+; CHECK-AVX-O0-LABEL: atomic_vec1_i16_sext:
+; CHECK-AVX-O0: # %bb.0:
+; CHECK-AVX-O0-NEXT: movw (%rdi), %ax
+; CHECK-AVX-O0-NEXT: movswq %ax, %rax
+; CHECK-AVX-O0-NEXT: retq
+ %ret = load atomic <1 x i16>, ptr %x acquire, align 2
+ %sret = sext <1 x i16> %ret to <1 x i64>
+ ret <1 x i64> %sret
+}
+
+define <1 x ptr addrspace(270)> @atomic_vec1_ptr270(ptr %x) {
+; CHECK-LABEL: atomic_vec1_ptr270:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl (%rdi), %eax
+; CHECK-NEXT: retq
+ %ret = load atomic <1 x ptr addrspace(270)>, ptr %x acquire, align 4
+ ret <1 x ptr addrspace(270)> %ret
+}
+
+define <1 x bfloat> @atomic_vec1_bfloat(ptr %x) {
+; CHECK-O3-LABEL: atomic_vec1_bfloat:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-O3-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-O3-NEXT: retq
+;
+; CHECK-SSE-O3-LABEL: atomic_vec1_bfloat:
+; CHECK-SSE-O3: # %bb.0:
+; CHECK-SSE-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-SSE-O3-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-SSE-O3-NEXT: retq
+;
+; CHECK-AVX-O3-LABEL: atomic_vec1_bfloat:
+; CHECK-AVX-O3: # %bb.0:
+; CHECK-AVX-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-AVX-O3-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
+; CHECK-AVX-O3-NEXT: retq
+;
+; CHECK-O0-LABEL: atomic_vec1_bfloat:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: movw (%rdi), %cx
+; CHECK-O0-NEXT: # implicit-def: $eax
+; CHECK-O0-NEXT: movw %cx, %ax
+; CHECK-O0-NEXT: # implicit-def: $xmm0
+; CHECK-O0-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-O0-NEXT: retq
+;
+; CHECK-SSE-O0-LABEL: atomic_vec1_bfloat:
+; CHECK-SSE-O0: # %bb.0:
+; CHECK-SSE-O0-NEXT: movw (%rdi), %cx
+; CHECK-SSE-O0-NEXT: # implicit-def: $eax
+; CHECK-SSE-O0-NEXT: movw %cx, %ax
+; CHECK-SSE-O0-NEXT: # implicit-def: $xmm0
+; CHECK-SSE-O0-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-SSE-O0-NEXT: retq
+;
+; CHECK-AVX-O0-LABEL: atomic_vec1_bfloat:
+; CHECK-AVX-O0: # %bb.0:
+; CHECK-AVX-O0-NEXT: movw (%rdi), %cx
+; CHECK-AVX-O0-NEXT: # implicit-def: $eax
+; CHECK-AVX-O0-NEXT: movw %cx, %ax
+; CHECK-AVX-O0-NEXT: # implicit-def: $xmm0
+; CHECK-AVX-O0-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
+; CHECK-AVX-O0-NEXT: retq
+ %ret = load atomic <1 x bfloat>, ptr %x acquire, align 2
+ ret <1 x bfloat> %ret
+}
+
+define <1 x ptr> @atomic_vec1_ptr_align(ptr %x) nounwind {
+; CHECK-LABEL: atomic_vec1_ptr_align:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: retq
+ %ret = load atomic <1 x ptr>, ptr %x acquire, align 8
+ ret <1 x ptr> %ret
+}
+
+define <1 x i64> @atomic_vec1_i64_align(ptr %x) nounwind {
+; CHECK-LABEL: atomic_vec1_i64_align:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: retq
+ %ret = load atomic <1 x i64>, ptr %x acquire, align 8
+ ret <1 x i64> %ret
+}
+
+define <1 x half> @atomic_vec1_half(ptr %x) {
+; CHECK-O3-LABEL: atomic_vec1_half:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-O3-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-O3-NEXT: retq
+;
+; CHECK-SSE-O3-LABEL: atomic_vec1_half:
+; CHECK-SSE-O3: # %bb.0:
+; CHECK-SSE-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-SSE-O3-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-SSE-O3-NEXT: retq
+;
+; CHECK-AVX-O3-LABEL: atomic_vec1_half:
+; CHECK-AVX-O3: # %bb.0:
+; CHECK-AVX-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-AVX-O3-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
+; CHECK-AVX-O3-NEXT: retq
+;
+; CHECK-O0-LABEL: atomic_vec1_half:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: movw (%rdi), %cx
+; CHECK-O0-NEXT: # implicit-def: $eax
+; CHECK-O0-NEXT: movw %cx, %ax
+; CHECK-O0-NEXT: # implicit-def: $xmm0
+; CHECK-O0-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-O0-NEXT: retq
+;
+; CHECK-SSE-O0-LABEL: atomic_vec1_half:
+; CHECK-SSE-O0: # %bb.0:
+; CHECK-SSE-O0-NEXT: movw (%rdi), %cx
+; CHECK-SSE-O0-NEXT: # implicit-def: $eax
+; CHECK-SSE-O0-NEXT: movw %cx, %ax
+; CHECK-SSE-O0-NEXT: # implicit-def: $xmm0
+; CHECK-SSE-O0-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-SSE-O0-NEXT: retq
+;
+; CHECK-AVX-O0-LABEL: atomic_vec1_half:
+; CHECK-AVX-O0: # %bb.0:
+; CHECK-AVX-O0-NEXT: movw (%rdi), %cx
+; CHECK-AVX-O0-NEXT: # implicit-def: $eax
+; CHECK-AVX-O0-NEXT: movw %cx, %ax
+; CHECK-AVX-O0-NEXT: # implicit-def: $xmm0
+; CHECK-AVX-O0-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
+; CHECK-AVX-O0-NEXT: retq
+ %ret = load atomic <1 x half>, ptr %x acquire, align 2
+ ret <1 x half> %ret
+}
+
+define <1 x float> @atomic_vec1_float(ptr %x) {
+; CHECK-O3-LABEL: atomic_vec1_float:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-O3-NEXT: retq
+;
+; CHECK-SSE-O3-LABEL: atomic_vec1_float:
+; CHECK-SSE-O3: # %bb.0:
+; CHECK-SSE-O3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-SSE-O3-NEXT: retq
+;
+; CHECK-AVX-O3-LABEL: atomic_vec1_float:
+; CHECK-AVX-O3: # %bb.0:
+; CHECK-AVX-O3-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX-O3-NEXT: retq
+;
+; CHECK-O0-LABEL: atomic_vec1_float:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-O0-NEXT: retq
+;
+; CHECK-SSE-O0-LABEL: atomic_vec1_float:
+; CHECK-SSE-O0: # %bb.0:
+; CHECK-SSE-O0-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-SSE-O0-NEXT: retq
+;
+; CHECK-AVX-O0-LABEL: atomic_vec1_float:
+; CHECK-AVX-O0: # %bb.0:
+; CHECK-AVX-O0-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX-O0-NEXT: retq
+ %ret = load atomic <1 x float>, ptr %x acquire, align 4
+ ret <1 x float> %ret
+}
+
+define <1 x double> @atomic_vec1_double_align(ptr %x) nounwind {
+; CHECK-O3-LABEL: atomic_vec1_double_align:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-O3-NEXT: retq
+;
+; CHECK-SSE-O3-LABEL: atomic_vec1_double_align:
+; CHECK-SSE-O3: # %bb.0:
+; CHECK-SSE-O3-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-SSE-O3-NEXT: retq
+;
+; CHECK-AVX-O3-LABEL: atomic_vec1_double_align:
+; CHECK-AVX-O3: # %bb.0:
+; CHECK-AVX-O3-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-AVX-O3-NEXT: retq
+;
+; CHECK-O0-LABEL: atomic_vec1_double_align:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-O0-NEXT: retq
+;
+; CHECK-SSE-O0-LABEL: atomic_vec1_double_align:
+; CHECK-SSE-O0: # %bb.0:
+; CHECK-SSE-O0-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-SSE-O0-NEXT: retq
+;
+; CHECK-AVX-O0-LABEL: atomic_vec1_double_align:
+; CHECK-AVX-O0: # %bb.0:
+; CHECK-AVX-O0-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-AVX-O0-NEXT: retq
+ %ret = load atomic <1 x double>, ptr %x acquire, align 8
+ ret <1 x double> %ret
+}
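; A minimal sketch (illustrative, not from the patch) of the lowering the new
; checks above encode: a naturally aligned atomic load of a one-element vector is
; selected as an ordinary scalar load, with a register insert only for FP element
; types.
;
;   %v = load atomic <1 x i64>, ptr %p acquire, align 8
;
; becomes a single 64-bit load on x86-64:
;
;   movq (%rdi), %rax
;   retq
;
; whereas <1 x half> / <1 x bfloat> go through movzwl plus (v)pinsrw, as checked
; above.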
diff --git a/llvm/test/CodeGen/X86/basic-block-address-map-empty-function.ll b/llvm/test/CodeGen/X86/basic-block-address-map-empty-function.ll
index 4e76262..423e318 100644
--- a/llvm/test/CodeGen/X86/basic-block-address-map-empty-function.ll
+++ b/llvm/test/CodeGen/X86/basic-block-address-map-empty-function.ll
@@ -19,7 +19,7 @@ entry:
; CHECK: func:
; CHECK: .Lfunc_begin1:
; CHECK: .section .llvm_bb_addr_map,"o",@llvm_bb_addr_map,.text{{$}}
-; CHECK-NEXT: .byte 3 # version
+; CHECK-NEXT: .byte 4 # version
; BASIC-NEXT: .byte 0 # feature
; PGO-NEXT: .byte 3 # feature
; CHECK-NEXT: .quad .Lfunc_begin1 # function address
diff --git a/llvm/test/CodeGen/X86/basic-block-address-map-function-sections.ll b/llvm/test/CodeGen/X86/basic-block-address-map-function-sections.ll
index f610b04..e32e522 100644
--- a/llvm/test/CodeGen/X86/basic-block-address-map-function-sections.ll
+++ b/llvm/test/CodeGen/X86/basic-block-address-map-function-sections.ll
@@ -10,7 +10,7 @@ define dso_local i32 @_Z3barv() {
; CHECK-LABEL: _Z3barv:
; CHECK-NEXT: [[BAR_BEGIN:.Lfunc_begin[0-9]+]]:
; CHECK: .section .llvm_bb_addr_map,"o",@llvm_bb_addr_map,.text._Z3barv{{$}}
-; CHECK-NEXT: .byte 3 # version
+; CHECK-NEXT: .byte 4 # version
; CHECK-NEXT: .byte 0 # feature
; CHECK-NEXT: .quad [[BAR_BEGIN]] # function address
@@ -23,7 +23,7 @@ define dso_local i32 @_Z3foov() {
; CHECK-LABEL: _Z3foov:
; CHECK-NEXT: [[FOO_BEGIN:.Lfunc_begin[0-9]+]]:
; CHECK: .section .llvm_bb_addr_map,"o",@llvm_bb_addr_map,.text._Z3foov{{$}}
-; CHECK-NEXT: .byte 3 # version
+; CHECK-NEXT: .byte 4 # version
; CHECK-NEXT: .byte 32 # feature
; CHECK-NEXT: .quad [[FOO_BEGIN]] # function address
@@ -36,6 +36,6 @@ define linkonce_odr dso_local i32 @_Z4fooTIiET_v() comdat {
; CHECK-LABEL: _Z4fooTIiET_v:
; CHECK-NEXT: [[FOOCOMDAT_BEGIN:.Lfunc_begin[0-9]+]]:
; CHECK: .section .llvm_bb_addr_map,"oG",@llvm_bb_addr_map,.text._Z4fooTIiET_v,_Z4fooTIiET_v,comdat{{$}}
-; CHECK-NEXT: .byte 3 # version
+; CHECK-NEXT: .byte 4 # version
; CHECK-NEXT: .byte 0 # feature
; CHECK-NEXT: .quad [[FOOCOMDAT_BEGIN]] # function address
diff --git a/llvm/test/CodeGen/X86/basic-block-address-map-pgo-features.ll b/llvm/test/CodeGen/X86/basic-block-address-map-pgo-features.ll
index ba76f3e..12b1297 100644
--- a/llvm/test/CodeGen/X86/basic-block-address-map-pgo-features.ll
+++ b/llvm/test/CodeGen/X86/basic-block-address-map-pgo-features.ll
@@ -69,7 +69,7 @@ declare i32 @__gxx_personality_v0(...)
; CHECK-LABEL: .Lfunc_end0:
; CHECK: .section .llvm_bb_addr_map,"o",@llvm_bb_addr_map,.text._Z3bazb{{$}}
-; CHECK-NEXT: .byte 3 # version
+; CHECK-NEXT: .byte 4 # version
; BASIC-NEXT: .byte 32 # feature
; PGO-ALL-NEXT: .byte 39 # feature
; FEC-ONLY-NEXT:.byte 33 # feature
diff --git a/llvm/test/CodeGen/X86/basic-block-address-map-with-basic-block-sections.ll b/llvm/test/CodeGen/X86/basic-block-address-map-with-basic-block-sections.ll
index 6157f1a..aeb6dc95 100644
--- a/llvm/test/CodeGen/X86/basic-block-address-map-with-basic-block-sections.ll
+++ b/llvm/test/CodeGen/X86/basic-block-address-map-with-basic-block-sections.ll
@@ -47,7 +47,7 @@ declare i32 @__gxx_personality_v0(...)
; CHECK-LABEL: .Lfunc_end0:
; CHECK: .section .llvm_bb_addr_map,"o",@llvm_bb_addr_map,.text.hot._Z3bazb
-; CHECK-NEXT: .byte 3 # version
+; CHECK-NEXT: .byte 4 # version
; CHECK-NEXT: .byte 40 # feature
; CHECK-NEXT: .byte 2 # number of basic block ranges
; CHECK-NEXT: .quad .Lfunc_begin0 # base address
diff --git a/llvm/test/CodeGen/X86/basic-block-address-map-with-emit-bb-hash.ll b/llvm/test/CodeGen/X86/basic-block-address-map-with-emit-bb-hash.ll
new file mode 100644
index 0000000..a5678877
--- /dev/null
+++ b/llvm/test/CodeGen/X86/basic-block-address-map-with-emit-bb-hash.ll
@@ -0,0 +1,96 @@
+; Check that the basic block address map option works when used along with -emit-bb-hash.
+; RUN: llc < %s -mtriple=x86_64 -function-sections -unique-section-names=true -basic-block-address-map -emit-bb-hash | FileCheck %s --check-prefixes=CHECK,UNIQ
+
+define void @_Z3bazb(i1 zeroext, i1 zeroext) personality ptr @__gxx_personality_v0 {
+ br i1 %0, label %3, label %8
+
+3:
+ %4 = invoke i32 @_Z3barv()
+          to label %5 unwind label %6
+
+5:
+  br label %10
+
+6:
+ landingpad { ptr, i32 }
+ catch ptr null
+ br label %12
+
+8:
+ %9 = call i32 @_Z3foov()
+ br i1 %1, label %12, label %10
+
+10:
+ %11 = select i1 %1, ptr blockaddress(@_Z3bazb, %3), ptr blockaddress(@_Z3bazb, %12) ; <ptr> [#uses=1]
+ indirectbr ptr %11, [label %3, label %12]
+
+12:
+ ret void
+}
+
+declare i32 @_Z3barv() #1
+
+declare i32 @_Z3foov() #1
+
+declare i32 @__gxx_personality_v0(...)
+
+; UNIQ: .section .text._Z3bazb,"ax",@progbits{{$}}
+; NOUNIQ: .section .text,"ax",@progbits,unique,1
+; CHECK-LABEL: _Z3bazb:
+; CHECK-LABEL: .Lfunc_begin0:
+; CHECK-LABEL: .LBB_END0_0:
+; CHECK-LABEL: .LBB0_1:
+; CHECK-LABEL: .LBB0_1_CS0:
+; CHECK-LABEL: .LBB_END0_1:
+; CHECK-LABEL: .LBB0_2:
+; CHECK-LABEL: .LBB0_2_CS0:
+; CHECK-LABEL: .LBB_END0_2:
+; CHECK-LABEL: .LBB0_3:
+; CHECK-LABEL: .LBB_END0_3:
+; CHECK-LABEL: .Lfunc_end0:
+
+; UNIQ: .section .llvm_bb_addr_map,"o",@llvm_bb_addr_map,.text._Z3bazb{{$}}
+;; Verify that with -unique-section-names=false, the unique id of the text section gets assigned to the llvm_bb_addr_map section.
+; NOUNIQ: .section .llvm_bb_addr_map,"o",@llvm_bb_addr_map,.text,unique,1
+; CHECK-NEXT: .byte 4 # version
+; CHECK-NEXT: .byte 96 # feature
+; CHECK-NEXT: .quad .Lfunc_begin0 # function address
+; CHECK-NEXT: .byte 6 # number of basic blocks
+; CHECK-NEXT: .byte 0 # BB id
+; CHECK-NEXT: .uleb128 .Lfunc_begin0-.Lfunc_begin0
+; CHECK-NEXT: .byte 0 # number of callsites
+; CHECK-NEXT: .uleb128 .LBB_END0_0-.Lfunc_begin0
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .quad {{-?[0-9]+}}
+; CHECK-NEXT: .byte 1 # BB id
+; CHECK-NEXT: .uleb128 .LBB0_1-.LBB_END0_0
+; CHECK-NEXT: .byte 1 # number of callsites
+; CHECK-NEXT: .uleb128 .LBB0_1_CS0-.LBB0_1
+; CHECK-NEXT: .uleb128 .LBB_END0_1-.LBB0_1_CS0
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .quad {{-?[0-9]+}}
+; CHECK-NEXT: .byte 3 # BB id
+; CHECK-NEXT: .uleb128 .LBB0_2-.LBB_END0_1
+; CHECK-NEXT: .byte 1 # number of callsites
+; CHECK-NEXT: .uleb128 .LBB0_2_CS0-.LBB0_2
+; CHECK-NEXT: .uleb128 .LBB_END0_2-.LBB0_2_CS0
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .quad {{-?[0-9]+}}
+; CHECK-NEXT: .byte 4 # BB id
+; CHECK-NEXT: .uleb128 .LBB0_3-.LBB_END0_2
+; CHECK-NEXT: .byte 0 # number of callsites
+; CHECK-NEXT: .uleb128 .LBB_END0_3-.LBB0_3
+; CHECK-NEXT: .byte 16
+; CHECK-NEXT: .quad {{-?[0-9]+}}
+; CHECK-NEXT: .byte 5 # BB id
+; CHECK-NEXT: .uleb128 .LBB0_4-.LBB_END0_3
+; CHECK-NEXT: .byte 0 # number of callsites
+; CHECK-NEXT: .uleb128 .LBB_END0_4-.LBB0_4
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .quad {{-?[0-9]+}}
+; CHECK-NEXT: .byte 2 # BB id
+; CHECK-NEXT: .uleb128 .LBB0_5-.LBB_END0_4
+; CHECK-NEXT: .byte 0 # number of callsites
+; CHECK-NEXT: .uleb128 .LBB_END0_5-.LBB0_5
+; CHECK-NEXT: .byte 5
+; CHECK-NEXT: .quad {{-?[0-9]+}}
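; A rough reading (illustrative, not from the patch) of the per-block record
; layout that the CHECK lines above encode; field meanings are inferred from the
; label arithmetic and should be treated as a sketch, not a format specification.
;
;   .byte    <BB id>
;   .uleb128 <offset of the block start from the previous block end>
;   .byte    <number of callsites>
;   .uleb128 <callsite offsets, one per callsite>
;   .uleb128 <offset of the block end from the last callsite (or block start)>
;   .byte    <metadata>
;   .quad    <per-block hash>      ; emitted because -emit-bb-hash is passed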
diff --git a/llvm/test/CodeGen/X86/basic-block-address-map-with-mfs.ll b/llvm/test/CodeGen/X86/basic-block-address-map-with-mfs.ll
index 1e8cee4..d49b313 100644
--- a/llvm/test/CodeGen/X86/basic-block-address-map-with-mfs.ll
+++ b/llvm/test/CodeGen/X86/basic-block-address-map-with-mfs.ll
@@ -58,7 +58,7 @@ declare i32 @qux()
; CHECK-LABEL: .Lfunc_end0:
; CHECK: .section .llvm_bb_addr_map,"o",@llvm_bb_addr_map,.text.hot.foo
-; CHECK-NEXT: .byte 3 # version
+; CHECK-NEXT: .byte 4 # version
; BASIC-NEXT: .byte 40 # feature
; PGO-NEXT: .byte 47 # feature
; CHECK-NEXT: .byte 2 # number of basic block ranges
diff --git a/llvm/test/CodeGen/X86/basic-block-address-map.ll b/llvm/test/CodeGen/X86/basic-block-address-map.ll
index 5c8f3a6..64cf2c7 100644
--- a/llvm/test/CodeGen/X86/basic-block-address-map.ll
+++ b/llvm/test/CodeGen/X86/basic-block-address-map.ll
@@ -52,7 +52,7 @@ declare i32 @__gxx_personality_v0(...)
; UNIQ: .section .llvm_bb_addr_map,"o",@llvm_bb_addr_map,.text._Z3bazb{{$}}
;; Verify that with -unique-section-names=false, the unique id of the text section gets assigned to the llvm_bb_addr_map section.
; NOUNIQ: .section .llvm_bb_addr_map,"o",@llvm_bb_addr_map,.text,unique,1
-; CHECK-NEXT: .byte 3 # version
+; CHECK-NEXT: .byte 4 # version
; CHECK-NEXT: .byte 32 # feature
; CHECK-NEXT: .quad .Lfunc_begin0 # function address
; CHECK-NEXT: .byte 6 # number of basic blocks
diff --git a/llvm/test/CodeGen/X86/call-graph-section-addrtaken.ll b/llvm/test/CodeGen/X86/call-graph-section-addrtaken.ll
index 632d90d..f36baba 100644
--- a/llvm/test/CodeGen/X86/call-graph-section-addrtaken.ll
+++ b/llvm/test/CodeGen/X86/call-graph-section-addrtaken.ll
@@ -27,7 +27,7 @@ entry:
!1 = !{i64 0, !"_ZTSFivE.generalized"}
!2 = !{i64 0, !"_ZTSFviE.generalized"}
-; CHECK: .section .llvm.callgraph,"o",@progbits,.text
+; CHECK: .section .llvm.callgraph,"o",@llvm_call_graph,.text
;; Version
; CHECK-NEXT: .byte 0
;; Flags -- Potential indirect target so LSB is set to 1. Other bits are 0.
diff --git a/llvm/test/CodeGen/X86/call-graph-section-assembly.ll b/llvm/test/CodeGen/X86/call-graph-section-assembly.ll
index ed6849a..cdbad66 100644
--- a/llvm/test/CodeGen/X86/call-graph-section-assembly.ll
+++ b/llvm/test/CodeGen/X86/call-graph-section-assembly.ll
@@ -36,7 +36,7 @@ entry:
!4 = !{!5}
!5 = !{i64 0, !"_ZTSFPvS_E.generalized"}
-; CHECK: .section .llvm.callgraph,"o",@progbits,.text
+; CHECK: .section .llvm.callgraph,"o",@llvm_call_graph,.text
;; Version
; CHECK-NEXT: .byte 0
;; Flags
diff --git a/llvm/test/TableGen/intrinsic-manual-name.td b/llvm/test/TableGen/intrinsic-manual-name.td
new file mode 100644
index 0000000..5751fc2
--- /dev/null
+++ b/llvm/test/TableGen/intrinsic-manual-name.td
@@ -0,0 +1,6 @@
+// RUN: llvm-tblgen -gen-intrinsic-impl -I %p/../../include %s -DTEST_INTRINSICS_SUPPRESS_DEFS 2>&1 | FileCheck %s -DFILE=%s
+
+include "llvm/IR/Intrinsics.td"
+
+// CHECK: [[FILE]]:[[@LINE+1]]:5: note: Explicitly specified name matches default name, consider dropping it
+def int_foo0 : Intrinsic<[llvm_anyint_ty], [], [], "llvm.foo0">;
diff --git a/llvm/test/ThinLTO/X86/devirt_external_comdat_same_guid.ll b/llvm/test/ThinLTO/X86/devirt_external_comdat_same_guid.ll
index 1f0737b..d0b8d14 100644
--- a/llvm/test/ThinLTO/X86/devirt_external_comdat_same_guid.ll
+++ b/llvm/test/ThinLTO/X86/devirt_external_comdat_same_guid.ll
@@ -24,9 +24,9 @@
; RUN: llvm-dis %t5.1.4.opt.bc -o - | FileCheck %s --check-prefix=CHECK-IR1
; RUN: llvm-dis %t5.2.4.opt.bc -o - | FileCheck %s --check-prefix=CHECK-IR2
-; PRINT-DAG: Devirtualized call to {{.*}} (_ZN1A1nEi)
+; PRINT-DAG: Devirtualized call to {{.*}} (_ZN1B1nEi)
-; REMARK-DAG: single-impl: devirtualized a call to _ZN1A1nEi
+; REMARK-DAG: single-impl: devirtualized a call to _ZN1B1nEi
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-grtev4-linux-gnu"
@@ -55,7 +55,7 @@ entry:
ret i32 0
}
-; CHECK-IR1: define i32 @test(
+; CHECK-IR1: define noundef i32 @test(
define i32 @test(ptr %obj, i32 %a) {
entry:
%vtable = load ptr, ptr %obj
@@ -65,7 +65,7 @@ entry:
%fptr1 = load ptr, ptr %fptrptr, align 8
; Check that the call was devirtualized.
- ; CHECK-IR1: tail call i32 {{.*}}@_ZN1A1nEi
+ ; CHECK-IR1: tail call i32 {{.*}}@_ZN1B1nEi
%call = tail call i32 %fptr1(ptr nonnull %obj, i32 %a)
ret i32 %call
@@ -73,7 +73,7 @@ entry:
; CHECK-IR2: define i32 @test2
; Check that the call was devirtualized.
-; CHECK-IR2: tail call i32 @_ZN1A1nEi
+; CHECK-IR2: tail call i32 @_ZN1B1nEi
declare i1 @llvm.type.test(ptr, metadata)
declare void @llvm.assume(i1)
diff --git a/llvm/test/ThinLTO/X86/dtlto/json.ll b/llvm/test/ThinLTO/X86/dtlto/json.ll
index 1a38438..ee1c428 100644
--- a/llvm/test/ThinLTO/X86/dtlto/json.ll
+++ b/llvm/test/ThinLTO/X86/dtlto/json.ll
@@ -7,21 +7,33 @@ RUN: rm -rf %t && split-file %s %t && cd %t
RUN: opt -thinlto-bc t1.ll -o t1.bc
RUN: opt -thinlto-bc t2.ll -o t2.bc
-; Perform DTLTO.
+; Perform DTLTO with clang.
RUN: not llvm-lto2 run t1.bc t2.bc -o my.output \
RUN: -r=t1.bc,t1,px -r=t2.bc,t2,px \
RUN: -dtlto-distributor=%python \
RUN: -dtlto-distributor-arg=%llvm_src_root/utils/dtlto/validate.py,--da1=10,--da2=10 \
RUN: -dtlto-compiler=my_clang.exe \
RUN: -dtlto-compiler-arg=--rota1=10,--rota2=20 \
-RUN: 2>&1 | FileCheck %s
+RUN: 2>&1 | FileCheck --check-prefixes=CHECK,CLANG %s
+
+; Perform DTLTO with LLVM driver.
+RUN: not llvm-lto2 run t1.bc t2.bc -o my.output \
+RUN: -r=t1.bc,t1,px -r=t2.bc,t2,px \
+RUN: -dtlto-distributor=%python \
+RUN: -dtlto-distributor-arg=%llvm_src_root/utils/dtlto/validate.py,--da1=10,--da2=10 \
+RUN: -dtlto-compiler=llvm \
+RUN: -dtlto-compiler-prepend-arg=clang \
+RUN: -dtlto-compiler-arg=--rota1=10,--rota2=20 \
+RUN: 2>&1 | FileCheck --check-prefixes=CHECK,LLVM %s
CHECK: distributor_args=['--da1=10', '--da2=10']
; Check the common object.
CHECK: "linker_output": "my.output"
CHECK: "args":
-CHECK-NEXT: "my_clang.exe"
+CLANG-NEXT: "my_clang.exe"
+LLVM-NEXT: "llvm"
+LLVM-NEXT: "clang"
CHECK-NEXT: "-c"
CHECK-NEXT: "--target=x86_64-unknown-linux-gnu"
CHECK-NEXT: "-O2"
@@ -30,7 +42,7 @@ CHECK-NEXT: "-Wno-unused-command-line-argument"
CHECK-NEXT: "--rota1=10"
CHECK-NEXT: "--rota2=20"
CHECK-NEXT: ]
-CHECK: "inputs": []
+CHECK: "inputs": []
; Check the first job entry.
CHECK: "args":
diff --git a/llvm/test/Transforms/ADCE/2016-09-06.ll b/llvm/test/Transforms/ADCE/2016-09-06.ll
index 850f412..1329ac6 100644
--- a/llvm/test/Transforms/ADCE/2016-09-06.ll
+++ b/llvm/test/Transforms/ADCE/2016-09-06.ll
@@ -5,7 +5,7 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind uwtable
-define i32 @foo(i32, i32, i32) #0 {
+define i32 @foo(i32, i32, i32) {
%4 = alloca i32, align 4
%5 = alloca i32, align 4
%6 = alloca i32, align 4
@@ -48,8 +48,6 @@ B21:
ret i32 %I22
}
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.ident = !{!0}
!0 = !{!"clang version 4.0.0"}
diff --git a/llvm/test/Transforms/ADCE/blocks-with-dead-term-nondeterministic.ll b/llvm/test/Transforms/ADCE/blocks-with-dead-term-nondeterministic.ll
index 9708be9..5e844b4 100644
--- a/llvm/test/Transforms/ADCE/blocks-with-dead-term-nondeterministic.ll
+++ b/llvm/test/Transforms/ADCE/blocks-with-dead-term-nondeterministic.ll
@@ -5,7 +5,7 @@ target triple = "x86_64-apple-macosx10.10.0"
; CHECK: uselistorder label %bb16, { 1, 0 }
; Function Attrs: noinline nounwind ssp uwtable
-define void @ham(i1 %arg) local_unnamed_addr #0 {
+define void @ham(i1 %arg) local_unnamed_addr {
bb:
br i1 false, label %bb1, label %bb22
@@ -64,8 +64,6 @@ bb22: ; preds = %bb21, %bb
ret void
}
-attributes #0 = { noinline nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.module.flags = !{!0}
!0 = !{i32 7, !"PIC Level", i32 2}
diff --git a/llvm/test/Transforms/AddDiscriminators/basic.ll b/llvm/test/Transforms/AddDiscriminators/basic.ll
index 5186537..fc4c10a 100644
--- a/llvm/test/Transforms/AddDiscriminators/basic.ll
+++ b/llvm/test/Transforms/AddDiscriminators/basic.ll
@@ -11,7 +11,7 @@
; if (i < 10) x = i;
; }
-define void @foo(i32 %i) #0 !dbg !4 {
+define void @foo(i32 %i) !dbg !4 {
entry:
%i.addr = alloca i32, align 4
%x = alloca i32, align 4
@@ -35,8 +35,6 @@ if.end: ; preds = %if.then, %entry
; CHECK: ret void, !dbg ![[END:[0-9]+]]
}
-attributes #0 = { nounwind uwtable noinline optnone "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!7, !8}
!llvm.ident = !{!9}
diff --git a/llvm/test/Transforms/AddDiscriminators/call-nested.ll b/llvm/test/Transforms/AddDiscriminators/call-nested.ll
index 99340a5..f1373e4 100644
--- a/llvm/test/Transforms/AddDiscriminators/call-nested.ll
+++ b/llvm/test/Transforms/AddDiscriminators/call-nested.ll
@@ -9,7 +9,7 @@
; #6 }
; Function Attrs: uwtable
-define i32 @_Z3bazv() #0 !dbg !4 {
+define i32 @_Z3bazv() !dbg !4 {
%1 = call i32 @_Z3barv(), !dbg !11
; CHECK: %1 = call i32 @_Z3barv(), !dbg ![[CALL0:[0-9]+]]
%2 = call i32 @_Z3barv(), !dbg !12
@@ -19,12 +19,9 @@ define i32 @_Z3bazv() #0 !dbg !4 {
ret i32 %3, !dbg !14
}
-declare i32 @_Z3fooii(i32, i32) #1
+declare i32 @_Z3fooii(i32, i32)
-declare i32 @_Z3barv() #1
-
-attributes #0 = { uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+declare i32 @_Z3barv()
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!8, !9}
diff --git a/llvm/test/Transforms/AddDiscriminators/call.ll b/llvm/test/Transforms/AddDiscriminators/call.ll
index 93d3aa4..11b21ef 100644
--- a/llvm/test/Transforms/AddDiscriminators/call.ll
+++ b/llvm/test/Transforms/AddDiscriminators/call.ll
@@ -8,7 +8,7 @@
; #5 }
; Function Attrs: uwtable
-define void @_Z3foov() #0 !dbg !4 {
+define void @_Z3foov() !dbg !4 {
call void @_Z3barv(), !dbg !10
; CHECK: call void @_Z3barv(), !dbg ![[CALL0:[0-9]+]]
%a = alloca [100 x i8], align 16
@@ -21,13 +21,10 @@ define void @_Z3foov() #0 !dbg !4 {
ret void, !dbg !13
}
-declare void @_Z3barv() #1
+declare void @_Z3barv()
declare void @llvm.lifetime.start.p0(ptr nocapture) nounwind argmemonly
declare void @llvm.lifetime.end.p0(ptr nocapture) nounwind argmemonly
-attributes #0 = { uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!7, !8}
!llvm.ident = !{!9}
diff --git a/llvm/test/Transforms/AddDiscriminators/diamond.ll b/llvm/test/Transforms/AddDiscriminators/diamond.ll
index c93a57a..9edcf39 100644
--- a/llvm/test/Transforms/AddDiscriminators/diamond.ll
+++ b/llvm/test/Transforms/AddDiscriminators/diamond.ll
@@ -12,7 +12,7 @@
; bar(3): discriminator 2
; Function Attrs: uwtable
-define void @_Z3fooi(i32 %i) #0 !dbg !4 {
+define void @_Z3fooi(i32 %i) !dbg !4 {
%1 = alloca i32, align 4
store i32 %i, ptr %1, align 4
call void @llvm.dbg.declare(metadata ptr %1, metadata !11, metadata !12), !dbg !13
@@ -34,13 +34,9 @@ define void @_Z3fooi(i32 %i) #0 !dbg !4 {
}
; Function Attrs: nounwind readnone
-declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
-declare void @_Z3bari(i32) #2
-
-attributes #0 = { uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone }
-attributes #2 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
+declare void @_Z3bari(i32)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!8, !9}
diff --git a/llvm/test/Transforms/AddDiscriminators/first-only.ll b/llvm/test/Transforms/AddDiscriminators/first-only.ll
index 7ae9ed0..415e5f0 100644
--- a/llvm/test/Transforms/AddDiscriminators/first-only.ll
+++ b/llvm/test/Transforms/AddDiscriminators/first-only.ll
@@ -13,7 +13,7 @@
; }
; }
-define void @foo(i32 %i) #0 !dbg !4 {
+define void @foo(i32 %i) !dbg !4 {
entry:
%i.addr = alloca i32, align 4
%x = alloca i32, align 4
@@ -44,8 +44,6 @@ if.end: ; preds = %if.then, %entry
; CHECK: ret void, !dbg ![[END:[0-9]+]]
}
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!7, !8}
!llvm.ident = !{!9}
diff --git a/llvm/test/Transforms/AddDiscriminators/invoke.ll b/llvm/test/Transforms/AddDiscriminators/invoke.ll
index d39014d..a3989b6 100644
--- a/llvm/test/Transforms/AddDiscriminators/invoke.ll
+++ b/llvm/test/Transforms/AddDiscriminators/invoke.ll
@@ -5,14 +5,14 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.14.0"
; Function Attrs: ssp uwtable
-define void @_Z3foov() #0 personality ptr @__gxx_personality_v0 !dbg !8 {
+define void @_Z3foov() personality ptr @__gxx_personality_v0 !dbg !8 {
entry:
%exn.slot = alloca ptr
%ehselector.slot = alloca i32
; CHECK: call void @_Z12bar_noexceptv({{.*}} !dbg ![[CALL1:[0-9]+]]
- call void @_Z12bar_noexceptv() #4, !dbg !11
+ call void @_Z12bar_noexceptv(), !dbg !11
; CHECK: call void @_Z12bar_noexceptv({{.*}} !dbg ![[CALL2:[0-9]+]]
- call void @_Z12bar_noexceptv() #4, !dbg !13
+ call void @_Z12bar_noexceptv(), !dbg !13
invoke void @_Z3barv()
; CHECK: unwind label {{.*}} !dbg ![[INVOKE:[0-9]+]]
to label %invoke.cont unwind label %lpad, !dbg !14
@@ -31,8 +31,8 @@ lpad: ; preds = %entry
catch: ; preds = %lpad
%exn = load ptr, ptr %exn.slot, align 8, !dbg !15
- %3 = call ptr @__cxa_begin_catch(ptr %exn) #4, !dbg !15
- invoke void @__cxa_rethrow() #5
+ %3 = call ptr @__cxa_begin_catch(ptr %exn), !dbg !15
+ invoke void @__cxa_rethrow()
to label %unreachable unwind label %lpad1, !dbg !17
lpad1: ; preds = %catch
@@ -62,7 +62,7 @@ terminate.lpad: ; preds = %lpad1
%7 = landingpad { ptr, i32 }
catch ptr null, !dbg !20
%8 = extractvalue { ptr, i32 } %7, 0, !dbg !20
- call void @__clang_call_terminate(ptr %8) #6, !dbg !20
+ call void @__clang_call_terminate(ptr %8), !dbg !20
unreachable, !dbg !20
unreachable: ; preds = %catch
@@ -70,9 +70,9 @@ unreachable: ; preds = %catch
}
; Function Attrs: nounwind
-declare void @_Z12bar_noexceptv() #1
+declare void @_Z12bar_noexceptv()
-declare void @_Z3barv() #2
+declare void @_Z3barv()
declare i32 @__gxx_personality_v0(...)
@@ -83,22 +83,14 @@ declare void @__cxa_rethrow()
declare void @__cxa_end_catch()
; Function Attrs: noinline noreturn nounwind
-define linkonce_odr hidden void @__clang_call_terminate(ptr) #3 {
- %2 = call ptr @__cxa_begin_catch(ptr %0) #4
- call void @_ZSt9terminatev() #6
+define linkonce_odr hidden void @__clang_call_terminate(ptr) {
+ %2 = call ptr @__cxa_begin_catch(ptr %0)
+ call void @_ZSt9terminatev()
unreachable
}
declare void @_ZSt9terminatev()
-attributes #0 = { ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { noinline noreturn nounwind }
-attributes #4 = { nounwind }
-attributes #5 = { noreturn }
-attributes #6 = { noreturn nounwind }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4, !5, !6}
!llvm.ident = !{!7}
diff --git a/llvm/test/Transforms/AddDiscriminators/multiple.ll b/llvm/test/Transforms/AddDiscriminators/multiple.ll
index 54c1a5d..8e8ca6a 100644
--- a/llvm/test/Transforms/AddDiscriminators/multiple.ll
+++ b/llvm/test/Transforms/AddDiscriminators/multiple.ll
@@ -10,7 +10,7 @@
; The two stores inside the if-then-else line must have different discriminator
; values.
-define void @foo(i32 %i) #0 !dbg !4 {
+define void @foo(i32 %i) !dbg !4 {
entry:
%i.addr = alloca i32, align 4
%x = alloca i32, align 4
@@ -45,8 +45,6 @@ if.end: ; preds = %if.else, %if.then
ret void, !dbg !12
}
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!7, !8}
!llvm.ident = !{!9}
diff --git a/llvm/test/Transforms/AddDiscriminators/no-discriminators.ll b/llvm/test/Transforms/AddDiscriminators/no-discriminators.ll
index c23edd6..f84579b 100644
--- a/llvm/test/Transforms/AddDiscriminators/no-discriminators.ll
+++ b/llvm/test/Transforms/AddDiscriminators/no-discriminators.ll
@@ -12,7 +12,7 @@
; altered. If they are, it means that the discriminators pass added a
; new lexical scope.
-define i32 @foo(i64 %i) #0 !dbg !4 {
+define i32 @foo(i64 %i) !dbg !4 {
entry:
%retval = alloca i32, align 4
%i.addr = alloca i64, align 8
@@ -39,10 +39,7 @@ return: ; preds = %if.else, %if.then
}
; Function Attrs: nounwind readnone
-declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
-
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone }
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
; We should be able to add discriminators even in the absence of llvm.dbg.cu.
; When using sample profiles, the front end will generate line tables but it
diff --git a/llvm/test/Transforms/AddDiscriminators/oneline.ll b/llvm/test/Transforms/AddDiscriminators/oneline.ll
index 533d547..fc1675b 100644
--- a/llvm/test/Transforms/AddDiscriminators/oneline.ll
+++ b/llvm/test/Transforms/AddDiscriminators/oneline.ll
@@ -10,7 +10,7 @@
; return 100: discriminator 4
; return 99: discriminator 6
-define i32 @_Z3fooi(i32 %i) #0 !dbg !4 {
+define i32 @_Z3fooi(i32 %i) !dbg !4 {
%1 = alloca i32, align 4
%2 = alloca i32, align 4
store i32 %i, ptr %2, align 4, !tbaa !13
@@ -49,10 +49,7 @@ define i32 @_Z3fooi(i32 %i) #0 !dbg !4 {
}
; Function Attrs: nounwind readnone
-declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
-
-attributes #0 = { nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone }
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!10, !11}
diff --git a/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll b/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll
index eb7d78f..4704238 100644
--- a/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll
+++ b/llvm/test/Transforms/Attributor/reduced/register_benchmark_test.ll
@@ -1557,24 +1557,24 @@ declare dso_local void @_GLOBAL__sub_I_register_benchmark_test.cc() #0 section "
; Function Attrs: cold noreturn nounwind
declare void @llvm.trap() #20
-attributes #0 = { uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
+attributes #1 = { "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
+attributes #2 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
attributes #3 = { nounwind }
-attributes #4 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #4 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
attributes #5 = { argmemonly nounwind willreturn }
-attributes #6 = { alwaysinline uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #7 = { alwaysinline nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #8 = { nobuiltin "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #9 = { nobuiltin nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #10 = { inlinehint uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #11 = { inlinehint nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #12 = { noreturn nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #13 = { norecurse uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #6 = { alwaysinline uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
+attributes #7 = { alwaysinline nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
+attributes #8 = { nobuiltin "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
+attributes #9 = { nobuiltin nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
+attributes #10 = { inlinehint uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
+attributes #11 = { inlinehint nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
+attributes #12 = { noreturn nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
+attributes #13 = { norecurse uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
attributes #14 = { nounwind readnone willreturn }
-attributes #15 = { noreturn "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #15 = { noreturn "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
attributes #16 = { noinline noreturn nounwind }
attributes #17 = { argmemonly nounwind willreturn writeonly }
-attributes #18 = { noreturn uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #19 = { inlinehint noreturn uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #18 = { noreturn uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
+attributes #19 = { inlinehint noreturn uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="ieee,ieee" "denormal-fp-math-f32"="ieee,ieee" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-jump-tables"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
attributes #20 = { cold noreturn nounwind }
diff --git a/llvm/test/Transforms/CodeGenPrepare/ARM/bitreverse-recognize.ll b/llvm/test/Transforms/CodeGenPrepare/ARM/bitreverse-recognize.ll
index d272fef..189186b 100644
--- a/llvm/test/Transforms/CodeGenPrepare/ARM/bitreverse-recognize.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/ARM/bitreverse-recognize.ll
@@ -4,7 +4,7 @@ target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "armv7--linux-gnueabihf"
; CHECK-LABEL: @f
-define i32 @f(i32 %a) #0 {
+define i32 @f(i32 %a) {
; CHECK: call i32 @llvm.bitreverse.i32
entry:
br label %for.body
@@ -25,8 +25,6 @@ for.body: ; preds = %for.body, %entry
br i1 %exitcond, label %for.cond.cleanup, label %for.body, !llvm.loop !3
}
-attributes #0 = { norecurse nounwind readnone "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a8" "target-features"="+dsp,+neon,+vfp3" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.module.flags = !{!0, !1}
!llvm.ident = !{!2}
diff --git a/llvm/test/Transforms/CodeGenPrepare/X86/bitreverse-hang.ll b/llvm/test/Transforms/CodeGenPrepare/X86/bitreverse-hang.ll
index eec0967..35115cf 100644
--- a/llvm/test/Transforms/CodeGenPrepare/X86/bitreverse-hang.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/X86/bitreverse-hang.ll
@@ -21,7 +21,7 @@
@b = common global i32 0, align 4
; CHECK: define i32 @fn1
-define i32 @fn1() #0 {
+define i32 @fn1() {
entry:
%b.promoted = load i32, ptr @b, align 4, !tbaa !2
br label %for.body
@@ -40,8 +40,6 @@ for.end: ; preds = %for.body
ret i32 undef
}
-attributes #0 = { norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Transforms/CodeGenPrepare/dom-tree.ll b/llvm/test/Transforms/CodeGenPrepare/dom-tree.ll
index 1c990ff..14360fe 100644
--- a/llvm/test/Transforms/CodeGenPrepare/dom-tree.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/dom-tree.ll
@@ -10,7 +10,7 @@
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
target triple = "armv7--linux-gnueabihf"
-define i32 @f(i32 %a) #0 {
+define i32 @f(i32 %a) {
entry:
br label %for.body
@@ -30,8 +30,6 @@ for.body:
br i1 %exitcond, label %for.cond.cleanup, label %for.body, !llvm.loop !3
}
-attributes #0 = { norecurse nounwind readnone "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a8" "target-features"="+dsp,+neon,+vfp3" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.module.flags = !{!0, !1}
!llvm.ident = !{!2}
diff --git a/llvm/test/Transforms/ConstantHoisting/X86/ehpad.ll b/llvm/test/Transforms/ConstantHoisting/X86/ehpad.ll
index 46fa066..78760db 100644
--- a/llvm/test/Transforms/ConstantHoisting/X86/ehpad.ll
+++ b/llvm/test/Transforms/ConstantHoisting/X86/ehpad.ll
@@ -20,7 +20,7 @@ target triple = "x86_64-pc-windows-msvc"
; BFIHOIST: br label %endif
; Function Attrs: norecurse
-define i32 @main(i32 %argc, ptr nocapture readnone %argv) local_unnamed_addr #0 personality ptr @__CxxFrameHandler3 {
+define i32 @main(i32 %argc, ptr nocapture readnone %argv) local_unnamed_addr personality ptr @__CxxFrameHandler3 {
%call = tail call i64 @fn(i64 0)
%call1 = tail call i64 @fn(i64 1)
%tobool = icmp eq i32 %argc, 0
@@ -62,9 +62,6 @@ endif:
ret i32 0
}
-declare i64 @fn(i64) local_unnamed_addr #1
+declare i64 @fn(i64) local_unnamed_addr
declare i32 @__CxxFrameHandler3(...)
-
-attributes #0 = { norecurse "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "stack-protector-buffer-size"="8" "target-features"="+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "stack-protector-buffer-size"="8" "target-features"="+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/ConstraintElimination/add-nsw.ll b/llvm/test/Transforms/ConstraintElimination/add-nsw.ll
index 5127e92..4b8ac09 100644
--- a/llvm/test/Transforms/ConstraintElimination/add-nsw.ll
+++ b/llvm/test/Transforms/ConstraintElimination/add-nsw.ll
@@ -757,8 +757,7 @@ define i1 @add_neg_1_known_sge_ult_1(i32 %a) {
; CHECK-NEXT: [[A_SGE:%.*]] = icmp sge i32 [[A:%.*]], 1
; CHECK-NEXT: call void @llvm.assume(i1 [[A_SGE]])
; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[A]], -1
-; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[SUB]], [[A]]
-; CHECK-NEXT: ret i1 [[C]]
+; CHECK-NEXT: ret i1 true
;
entry:
%a.sge = icmp sge i32 %a, 1
@@ -823,8 +822,7 @@ define i1 @add_neg_3_known_sge_ult_1(i32 %a) {
; CHECK-NEXT: [[A_SGE:%.*]] = icmp sge i32 [[A:%.*]], 3
; CHECK-NEXT: call void @llvm.assume(i1 [[A_SGE]])
; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[A]], -3
-; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[SUB]], [[A]]
-; CHECK-NEXT: ret i1 [[C]]
+; CHECK-NEXT: ret i1 true
;
entry:
%a.sge = icmp sge i32 %a, 3
diff --git a/llvm/test/Transforms/ConstraintElimination/gep-arithmetic-add.ll b/llvm/test/Transforms/ConstraintElimination/gep-arithmetic-add.ll
index 52adc78..8dcac78 100644
--- a/llvm/test/Transforms/ConstraintElimination/gep-arithmetic-add.ll
+++ b/llvm/test/Transforms/ConstraintElimination/gep-arithmetic-add.ll
@@ -389,8 +389,7 @@ define i1 @gep_count_add_1_sge_known_ult_1(i32 %count, ptr %p) {
; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[COUNT]], -1
; CHECK-NEXT: [[SUB_EXT:%.*]] = zext i32 [[SUB]] to i64
; CHECK-NEXT: [[GEP_SUB:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[SUB_EXT]]
-; CHECK-NEXT: [[C:%.*]] = icmp ult ptr [[GEP_SUB]], [[GEP_COUNT]]
-; CHECK-NEXT: ret i1 [[C]]
+; CHECK-NEXT: ret i1 true
;
entry:
%sge = icmp sge i32 %count, 1
@@ -415,8 +414,7 @@ define i1 @gep_count_add_1_sge_known_uge_1(i32 %count, ptr %p) {
; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[COUNT]], -1
; CHECK-NEXT: [[SUB_EXT:%.*]] = zext i32 [[SUB]] to i64
; CHECK-NEXT: [[GEP_SUB:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[SUB_EXT]]
-; CHECK-NEXT: [[C:%.*]] = icmp uge ptr [[GEP_SUB]], [[P]]
-; CHECK-NEXT: ret i1 [[C]]
+; CHECK-NEXT: ret i1 true
;
entry:
%sge = icmp sge i32 %count, 1
diff --git a/llvm/test/Transforms/Coroutines/coro-debug.ll b/llvm/test/Transforms/Coroutines/coro-debug.ll
index d1f1922..109be51f 100644
--- a/llvm/test/Transforms/Coroutines/coro-debug.ll
+++ b/llvm/test/Transforms/Coroutines/coro-debug.ll
@@ -14,7 +14,7 @@ entry:
%0 = call token @llvm.coro.id(i32 0, ptr null, ptr @flink, ptr null), !dbg !16
%1 = call i64 @llvm.coro.size.i64(), !dbg !16
%call = call ptr @malloc(i64 %1), !dbg !16
- %2 = call ptr @llvm.coro.begin(token %0, ptr %call) #7, !dbg !16
+ %2 = call ptr @llvm.coro.begin(token %0, ptr %call), !dbg !16
store ptr %2, ptr %coro_hdl, align 8, !dbg !16
%3 = call i8 @llvm.coro.suspend(token none, i1 false), !dbg !17
%conv = sext i8 %3 to i32, !dbg !17
@@ -69,7 +69,7 @@ coro_Cleanup: ; preds = %sw.epilog, %sw.bb1
br label %coro_Suspend, !dbg !24
coro_Suspend: ; preds = %coro_Cleanup, %sw.default
- call void @llvm.coro.end(ptr null, i1 false, token none) #7, !dbg !24
+ call void @llvm.coro.end(ptr null, i1 false, token none), !dbg !24
%7 = load ptr, ptr %coro_hdl, align 8, !dbg !24
store i32 0, ptr %late_local, !dbg !24
ret ptr %7, !dbg !24
@@ -82,47 +82,40 @@ ehcleanup:
}
; Function Attrs: nounwind readnone speculatable
-declare void @llvm.dbg.value(metadata, metadata, metadata) #1
+declare void @llvm.dbg.value(metadata, metadata, metadata)
; Function Attrs: nounwind readnone speculatable
-declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
; Function Attrs: argmemonly nounwind readonly
-declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr) #2
+declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr)
-declare ptr @malloc(i64) #3
+declare ptr @malloc(i64)
declare ptr @allocate()
declare void @print({ ptr, i32 })
declare void @log()
; Function Attrs: nounwind readnone
-declare i64 @llvm.coro.size.i64() #4
+declare i64 @llvm.coro.size.i64()
; Function Attrs: nounwind
-declare ptr @llvm.coro.begin(token, ptr writeonly) #5
+declare ptr @llvm.coro.begin(token, ptr writeonly)
; Function Attrs: nounwind
-declare i8 @llvm.coro.suspend(token, i1) #5
+declare i8 @llvm.coro.suspend(token, i1)
-declare void @free(ptr) #3
+declare void @free(ptr)
; Function Attrs: argmemonly nounwind readonly
-declare ptr @llvm.coro.free(token, ptr nocapture readonly) #2
+declare ptr @llvm.coro.free(token, ptr nocapture readonly)
; Function Attrs: nounwind
-declare void @llvm.coro.end(ptr, i1, token) #5
+declare void @llvm.coro.end(ptr, i1, token)
; Function Attrs: argmemonly nounwind readonly
-declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #2
-
-attributes #0 = { noinline nounwind presplitcoroutine "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone speculatable }
-attributes #2 = { argmemonly nounwind readonly }
-attributes #3 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #4 = { nounwind readnone }
-attributes #5 = { nounwind }
-attributes #6 = { alwaysinline }
-attributes #7 = { noduplicate }
+declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8)
+
+attributes #0 = { noinline nounwind presplitcoroutine }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4}
diff --git a/llvm/test/Transforms/Coroutines/coro-split-dbg.ll b/llvm/test/Transforms/Coroutines/coro-split-dbg.ll
index c53bea8..577ca9a 100644
--- a/llvm/test/Transforms/Coroutines/coro-split-dbg.ll
+++ b/llvm/test/Transforms/Coroutines/coro-split-dbg.ll
@@ -6,9 +6,9 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind readnone
-declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
-declare void @bar(...) local_unnamed_addr #2
+declare void @bar(...) local_unnamed_addr
; Function Attrs: nounwind uwtable
define ptr @f() #3 !dbg !16 {
@@ -16,14 +16,14 @@ entry:
%0 = tail call token @llvm.coro.id(i32 0, ptr null, ptr @f, ptr null), !dbg !26
%1 = tail call i64 @llvm.coro.size.i64(), !dbg !26
%call = tail call ptr @malloc(i64 %1), !dbg !26
- %2 = tail call ptr @llvm.coro.begin(token %0, ptr %call) #9, !dbg !26
+ %2 = tail call ptr @llvm.coro.begin(token %0, ptr %call), !dbg !26
tail call void @llvm.dbg.value(metadata ptr %2, metadata !21, metadata !12), !dbg !26
br label %for.cond, !dbg !27
for.cond: ; preds = %for.cond, %entry
tail call void @llvm.dbg.value(metadata i32 undef, metadata !22, metadata !12), !dbg !28
- tail call void @llvm.dbg.value(metadata i32 undef, metadata !11, metadata !12) #7, !dbg !29
- tail call void (...) @bar() #7, !dbg !33
+ tail call void @llvm.dbg.value(metadata i32 undef, metadata !11, metadata !12), !dbg !29
+ tail call void (...) @bar(), !dbg !33
%3 = tail call token @llvm.coro.save(ptr null), !dbg !34
%4 = tail call i8 @llvm.coro.suspend(token %3, i1 false), !dbg !34
%conv = sext i8 %4 to i32, !dbg !34
@@ -38,40 +38,31 @@ coro_Cleanup: ; preds = %for.cond
br label %coro_Suspend, !dbg !36
coro_Suspend: ; preds = %for.cond, %if.then, %coro_Cleanup
- tail call void @llvm.coro.end(ptr null, i1 false, token none) #9, !dbg !38
+ tail call void @llvm.coro.end(ptr null, i1 false, token none), !dbg !38
ret ptr %2, !dbg !39
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(ptr nocapture) #4
+declare void @llvm.lifetime.start.p0(ptr nocapture)
; Function Attrs: argmemonly nounwind readonly
-declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr) #5
+declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr)
; Function Attrs: nounwind
-declare noalias ptr @malloc(i64) local_unnamed_addr #6
-declare i64 @llvm.coro.size.i64() #1
-declare ptr @llvm.coro.begin(token, ptr writeonly) #7
-declare token @llvm.coro.save(ptr) #7
-declare i8 @llvm.coro.suspend(token, i1) #7
-declare void @llvm.lifetime.end.p0(ptr nocapture) #4
-declare ptr @llvm.coro.free(token, ptr nocapture readonly) #5
-declare void @free(ptr nocapture) local_unnamed_addr #6
-declare void @llvm.coro.end(ptr, i1, token) #7
-declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8) #5
+declare noalias ptr @malloc(i64) local_unnamed_addr
+declare i64 @llvm.coro.size.i64()
+declare ptr @llvm.coro.begin(token, ptr writeonly)
+declare token @llvm.coro.save(ptr)
+declare i8 @llvm.coro.suspend(token, i1)
+declare void @llvm.lifetime.end.p0(ptr nocapture)
+declare ptr @llvm.coro.free(token, ptr nocapture readonly)
+declare void @free(ptr nocapture) local_unnamed_addr
+declare void @llvm.coro.end(ptr, i1, token)
+declare ptr @llvm.coro.subfn.addr(ptr nocapture readonly, i8)
-declare void @llvm.dbg.value(metadata, metadata, metadata) #1
+declare void @llvm.dbg.value(metadata, metadata, metadata)
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone }
-attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { nounwind uwtable presplitcoroutine "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #4 = { argmemonly nounwind }
-attributes #5 = { argmemonly nounwind readonly }
-attributes #6 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #7 = { nounwind }
-attributes #8 = { alwaysinline nounwind }
-attributes #9 = { noduplicate }
+attributes #3 = { nounwind uwtable presplitcoroutine }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4}
diff --git a/llvm/test/Transforms/DeadArgElim/dbginfo.ll b/llvm/test/Transforms/DeadArgElim/dbginfo.ll
index a27ca9d..f6313c7 100644
--- a/llvm/test/Transforms/DeadArgElim/dbginfo.ll
+++ b/llvm/test/Transforms/DeadArgElim/dbginfo.ll
@@ -21,14 +21,14 @@
; updated LLVM functions.
; Function Attrs: uwtable
-define void @_Z2f2v() #0 !dbg !4 {
+define void @_Z2f2v() !dbg !4 {
entry:
call void (i32, ...) @_ZL2f1iz(i32 1), !dbg !15
ret void, !dbg !16
}
; Function Attrs: nounwind uwtable
-define internal void @_ZL2f1iz(i32, ...) #1 !dbg !8 {
+define internal void @_ZL2f1iz(i32, ...) !dbg !8 {
entry:
call void @llvm.dbg.value(metadata i32 %0, metadata !17, metadata !18), !dbg !19
ret void, !dbg !20
@@ -40,8 +40,6 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
; Function Attrs: nounwind readnone
declare void @llvm.dbg.value(metadata, metadata, metadata) #2
-attributes #0 = { uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #2 = { nounwind readnone }
!llvm.dbg.cu = !{!0}
diff --git a/llvm/test/Transforms/DeadStoreElimination/matrix-intrinsics.ll b/llvm/test/Transforms/DeadStoreElimination/matrix-intrinsics.ll
index ae3c746..2eaa275 100644
--- a/llvm/test/Transforms/DeadStoreElimination/matrix-intrinsics.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/matrix-intrinsics.ll
@@ -5,8 +5,8 @@ define void @dead_unstrided_store_non_matrix_load(ptr noalias %src, ptr noalias
; CHECK-LABEL: define void @dead_unstrided_store_non_matrix_load(
; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[DST]], i32 4, i1 false, i32 4, i32 2)
; CHECK-NEXT: [[L:%.*]] = load double, ptr [[SRC]], align 8
+; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[DST]], i32 4, i1 false, i32 4, i32 2)
; CHECK-NEXT: ret void
;
entry:
@@ -173,7 +173,6 @@ define void @dead_unstrided_store(ptr noalias %src, ptr noalias %dst) {
; CHECK-LABEL: define void @dead_unstrided_store(
; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[DST]], i32 4, i1 false, i32 4, i32 2)
; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 4, i1 false, i32 4, i32 2)
; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L]], ptr [[DST]], i32 4, i1 false, i32 4, i32 2)
; CHECK-NEXT: ret void
@@ -241,7 +240,6 @@ define void @dead_matrix_store_non_matrix_overwrite_unstrided(ptr noalias %src,
; CHECK-LABEL: define void @dead_matrix_store_non_matrix_overwrite_unstrided(
; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[DST]], i32 4, i1 false, i32 4, i32 2)
; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 4, i1 false, i32 4, i32 2)
; CHECK-NEXT: store <8 x double> zeroinitializer, ptr [[DST]], align 64
; CHECK-NEXT: ret void
@@ -257,7 +255,6 @@ define void @dead_matrix_store_non_matrix_overwrite_strided(ptr noalias %src, pt
; CHECK-LABEL: define void @dead_matrix_store_non_matrix_overwrite_strided(
; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[DST]], i32 4, i1 false, i32 4, i32 2)
; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 8, i1 false, i32 4, i32 2)
; CHECK-NEXT: store <16 x double> zeroinitializer, ptr [[DST]], align 128
; CHECK-NEXT: ret void
@@ -289,7 +286,6 @@ define void @live_matrix_store_non_matrix_overwrite_strided(ptr noalias %src, pt
; CHECK-LABEL: define void @live_matrix_store_non_matrix_overwrite_strided(
; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> zeroinitializer, ptr [[DST]], i32 4, i1 false, i32 4, i32 2)
; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 8, i1 false, i32 4, i32 2)
; CHECK-NEXT: store <8 x double> zeroinitializer, ptr [[DST]], align 64
; CHECK-NEXT: ret void
@@ -305,8 +301,6 @@ define void @dead_matrix_store_dimension_change(ptr noalias %src, ptr noalias %d
; CHECK-LABEL: define void @dead_matrix_store_dimension_change(
; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*:]]
-; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC]], i32 8, i1 false, i32 4, i32 2)
-; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L]], ptr [[DST]], i32 4, i1 false, i32 4, i32 2)
; CHECK-NEXT: call void @llvm.matrix.column.major.store.v9f64.i32(<9 x double> zeroinitializer, ptr [[DST]], i32 3, i1 false, i32 3, i32 3)
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/DeadStoreElimination/mda-with-dbg-values.ll b/llvm/test/Transforms/DeadStoreElimination/mda-with-dbg-values.ll
index f66aa0f..2cec6b5 100644
--- a/llvm/test/Transforms/DeadStoreElimination/mda-with-dbg-values.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/mda-with-dbg-values.ll
@@ -14,7 +14,7 @@ target triple = "x86_64-unknown-linux-gnu"
@g = common global [1 x i8] zeroinitializer, align 1, !dbg !0
; Function Attrs: noinline nounwind uwtable
-define void @foo() #0 !dbg !14 {
+define void @foo() !dbg !14 {
entry:
%i = alloca i8, align 1
store i8 1, ptr %i, align 1, !dbg !19
@@ -37,7 +37,6 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1
; Function Attrs: argmemonly nounwind
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #2
-attributes #0 = { noinline nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind readnone speculatable }
attributes #2 = { argmemonly nounwind }
diff --git a/llvm/test/Transforms/FunctionImport/Inputs/funcimport_debug.ll b/llvm/test/Transforms/FunctionImport/Inputs/funcimport_debug.ll
index 4d3a241..628669d 100644
--- a/llvm/test/Transforms/FunctionImport/Inputs/funcimport_debug.ll
+++ b/llvm/test/Transforms/FunctionImport/Inputs/funcimport_debug.ll
@@ -3,13 +3,11 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind uwtable
-define void @func() #0 !dbg !4 {
+define void @func() !dbg !4 {
entry:
ret void, !dbg !10
}
-attributes #0 = { nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!7, !8}
!llvm.ident = !{!9}
diff --git a/llvm/test/Transforms/FunctionImport/funcimport_debug.ll b/llvm/test/Transforms/FunctionImport/funcimport_debug.ll
index f449047..713dc4e 100644
--- a/llvm/test/Transforms/FunctionImport/funcimport_debug.ll
+++ b/llvm/test/Transforms/FunctionImport/funcimport_debug.ll
@@ -24,16 +24,13 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind uwtable
-define i32 @main() #0 !dbg !4 {
+define i32 @main() !dbg !4 {
entry:
call void (...) @func(), !dbg !11
ret i32 0, !dbg !12
}
-declare void @func(...) #1
-
-attributes #0 = { nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
+declare void @func(...)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!8, !9}
diff --git a/llvm/test/Transforms/GCOVProfiling/exit-block.ll b/llvm/test/Transforms/GCOVProfiling/exit-block.ll
index 1840f04..543edac 100644
--- a/llvm/test/Transforms/GCOVProfiling/exit-block.ll
+++ b/llvm/test/Transforms/GCOVProfiling/exit-block.ll
@@ -13,7 +13,7 @@ target triple = "x86_64-unknown-linux-gnu"
@A = common global i32 0, align 4, !dbg !9
; Function Attrs: nounwind uwtable
-define void @test() #0 !dbg !4 {
+define void @test() !dbg !4 {
entry:
tail call void (...) @f() #2, !dbg !14
%0 = load i32, ptr @A, align 4, !dbg !15
@@ -28,12 +28,10 @@ if.end: ; preds = %entry, %if.then
ret void, !dbg !18
}
-declare void @f(...) #1
+declare void @f(...)
-declare void @g(...) #1
+declare void @g(...)
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #2 = { nounwind }
!llvm.gcov = !{!19}
diff --git a/llvm/test/Transforms/GCOVProfiling/linezero.ll b/llvm/test/Transforms/GCOVProfiling/linezero.ll
index 8bfeabd..1eae413 100644
--- a/llvm/test/Transforms/GCOVProfiling/linezero.ll
+++ b/llvm/test/Transforms/GCOVProfiling/linezero.ll
@@ -9,7 +9,7 @@ target triple = "x86_64-unknown-linux-gnu"
%struct.vector = type { i8 }
; Function Attrs: nounwind
-define i32 @_Z4testv() #0 !dbg !15 {
+define i32 @_Z4testv() !dbg !15 {
entry:
%retval = alloca i32, align 4
%__range = alloca ptr, align 8
@@ -63,19 +63,19 @@ return: ; No predecessors!
}
; Function Attrs: nounwind readnone
-declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
-declare void @_Z13TagFieldSpecsv() #2
+declare void @_Z13TagFieldSpecsv()
-declare ptr @_ZN6vector5beginEv(ptr) #2
+declare ptr @_ZN6vector5beginEv(ptr)
-declare ptr @_ZN6vector3endEv(ptr) #2
+declare ptr @_ZN6vector3endEv(ptr)
; Function Attrs: noreturn nounwind
-declare void @llvm.trap() #3
+declare void @llvm.trap()
; Function Attrs: nounwind
-define void @_Z2f1v() #0 !dbg !20 {
+define void @_Z2f1v() !dbg !20 {
entry:
br label %0
@@ -83,11 +83,6 @@ entry:
ret void, !dbg !45
}
-attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone }
-attributes #2 = { "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { noreturn nounwind }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!23, !24}
!llvm.gcov = !{!25}
diff --git a/llvm/test/Transforms/GCOVProfiling/split-indirectbr-critical-edges.ll b/llvm/test/Transforms/GCOVProfiling/split-indirectbr-critical-edges.ll
index 7f169e2..98bdd74 100644
--- a/llvm/test/Transforms/GCOVProfiling/split-indirectbr-critical-edges.ll
+++ b/llvm/test/Transforms/GCOVProfiling/split-indirectbr-critical-edges.ll
@@ -10,7 +10,7 @@
; CHECK-NEXT: load {{.*}} @__llvm_gcov_ctr
; CHECK-NOT: load {{.*}} @__llvm_gcov_ctr
-define dso_local i32 @cannot_split(ptr nocapture readonly %p) #0 !dbg !7 {
+define dso_local i32 @cannot_split(ptr nocapture readonly %p) !dbg !7 {
entry:
%targets = alloca <2 x ptr>, align 16
store <2 x ptr> <ptr blockaddress(@cannot_split, %indirect), ptr blockaddress(@cannot_split, %end)>, ptr %targets, align 16, !dbg !9
@@ -42,8 +42,6 @@ end: ; preds = %indirect
ret i32 0, !dbg !22
}
-attributes #0 = { norecurse nounwind readonly uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4, !5}
diff --git a/llvm/test/Transforms/GVN/cond_br2.ll b/llvm/test/Transforms/GVN/cond_br2.ll
index 6ceec95..4811ad0 100644
--- a/llvm/test/Transforms/GVN/cond_br2.ll
+++ b/llvm/test/Transforms/GVN/cond_br2.ll
@@ -11,12 +11,12 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
%"union.llvm::SmallVectorBase::U" = type { x86_fp80 }
; Function Attrs: ssp uwtable
-define void @_Z4testv() #0 personality ptr @__gxx_personality_v0 {
+define void @_Z4testv() personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: define void @_Z4testv(
-; CHECK-SAME: ) #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
+; CHECK-SAME: ) personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[SV:%.*]] = alloca %"class.llvm::SmallVector", align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[SV]]) #[[ATTR4:[0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[SV]])
; CHECK-NEXT: [[FIRSTEL_I_I_I_I_I_I:%.*]] = getelementptr inbounds %"class.llvm::SmallVector", ptr [[SV]], i64 0, i32 0, i32 0, i32 0, i32 0, i32 3
; CHECK-NEXT: store ptr [[FIRSTEL_I_I_I_I_I_I]], ptr [[SV]], align 16, !tbaa [[ANYPTR_TBAA0:![0-9]+]]
; CHECK-NEXT: [[ENDX_I_I_I_I_I_I:%.*]] = getelementptr inbounds %"class.llvm::SmallVector", ptr [[SV]], i64 0, i32 0, i32 0, i32 0, i32 0, i32 1
@@ -66,10 +66,10 @@ define void @_Z4testv() #0 personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: [[CMP_I_I_I_I19:%.*]] = icmp eq ptr [[TMP0]], [[FIRSTEL_I_I_I_I_I_I]]
; CHECK-NEXT: br i1 [[CMP_I_I_I_I19]], label %[[_ZN4LLVM11SMALLVECTORIILJ8EED1EV_EXIT21:.*]], label %[[IF_THEN_I_I_I20:.*]]
; CHECK: [[IF_THEN_I_I_I20]]:
-; CHECK-NEXT: call void @free(ptr [[TMP0]]) #[[ATTR4]]
+; CHECK-NEXT: call void @free(ptr [[TMP0]])
; CHECK-NEXT: br label %[[_ZN4LLVM11SMALLVECTORIILJ8EED1EV_EXIT21]]
; CHECK: [[_ZN4LLVM11SMALLVECTORIILJ8EED1EV_EXIT21]]:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[SV]]) #[[ATTR4]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[SV]])
; CHECK-NEXT: ret void
; CHECK: [[LPAD]]:
; CHECK-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
@@ -78,7 +78,7 @@ define void @_Z4testv() #0 personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: [[CMP_I_I_I_I:%.*]] = icmp eq ptr [[TMP2]], [[FIRSTEL_I_I_I_I_I_I]]
; CHECK-NEXT: br i1 [[CMP_I_I_I_I]], label %[[EH_RESUME:.*]], label %[[IF_THEN_I_I_I:.*]]
; CHECK: [[IF_THEN_I_I_I]]:
-; CHECK-NEXT: call void @free(ptr [[TMP2]]) #[[ATTR4]]
+; CHECK-NEXT: call void @free(ptr [[TMP2]])
; CHECK-NEXT: br label %[[EH_RESUME]]
; CHECK: [[EH_RESUME]]:
; CHECK-NEXT: resume { ptr, i32 } [[TMP1]]
@@ -86,7 +86,7 @@ define void @_Z4testv() #0 personality ptr @__gxx_personality_v0 {
entry:
%sv = alloca %"class.llvm::SmallVector", align 16
- call void @llvm.lifetime.start.p0(ptr %sv) #1
+ call void @llvm.lifetime.start.p0(ptr %sv)
%FirstEl.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", ptr %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 3
store ptr %FirstEl.i.i.i.i.i.i, ptr %sv, align 16, !tbaa !4
%EndX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", ptr %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 1
@@ -151,11 +151,11 @@ invoke.cont3: ; preds = %invoke.cont2
br i1 %cmp.i.i.i.i19, label %_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21, label %if.then.i.i.i20
if.then.i.i.i20: ; preds = %invoke.cont3
- call void @free(ptr %5) #1
+ call void @free(ptr %5)
br label %_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21
_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21: ; preds = %invoke.cont3, %if.then.i.i.i20
- call void @llvm.lifetime.end.p0(ptr %sv) #1
+ call void @llvm.lifetime.end.p0(ptr %sv)
ret void
lpad: ; preds = %if.end.i14, %if.end.i, %invoke.cont2
@@ -166,7 +166,7 @@ lpad: ; preds = %if.end.i14, %if.end
br i1 %cmp.i.i.i.i, label %eh.resume, label %if.then.i.i.i
if.then.i.i.i: ; preds = %lpad
- call void @free(ptr %7) #1
+ call void @free(ptr %7)
br label %eh.resume
eh.resume: ; preds = %if.then.i.i.i, %lpad
@@ -174,24 +174,19 @@ eh.resume: ; preds = %if.then.i.i.i, %lpa
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare i32 @__gxx_personality_v0(...)
-declare void @_Z1gRN4llvm11SmallVectorIiLj8EEE(ptr) #2
+declare void @_Z1gRN4llvm11SmallVectorIiLj8EEE(ptr)
; Function Attrs: nounwind
-declare void @llvm.lifetime.end.p0(ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture)
-declare void @_ZN4llvm15SmallVectorBase8grow_podEmm(ptr, i64, i64) #2
+declare void @_ZN4llvm15SmallVectorBase8grow_podEmm(ptr, i64, i64)
; Function Attrs: nounwind
-declare void @free(ptr nocapture) #3
-
-attributes #0 = { ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind }
-attributes #2 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+declare void @free(ptr nocapture)
!0 = !{!"any pointer", !1}
!1 = !{!"omnipotent char", !2}
diff --git a/llvm/test/Transforms/GVN/matrix-intrinsics.ll b/llvm/test/Transforms/GVN/matrix-intrinsics.ll
index 78dbfe1..03bd45b 100644
--- a/llvm/test/Transforms/GVN/matrix-intrinsics.ll
+++ b/llvm/test/Transforms/GVN/matrix-intrinsics.ll
@@ -8,9 +8,8 @@ define void @redundant_unstrided_load(ptr %src) {
; CHECK-NEXT: [[SRC_OFFSET:%.*]] = getelementptr inbounds double, ptr [[SRC]], i32 8
; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC_OFFSET]], i32 4, i1 false, i32 4, i32 2)
; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L]], ptr [[SRC]], i32 4, i1 false, i32 4, i32 2)
-; CHECK-NEXT: [[L_2:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC_OFFSET]], i32 4, i1 false, i32 4, i32 2)
; CHECK-NEXT: call void @use(<8 x double> [[L]])
-; CHECK-NEXT: call void @use(<8 x double> [[L_2]])
+; CHECK-NEXT: call void @use(<8 x double> [[L]])
; CHECK-NEXT: ret void
;
entry:
@@ -30,9 +29,8 @@ define void @redundant_unstrided_load_non_matrix_store(ptr %src) {
; CHECK-NEXT: [[SRC_OFFSET:%.*]] = getelementptr inbounds double, ptr [[SRC]], i32 1
; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC_OFFSET]], i32 4, i1 false, i32 4, i32 2)
; CHECK-NEXT: store double 4.200000e+01, ptr [[SRC]], align 8
-; CHECK-NEXT: [[L_2:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC_OFFSET]], i32 4, i1 false, i32 4, i32 2)
; CHECK-NEXT: call void @use(<8 x double> [[L]])
-; CHECK-NEXT: call void @use(<8 x double> [[L_2]])
+; CHECK-NEXT: call void @use(<8 x double> [[L]])
; CHECK-NEXT: ret void
;
entry:
@@ -52,9 +50,8 @@ define void @redundant_strided_load(ptr %src) {
; CHECK-NEXT: [[SRC_OFFSET:%.*]] = getelementptr inbounds double, ptr [[SRC]], i32 16
; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC_OFFSET]], i32 8, i1 false, i32 4, i32 2)
; CHECK-NEXT: call void @llvm.matrix.column.major.store.v8f64.i32(<8 x double> [[L]], ptr [[SRC]], i32 8, i1 false, i32 4, i32 2)
-; CHECK-NEXT: [[L_2:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC_OFFSET]], i32 8, i1 false, i32 4, i32 2)
; CHECK-NEXT: call void @use(<8 x double> [[L]])
-; CHECK-NEXT: call void @use(<8 x double> [[L_2]])
+; CHECK-NEXT: call void @use(<8 x double> [[L]])
; CHECK-NEXT: ret void
;
entry:
@@ -75,9 +72,8 @@ define void @redundant_strided_load_non_matrix_store(ptr %src) {
; CHECK-NEXT: [[SRC_OFFSET:%.*]] = getelementptr inbounds double, ptr [[SRC]], i32 16
; CHECK-NEXT: [[L:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC_OFFSET]], i32 8, i1 false, i32 4, i32 2)
; CHECK-NEXT: store double 4.200000e+01, ptr [[SRC]], align 8
-; CHECK-NEXT: [[L_2:%.*]] = call <8 x double> @llvm.matrix.column.major.load.v8f64.i32(ptr [[SRC_OFFSET]], i32 8, i1 false, i32 4, i32 2)
; CHECK-NEXT: call void @use(<8 x double> [[L]])
-; CHECK-NEXT: call void @use(<8 x double> [[L_2]])
+; CHECK-NEXT: call void @use(<8 x double> [[L]])
; CHECK-NEXT: ret void
;
entry:
diff --git a/llvm/test/Transforms/GVN/pr33549.ll b/llvm/test/Transforms/GVN/pr33549.ll
index a8ce37c..a1e28f7 100644
--- a/llvm/test/Transforms/GVN/pr33549.ll
+++ b/llvm/test/Transforms/GVN/pr33549.ll
@@ -4,9 +4,9 @@
@Data = common local_unnamed_addr global [32 x i32] zeroinitializer, align 4
; Function Attrs: norecurse nounwind
-define void @testshl() local_unnamed_addr #0 {
+define void @testshl() local_unnamed_addr {
; CHECK-LABEL: define void @testshl(
-; CHECK-SAME: ) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: ) local_unnamed_addr {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
@@ -78,8 +78,6 @@ for.end10: ; preds = %for.inc8
ret void
}
-attributes #0 = { norecurse nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m7" "target-features"="+d16,+dsp,+fp-armv8,+hwdiv,+strict-align,+thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.module.flags = !{!0, !1}
!llvm.ident = !{!2}
diff --git a/llvm/test/Transforms/GVN/pr42605.ll b/llvm/test/Transforms/GVN/pr42605.ll
index 3e6241c..98b447f8 100644
--- a/llvm/test/Transforms/GVN/pr42605.ll
+++ b/llvm/test/Transforms/GVN/pr42605.ll
@@ -114,7 +114,7 @@ if.end: ; preds = %if.then, %entry
ret void
}
-attributes #0 = { noinline norecurse nounwind readonly uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { noinline norecurse nounwind readonly uwtable }
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Transforms/GVNHoist/hoist-unsafe-pr31729.ll b/llvm/test/Transforms/GVNHoist/hoist-unsafe-pr31729.ll
index b9c8537..082d016 100644
--- a/llvm/test/Transforms/GVNHoist/hoist-unsafe-pr31729.ll
+++ b/llvm/test/Transforms/GVNHoist/hoist-unsafe-pr31729.ll
@@ -14,7 +14,7 @@
@res = common global i32 0, align 4
; Function Attrs:
-define i64 @func() #0 {
+define i64 @func() {
entry:
ret i64 1
}
@@ -27,7 +27,7 @@ entry:
%2 = load volatile i32, ptr @g_x_u, align 4
%3 = load volatile i32, ptr @g_z_u, align 4
%4 = load volatile i32, ptr @g_m, align 4
- %call = call i64 @func() #4
+ %call = call i64 @func()
%conv = sext i32 %1 to i64
%cmp = icmp ne i64 %call, %conv
br i1 %cmp, label %if.end, label %lor.lhs.false
@@ -42,7 +42,7 @@ if.then:
br label %cleanup
if.end:
- %call4 = call i64 @func() #4
+ %call4 = call i64 @func()
%conv5 = zext i32 %3 to i64
%cmp6 = icmp ne i64 %call4, %conv5
br i1 %cmp6, label %if.end14, label %lor.lhs.false8
@@ -57,7 +57,7 @@ if.then13:
br label %cleanup
if.end14:
- %call15 = call i64 @func() #4
+ %call15 = call i64 @func()
%cmp17 = icmp ne i64 %call15, %conv
br i1 %cmp17, label %if.end25, label %lor.lhs.false19
@@ -77,5 +77,3 @@ cleanup:
%retval.0 = phi i32 [ 0, %if.end25 ], [ 1, %if.then24 ], [ 1, %if.then13 ], [ 1, %if.then ]
ret i32 %retval.0
}
-
-attributes #0 = { minsize noinline nounwind optsize uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/GVNHoist/pr30499.ll b/llvm/test/Transforms/GVNHoist/pr30499.ll
index bd6f98c..6df9484b 100644
--- a/llvm/test/Transforms/GVNHoist/pr30499.ll
+++ b/llvm/test/Transforms/GVNHoist/pr30499.ll
@@ -1,6 +1,6 @@
; RUN: opt -S -passes=gvn-hoist < %s
-define void @_Z3fn2v() #0 {
+define void @_Z3fn2v() {
entry:
%a = alloca ptr, align 8
%b = alloca i32, align 4
@@ -11,7 +11,7 @@ entry:
br i1 %tobool, label %if.then, label %if.end
if.then: ; preds = %entry
- %call = call i64 @_Z3fn1v() #2
+ %call = call i64 @_Z3fn1v()
%conv = trunc i64 %call to i32
store i32 %conv, ptr %b, align 4
br label %if.end
@@ -23,8 +23,4 @@ if.end: ; preds = %if.then, %entry
}
; Function Attrs: nounwind readonly
-declare i64 @_Z3fn1v() #1
-
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readonly "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind readonly }
+declare i64 @_Z3fn1v()
diff --git a/llvm/test/Transforms/IndVarSimplify/X86/widen-nsw.ll b/llvm/test/Transforms/IndVarSimplify/X86/widen-nsw.ll
index 7cba30d..81a9323 100644
--- a/llvm/test/Transforms/IndVarSimplify/X86/widen-nsw.ll
+++ b/llvm/test/Transforms/IndVarSimplify/X86/widen-nsw.ll
@@ -4,7 +4,7 @@ target triple = "x86_64-apple-macosx"
; CHECK-LABEL: @test1
; CHECK: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-define i32 @test1(ptr %a) #0 {
+define i32 @test1(ptr %a) {
entry:
br label %for.cond
@@ -25,5 +25,3 @@ for.body: ; preds = %for.cond
for.end: ; preds = %for.cond
ret i32 %sum.0
}
-
-attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/Inline/always-inline-attr.ll b/llvm/test/Transforms/Inline/always-inline-attr.ll
index 08ea307..4e69822 100644
--- a/llvm/test/Transforms/Inline/always-inline-attr.ll
+++ b/llvm/test/Transforms/Inline/always-inline-attr.ll
@@ -6,7 +6,7 @@ target triple = "x86_64-grtev4-linux-gnu"
; After AlwaysInline the callee's attributes should be merged into caller's attributes.
-; CHECK: define dso_local <2 x i64> @foo(ptr byval(<8 x i64>) align 64 %0) #0
+; CHECK: define dso_local <2 x i64> @foo(ptr byval(<8 x i64>) align 64 %0)
; CHECK: attributes #0 = { mustprogress uwtable "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="512"
; Function Attrs: uwtable mustprogress
@@ -36,12 +36,10 @@ entry:
}
; Function Attrs: nounwind readnone
-declare <16 x i8> @llvm.x86.avx512.mask.pmovs.db.512(<16 x i32>, <16 x i8>, i16) #2
-
-attributes #0 = { uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "prefer-vector-width"="128" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+aes,+avx,+avx2,+avx512bw,+avx512dq,+avx512f,+avx512vl,+bmi2,+cx16,+cx8,+f16c,+fma,+fxsr,+mmx,+pclmul,+popcnt,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { alwaysinline nounwind uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="512" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "prefer-vector-width"="128" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+aes,+avx,+avx2,+avx512f,+cx16,+cx8,+f16c,+fma,+fxsr,+mmx,+pclmul,+popcnt,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind readnone }
+declare <16 x i8> @llvm.x86.avx512.mask.pmovs.db.512(<16 x i32>, <16 x i8>, i16)
+attributes #0 = { uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "prefer-vector-width"="128" }
+attributes #1 = { alwaysinline nounwind uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="512" "prefer-vector-width"="128" }
!2 = !{!3, !3, i64 0}
!3 = !{!"omnipotent char", !4, i64 0}
diff --git a/llvm/test/Transforms/Inline/debug-info-duplicate-calls.ll b/llvm/test/Transforms/Inline/debug-info-duplicate-calls.ll
index a4c4134..bf0dfa2 100644
--- a/llvm/test/Transforms/Inline/debug-info-duplicate-calls.ll
+++ b/llvm/test/Transforms/Inline/debug-info-duplicate-calls.ll
@@ -89,11 +89,10 @@ entry:
ret void, !dbg !20
}
-declare void @_Z2f1v() #2
+attributes #0 = { uwtable }
+attributes #1 = { alwaysinline inlinehint uwtable }
-attributes #0 = { uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { alwaysinline inlinehint uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+declare void @_Z2f1v()
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!10, !11}
diff --git a/llvm/test/Transforms/Inline/inline-vla.ll b/llvm/test/Transforms/Inline/inline-vla.ll
index 8e4bb3d..1e3c235 100644
--- a/llvm/test/Transforms/Inline/inline-vla.ll
+++ b/llvm/test/Transforms/Inline/inline-vla.ll
@@ -9,7 +9,7 @@
@.str1 = private unnamed_addr constant [3 x i8] c"ab\00", align 1
; Function Attrs: nounwind ssp uwtable
-define i32 @main(i32 %argc, ptr nocapture readnone %argv) #0 {
+define i32 @main(i32 %argc, ptr nocapture readnone %argv) {
entry:
%data = alloca [2 x i8], align 1
call fastcc void @memcpy2(ptr %data, ptr @.str, i64 1)
@@ -18,7 +18,7 @@ entry:
}
; Function Attrs: inlinehint nounwind ssp uwtable
-define internal fastcc void @memcpy2(ptr nocapture %dst, ptr nocapture readonly %src, i64 %size) #1 {
+define internal fastcc void @memcpy2(ptr nocapture %dst, ptr nocapture readonly %src, i64 %size) {
entry:
%vla = alloca i64, i64 %size, align 16
call void @llvm.memcpy.p0.p0.i64(ptr %vla, ptr %src, i64 %size, i1 false)
@@ -27,11 +27,7 @@ entry:
}
; Function Attrs: nounwind
-declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #2
-
-attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { inlinehint nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind }
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
!llvm.ident = !{!0}
diff --git a/llvm/test/Transforms/Inline/optimization-remarks-hotness-threshold.ll b/llvm/test/Transforms/Inline/optimization-remarks-hotness-threshold.ll
index 3021935..2b4aedd 100644
--- a/llvm/test/Transforms/Inline/optimization-remarks-hotness-threshold.ll
+++ b/llvm/test/Transforms/Inline/optimization-remarks-hotness-threshold.ll
@@ -27,20 +27,18 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.11.0"
; Function Attrs: nounwind ssp uwtable
-define i32 @foo() #0 !dbg !7 {
+define i32 @foo() !dbg !7 {
entry:
ret i32 1, !dbg !9
}
; Function Attrs: nounwind ssp uwtable
-define i32 @bar() #0 !dbg !10 {
+define i32 @bar() !dbg !10 {
entry:
%call = call i32 @foo(), !dbg !11
ret i32 %call, !dbg !12
}
-attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4, !5}
!llvm.ident = !{!6}
diff --git a/llvm/test/Transforms/Inline/optimization-remarks-passed-deleted-callee-yaml.ll b/llvm/test/Transforms/Inline/optimization-remarks-passed-deleted-callee-yaml.ll
index d394713..f547c37 100644
--- a/llvm/test/Transforms/Inline/optimization-remarks-passed-deleted-callee-yaml.ll
+++ b/llvm/test/Transforms/Inline/optimization-remarks-passed-deleted-callee-yaml.ll
@@ -63,20 +63,18 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.11.0"
; Function Attrs: nounwind ssp uwtable
-define internal i32 @foo() #0 !dbg !7 {
+define internal i32 @foo() !dbg !7 {
entry:
ret i32 1, !dbg !9
}
; Function Attrs: nounwind ssp uwtable
-define i32 @bar() #0 !dbg !10 !prof !13 {
+define i32 @bar() !dbg !10 !prof !13 {
entry:
%call = call i32 @foo(), !dbg !11
ret i32 %call, !dbg !12
}
-attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4, !5}
!llvm.ident = !{!6}
diff --git a/llvm/test/Transforms/Inline/optimization-remarks-passed-yaml.ll b/llvm/test/Transforms/Inline/optimization-remarks-passed-yaml.ll
index b0a238f..64b305b 100644
--- a/llvm/test/Transforms/Inline/optimization-remarks-passed-yaml.ll
+++ b/llvm/test/Transforms/Inline/optimization-remarks-passed-yaml.ll
@@ -72,20 +72,18 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.11.0"
; Function Attrs: nounwind ssp uwtable
-define i32 @foo() #0 !dbg !7 {
+define i32 @foo() !dbg !7 {
entry:
ret i32 1, !dbg !9
}
; Function Attrs: nounwind ssp uwtable
-define i32 @bar() #0 !dbg !10 !prof !13 {
+define i32 @bar() !dbg !10 !prof !13 {
entry:
%call = call i32 @foo(), !dbg !11
ret i32 %call, !dbg !12
}
-attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4, !5}
!llvm.ident = !{!6}
diff --git a/llvm/test/Transforms/Inline/optimization-remarks.ll b/llvm/test/Transforms/Inline/optimization-remarks.ll
index bc1e690..135d835 100644
--- a/llvm/test/Transforms/Inline/optimization-remarks.ll
+++ b/llvm/test/Transforms/Inline/optimization-remarks.ll
@@ -81,9 +81,9 @@ entry:
ret i32 %add
}
-attributes #0 = { alwaysinline nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { noinline nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { alwaysinline nounwind uwtable }
+attributes #1 = { noinline nounwind uwtable }
+attributes #2 = { nounwind uwtable }
!llvm.ident = !{!0}
diff --git a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-to-svbool-binops.ll b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-to-svbool-binops.ll
index ecedbdb..abe1ed0 100644
--- a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-to-svbool-binops.ll
+++ b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-to-svbool-binops.ll
@@ -124,6 +124,39 @@ define <vscale x 8 x i1> @try_combine_svbool_binop_orr(<vscale x 8 x i1> %a, <vs
ret <vscale x 8 x i1> %t3
}
+; Verify predicate cast does not hinder "isAllActive" knowledge.
+define <vscale x 8 x half> @try_combine_svbool_binop_fadd(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: @try_combine_svbool_binop_fadd(
+; CHECK-NEXT: [[T2:%.*]] = fadd <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: ret <vscale x 8 x half> [[T2]]
+;
+ %t1 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> splat (i1 true))
+ %t2 = tail call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %t1, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
+ ret <vscale x 8 x half> %t2
+}
+
+; Verify predicate cast does not hinder "isAllActive" knowledge.
+define <vscale x 4 x float> @try_combine_svbool_binop_fmul(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: @try_combine_svbool_binop_fmul(
+; CHECK-NEXT: [[T2:%.*]] = fmul <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: ret <vscale x 4 x float> [[T2]]
+;
+ %t1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> splat (i1 true))
+ %t2 = tail call <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1> %t1, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
+ ret <vscale x 4 x float> %t2
+}
+
+; Verify predicate cast does not hinder "isAllActive" knowledge.
+define <vscale x 2 x double> @try_combine_svbool_binop_fsub(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: @try_combine_svbool_binop_fsub(
+; CHECK-NEXT: [[T2:%.*]] = fsub <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: ret <vscale x 2 x double> [[T2]]
+;
+ %t1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> splat (i1 true))
+ %t2 = tail call <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1> %t1, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
+ ret <vscale x 2 x double> %t2
+}
+
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1>)
diff --git a/llvm/test/Transforms/InstCombine/bitreverse-hang.ll b/llvm/test/Transforms/InstCombine/bitreverse-hang.ll
index bb01299..3f29509 100644
--- a/llvm/test/Transforms/InstCombine/bitreverse-hang.ll
+++ b/llvm/test/Transforms/InstCombine/bitreverse-hang.ll
@@ -21,7 +21,7 @@
@b = common global i32 0, align 4
; CHECK: define i32 @fn1
-define i32 @fn1() #0 {
+define i32 @fn1() {
entry:
%b.promoted = load i32, ptr @b, align 4, !tbaa !2
br label %for.body
@@ -40,8 +40,6 @@ for.end: ; preds = %for.body
ret i32 undef
}
-attributes #0 = { norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Transforms/InstCombine/intrinsic-select.ll b/llvm/test/Transforms/InstCombine/intrinsic-select.ll
index 2f1f9fc..fc9ab9f 100644
--- a/llvm/test/Transforms/InstCombine/intrinsic-select.ll
+++ b/llvm/test/Transforms/InstCombine/intrinsic-select.ll
@@ -222,8 +222,7 @@ declare i32 @llvm.vector.reduce.add.v2i32(<2 x i32>)
define i32 @vec_to_scalar_select_scalar(i1 %b) {
; CHECK-LABEL: @vec_to_scalar_select_scalar(
-; CHECK-NEXT: [[S:%.*]] = select i1 [[B:%.*]], <2 x i32> <i32 1, i32 2>, <2 x i32> <i32 3, i32 4>
-; CHECK-NEXT: [[C:%.*]] = call i32 @llvm.vector.reduce.add.v2i32(<2 x i32> [[S]])
+; CHECK-NEXT: [[C:%.*]] = select i1 [[B:%.*]], i32 3, i32 7
; CHECK-NEXT: ret i32 [[C]]
;
%s = select i1 %b, <2 x i32> <i32 1, i32 2>, <2 x i32> <i32 3, i32 4>
@@ -371,3 +370,36 @@ define float @test_fabs_select_multiuse_both_constant(i1 %cond, float %x) {
%fabs = call float @llvm.fabs.f32(float %select)
ret float %fabs
}
+
+; Negative test: Don't replace with select between vector mask and zeroinitializer.
+define <16 x i1> @test_select_of_active_lane_mask_bound(i64 %base, i64 %n, i1 %cond) {
+; CHECK-LABEL: @test_select_of_active_lane_mask_bound(
+; CHECK-NEXT: [[S:%.*]] = select i1 [[COND:%.*]], i64 [[N:%.*]], i64 0
+; CHECK-NEXT: [[MASK:%.*]] = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64 [[BASE:%.*]], i64 [[S]])
+; CHECK-NEXT: ret <16 x i1> [[MASK]]
+;
+ %s = select i1 %cond, i64 %n, i64 0
+ %mask = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64 %base, i64 %s)
+ ret <16 x i1> %mask
+}
+
+define <16 x i1> @test_select_of_active_lane_mask_bound_both_constant(i64 %base, i64 %n, i1 %cond) {
+; CHECK-LABEL: @test_select_of_active_lane_mask_bound_both_constant(
+; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], <16 x i1> splat (i1 true), <16 x i1> zeroinitializer
+; CHECK-NEXT: ret <16 x i1> [[MASK]]
+;
+ %s = select i1 %cond, i64 16, i64 0
+ %mask = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i64(i64 0, i64 %s)
+ ret <16 x i1> %mask
+}
+
+define { i64, i1 } @test_select_of_overflow_intrinsic_operand(i64 %n, i1 %cond) {
+; CHECK-LABEL: @test_select_of_overflow_intrinsic_operand(
+; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[N:%.*]], i64 42)
+; CHECK-NEXT: [[ADD_OVERFLOW:%.*]] = select i1 [[COND:%.*]], { i64, i1 } [[TMP1]], { i64, i1 } { i64 42, i1 false }
+; CHECK-NEXT: ret { i64, i1 } [[ADD_OVERFLOW]]
+;
+ %s = select i1 %cond, i64 %n, i64 0
+ %add_overflow = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %s, i64 42)
+ ret { i64, i1 } %add_overflow
+}
diff --git a/llvm/test/Transforms/InstCombine/phi.ll b/llvm/test/Transforms/InstCombine/phi.ll
index 3454835..1c8b21c 100644
--- a/llvm/test/Transforms/InstCombine/phi.ll
+++ b/llvm/test/Transforms/InstCombine/phi.ll
@@ -3026,6 +3026,56 @@ join:
ret i32 %umax
}
+define i32 @cross_lane_intrinsic_over_phi(i1 %c, i1 %c2, <4 x i32> %a) {
+; CHECK-LABEL: @cross_lane_intrinsic_over_phi(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[C:%.*]], label [[IF:%.*]], label [[JOIN:%.*]]
+; CHECK: if:
+; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[A:%.*]])
+; CHECK-NEXT: br label [[JOIN]]
+; CHECK: join:
+; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ [[TMP0]], [[IF]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: call void @may_exit()
+; CHECK-NEXT: ret i32 [[PHI]]
+;
+entry:
+ br i1 %c, label %if, label %join
+
+if:
+ br label %join
+
+join:
+ %phi = phi <4 x i32> [ %a, %if ], [ zeroinitializer, %entry ]
+ call void @may_exit()
+ %sum = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %phi)
+ ret i32 %sum
+}
+
+define { i64, i1 } @overflow_intrinsic_over_phi(i1 %c, i64 %a) {
+; CHECK-LABEL: @overflow_intrinsic_over_phi(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 [[C:%.*]], label [[IF:%.*]], label [[JOIN:%.*]]
+; CHECK: if:
+; CHECK-NEXT: [[TMP0:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[A:%.*]], i64 1)
+; CHECK-NEXT: br label [[JOIN]]
+; CHECK: join:
+; CHECK-NEXT: [[PHI:%.*]] = phi { i64, i1 } [ [[TMP0]], [[IF]] ], [ { i64 1, i1 false }, [[ENTRY:%.*]] ]
+; CHECK-NEXT: call void @may_exit()
+; CHECK-NEXT: ret { i64, i1 } [[PHI]]
+;
+entry:
+ br i1 %c, label %if, label %join
+
+if:
+ br label %join
+
+join:
+ %phi = phi i64 [ %a, %if ], [ 0, %entry ]
+ call void @may_exit()
+ %add_overflow = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %phi, i64 1)
+ ret { i64, i1 } %add_overflow
+}
+
define i32 @multiple_intrinsics_with_multiple_phi_uses(i1 %c, i32 %arg) {
; CHECK-LABEL: @multiple_intrinsics_with_multiple_phi_uses(
; CHECK-NEXT: entry:
diff --git a/llvm/test/Transforms/InstCombine/ptrtoaddr.ll b/llvm/test/Transforms/InstCombine/ptrtoaddr.ll
index 410c43c..f19cca8 100644
--- a/llvm/test/Transforms/InstCombine/ptrtoaddr.ll
+++ b/llvm/test/Transforms/InstCombine/ptrtoaddr.ll
@@ -40,3 +40,134 @@ define i128 @ptrtoaddr_sext(ptr %p) {
%ext = sext i64 %p.addr to i128
ret i128 %ext
}
+
+define i64 @sub_ptrtoaddr(ptr %p, i64 %offset) {
+; CHECK-LABEL: define i64 @sub_ptrtoaddr(
+; CHECK-SAME: ptr [[P:%.*]], i64 [[OFFSET:%.*]]) {
+; CHECK-NEXT: ret i64 [[OFFSET]]
+;
+ %p2 = getelementptr i8, ptr %p, i64 %offset
+ %p.addr = ptrtoaddr ptr %p to i64
+ %p2.addr = ptrtoaddr ptr %p2 to i64
+ %sub = sub i64 %p2.addr, %p.addr
+ ret i64 %sub
+}
+
+define i64 @sub_ptrtoint_ptrtoaddr(ptr %p, i64 %offset) {
+; CHECK-LABEL: define i64 @sub_ptrtoint_ptrtoaddr(
+; CHECK-SAME: ptr [[P:%.*]], i64 [[OFFSET:%.*]]) {
+; CHECK-NEXT: ret i64 [[OFFSET]]
+;
+ %p2 = getelementptr i8, ptr %p, i64 %offset
+ %p.int = ptrtoint ptr %p to i64
+ %p2.addr = ptrtoaddr ptr %p2 to i64
+ %sub = sub i64 %p2.addr, %p.int
+ ret i64 %sub
+}
+
+define i32 @sub_ptrtoaddr_addrsize(ptr addrspace(1) %p, i32 %offset) {
+; CHECK-LABEL: define i32 @sub_ptrtoaddr_addrsize(
+; CHECK-SAME: ptr addrspace(1) [[P:%.*]], i32 [[OFFSET:%.*]]) {
+; CHECK-NEXT: ret i32 [[OFFSET]]
+;
+ %p2 = getelementptr i8, ptr addrspace(1) %p, i32 %offset
+ %p.addr = ptrtoaddr ptr addrspace(1) %p to i32
+ %p2.addr = ptrtoaddr ptr addrspace(1) %p2 to i32
+ %sub = sub i32 %p2.addr, %p.addr
+ ret i32 %sub
+}
+
+define i32 @sub_trunc_ptrtoaddr(ptr %p, i64 %offset) {
+; CHECK-LABEL: define i32 @sub_trunc_ptrtoaddr(
+; CHECK-SAME: ptr [[P:%.*]], i64 [[OFFSET:%.*]]) {
+; CHECK-NEXT: [[SUB:%.*]] = trunc i64 [[OFFSET]] to i32
+; CHECK-NEXT: ret i32 [[SUB]]
+;
+ %p2 = getelementptr i8, ptr %p, i64 %offset
+ %p.addr = ptrtoaddr ptr %p to i64
+ %p2.addr = ptrtoaddr ptr %p2 to i64
+ %p.addr.trunc = trunc i64 %p.addr to i32
+ %p2.addr.trunc = trunc i64 %p2.addr to i32
+ %sub = sub i32 %p2.addr.trunc, %p.addr.trunc
+ ret i32 %sub
+}
+
+define i16 @sub_trunc_ptrtoaddr_addrsize(ptr addrspace(1) %p, i32 %offset) {
+; CHECK-LABEL: define i16 @sub_trunc_ptrtoaddr_addrsize(
+; CHECK-SAME: ptr addrspace(1) [[P:%.*]], i32 [[OFFSET:%.*]]) {
+; CHECK-NEXT: [[SUB:%.*]] = trunc i32 [[OFFSET]] to i16
+; CHECK-NEXT: ret i16 [[SUB]]
+;
+ %p2 = getelementptr i8, ptr addrspace(1) %p, i32 %offset
+ %p.addr = ptrtoaddr ptr addrspace(1) %p to i32
+ %p2.addr = ptrtoaddr ptr addrspace(1) %p2 to i32
+ %p.addr.trunc = trunc i32 %p.addr to i16
+ %p2.addr.trunc = trunc i32 %p2.addr to i16
+ %sub = sub i16 %p2.addr.trunc, %p.addr.trunc
+ ret i16 %sub
+}
+
+define i16 @sub_trunc_ptrtoint_ptrtoaddr_addrsize(ptr addrspace(1) %p, i32 %offset) {
+; CHECK-LABEL: define i16 @sub_trunc_ptrtoint_ptrtoaddr_addrsize(
+; CHECK-SAME: ptr addrspace(1) [[P:%.*]], i32 [[OFFSET:%.*]]) {
+; CHECK-NEXT: [[SUB:%.*]] = trunc i32 [[OFFSET]] to i16
+; CHECK-NEXT: ret i16 [[SUB]]
+;
+ %p2 = getelementptr i8, ptr addrspace(1) %p, i32 %offset
+ %p.int = ptrtoint ptr addrspace(1) %p to i64
+ %p2.addr = ptrtoaddr ptr addrspace(1) %p2 to i32
+ %p.int.trunc = trunc i64 %p.int to i16
+ %p2.addr.trunc = trunc i32 %p2.addr to i16
+ %sub = sub i16 %p2.addr.trunc, %p.int.trunc
+ ret i16 %sub
+}
+
+define i128 @sub_zext_ptrtoaddr(ptr %p, i64 %offset) {
+; CHECK-LABEL: define i128 @sub_zext_ptrtoaddr(
+; CHECK-SAME: ptr [[P:%.*]], i64 [[OFFSET:%.*]]) {
+; CHECK-NEXT: [[SUB:%.*]] = zext i64 [[OFFSET]] to i128
+; CHECK-NEXT: ret i128 [[SUB]]
+;
+ %p2 = getelementptr nuw i8, ptr %p, i64 %offset
+ %p.addr = ptrtoaddr ptr %p to i64
+ %p2.addr = ptrtoaddr ptr %p2 to i64
+ %p.addr.ext = zext i64 %p.addr to i128
+ %p2.addr.ext = zext i64 %p2.addr to i128
+ %sub = sub i128 %p2.addr.ext, %p.addr.ext
+ ret i128 %sub
+}
+
+define i64 @sub_zext_ptrtoaddr_addrsize(ptr addrspace(1) %p, i32 %offset) {
+; CHECK-LABEL: define i64 @sub_zext_ptrtoaddr_addrsize(
+; CHECK-SAME: ptr addrspace(1) [[P:%.*]], i32 [[OFFSET:%.*]]) {
+; CHECK-NEXT: [[SUB:%.*]] = zext i32 [[OFFSET]] to i64
+; CHECK-NEXT: ret i64 [[SUB]]
+;
+ %p2 = getelementptr nuw i8, ptr addrspace(1) %p, i32 %offset
+ %p.addr = ptrtoaddr ptr addrspace(1) %p to i32
+ %p2.addr = ptrtoaddr ptr addrspace(1) %p2 to i32
+ %p.addr.ext = zext i32 %p.addr to i64
+ %p2.addr.ext = zext i32 %p2.addr to i64
+ %sub = sub i64 %p2.addr.ext, %p.addr.ext
+ ret i64 %sub
+}
+
+define i128 @sub_zext_ptrtoint_ptrtoaddr_addrsize(ptr addrspace(1) %p, i32 %offset) {
+; CHECK-LABEL: define i128 @sub_zext_ptrtoint_ptrtoaddr_addrsize(
+; CHECK-SAME: ptr addrspace(1) [[P:%.*]], i32 [[OFFSET:%.*]]) {
+; CHECK-NEXT: [[P2:%.*]] = getelementptr nuw i8, ptr addrspace(1) [[P]], i32 [[OFFSET]]
+; CHECK-NEXT: [[P_INT:%.*]] = ptrtoint ptr addrspace(1) [[P]] to i64
+; CHECK-NEXT: [[P2_ADDR:%.*]] = ptrtoaddr ptr addrspace(1) [[P2]] to i32
+; CHECK-NEXT: [[P_INT_EXT:%.*]] = zext i64 [[P_INT]] to i128
+; CHECK-NEXT: [[P2_ADDR_EXT:%.*]] = zext i32 [[P2_ADDR]] to i128
+; CHECK-NEXT: [[SUB:%.*]] = sub nsw i128 [[P2_ADDR_EXT]], [[P_INT_EXT]]
+; CHECK-NEXT: ret i128 [[SUB]]
+;
+ %p2 = getelementptr nuw i8, ptr addrspace(1) %p, i32 %offset
+ %p.int = ptrtoint ptr addrspace(1) %p to i64
+ %p2.addr = ptrtoaddr ptr addrspace(1) %p2 to i32
+ %p.int.ext = zext i64 %p.int to i128
+ %p2.addr.ext = zext i32 %p2.addr to i128
+ %sub = sub i128 %p2.addr.ext, %p.int.ext
+ ret i128 %sub
+}
diff --git a/llvm/test/Transforms/InstCombine/select-extractelement-inseltpoison.ll b/llvm/test/Transforms/InstCombine/select-extractelement-inseltpoison.ll
index 2348490..2d06f42 100644
--- a/llvm/test/Transforms/InstCombine/select-extractelement-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/select-extractelement-inseltpoison.ll
@@ -208,6 +208,3 @@ define <4 x i32> @extract_cond_type_mismatch(<4 x i32> %x, <4 x i32> %y, <5 x i1
%r = select i1 %cond, <4 x i32> %x, <4 x i32> %y
ret <4 x i32> %r
}
-
-
-attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/InstCombine/select-extractelement.ll b/llvm/test/Transforms/InstCombine/select-extractelement.ll
index 621d278..6f80b7d 100644
--- a/llvm/test/Transforms/InstCombine/select-extractelement.ll
+++ b/llvm/test/Transforms/InstCombine/select-extractelement.ll
@@ -1,9 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=instcombine < %s | FileCheck %s
-declare void @v4float_user(<4 x float>) #0
+declare void @v4float_user(<4 x float>)
-define float @extract_one_select(<4 x float> %a, <4 x float> %b, i32 %c) #0 {
+define float @extract_one_select(<4 x float> %a, <4 x float> %b, i32 %c) {
; CHECK-LABEL: @extract_one_select(
; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[C:%.*]], 0
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP_NOT]], <4 x float> [[B:%.*]], <4 x float> [[A:%.*]]
@@ -17,7 +17,7 @@ define float @extract_one_select(<4 x float> %a, <4 x float> %b, i32 %c) #0 {
}
; Multiple extractelements
-define <2 x float> @extract_two_select(<4 x float> %a, <4 x float> %b, i32 %c) #0 {
+define <2 x float> @extract_two_select(<4 x float> %a, <4 x float> %b, i32 %c) {
; CHECK-LABEL: @extract_two_select(
; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[C:%.*]], 0
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP_NOT]], <4 x float> [[B:%.*]], <4 x float> [[A:%.*]]
@@ -34,7 +34,7 @@ define <2 x float> @extract_two_select(<4 x float> %a, <4 x float> %b, i32 %c) #
}
; Select has an extra non-extractelement user, don't change it
-define float @extract_one_select_user(<4 x float> %a, <4 x float> %b, i32 %c) #0 {
+define float @extract_one_select_user(<4 x float> %a, <4 x float> %b, i32 %c) {
; CHECK-LABEL: @extract_one_select_user(
; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[C:%.*]], 0
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP_NOT]], <4 x float> [[B:%.*]], <4 x float> [[A:%.*]]
@@ -49,7 +49,7 @@ define float @extract_one_select_user(<4 x float> %a, <4 x float> %b, i32 %c) #0
ret float %extract
}
-define float @extract_one_vselect_user(<4 x float> %a, <4 x float> %b, <4 x i32> %c) #0 {
+define float @extract_one_vselect_user(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
; CHECK-LABEL: @extract_one_vselect_user(
; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq <4 x i32> [[C:%.*]], zeroinitializer
; CHECK-NEXT: [[SEL:%.*]] = select <4 x i1> [[CMP_NOT]], <4 x float> [[B:%.*]], <4 x float> [[A:%.*]]
@@ -67,7 +67,7 @@ define float @extract_one_vselect_user(<4 x float> %a, <4 x float> %b, <4 x i32>
; Do not convert the vector select into a scalar select. That would increase
; the instruction count and potentially obfuscate a vector min/max idiom.
-define float @extract_one_vselect(<4 x float> %a, <4 x float> %b, <4 x i32> %c) #0 {
+define float @extract_one_vselect(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
; CHECK-LABEL: @extract_one_vselect(
; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq <4 x i32> [[C:%.*]], zeroinitializer
; CHECK-NEXT: [[SELECT:%.*]] = select <4 x i1> [[CMP_NOT]], <4 x float> [[B:%.*]], <4 x float> [[A:%.*]]
@@ -81,7 +81,7 @@ define float @extract_one_vselect(<4 x float> %a, <4 x float> %b, <4 x i32> %c)
}
; Multiple extractelements from a vector select
-define <2 x float> @extract_two_vselect(<4 x float> %a, <4 x float> %b, <4 x i32> %c) #0 {
+define <2 x float> @extract_two_vselect(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
; CHECK-LABEL: @extract_two_vselect(
; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq <4 x i32> [[C:%.*]], zeroinitializer
; CHECK-NEXT: [[SEL:%.*]] = select <4 x i1> [[CMP_NOT]], <4 x float> [[B:%.*]], <4 x float> [[A:%.*]]
@@ -100,7 +100,7 @@ define <2 x float> @extract_two_vselect(<4 x float> %a, <4 x float> %b, <4 x i32
; The vector selects are not decomposed into scalar selects because that would increase
; the instruction count. Extract+insert is converted to non-lane-crossing shuffles.
; Test multiple extractelements
-define <4 x float> @simple_vector_select(<4 x float> %a, <4 x float> %b, <4 x i32> %c) #0 {
+define <4 x float> @simple_vector_select(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
; CHECK-LABEL: @simple_vector_select(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = extractelement <4 x i32> [[C:%.*]], i64 0
@@ -232,5 +232,3 @@ define i32 @inf_loop_partial_undef(<2 x i1> %a, <2 x i1> %b, <2 x i32> %x, <2 x
%t11 = extractelement <2 x i32> %p, i32 0
ret i32 %t11
}
-
-attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/InstCombine/select_frexp.ll b/llvm/test/Transforms/InstCombine/select_frexp.ll
index d025aed..ccfcca1 100644
--- a/llvm/test/Transforms/InstCombine/select_frexp.ll
+++ b/llvm/test/Transforms/InstCombine/select_frexp.ll
@@ -115,10 +115,10 @@ define float @test_select_frexp_no_const(float %x, float %y, i1 %cond) {
define i32 @test_select_frexp_extract_exp(float %x, i1 %cond) {
; CHECK-LABEL: define i32 @test_select_frexp_extract_exp(
; CHECK-SAME: float [[X:%.*]], i1 [[COND:%.*]]) {
-; CHECK-NEXT: [[SEL:%.*]] = select i1 [[COND]], float 1.000000e+00, float [[X]]
-; CHECK-NEXT: [[FREXP:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float [[SEL]])
+; CHECK-NEXT: [[FREXP:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float [[X]])
; CHECK-NEXT: [[FREXP_1:%.*]] = extractvalue { float, i32 } [[FREXP]], 1
-; CHECK-NEXT: ret i32 [[FREXP_1]]
+; CHECK-NEXT: [[FREXP_2:%.*]] = select i1 [[COND]], i32 1, i32 [[FREXP_1]]
+; CHECK-NEXT: ret i32 [[FREXP_2]]
;
%sel = select i1 %cond, float 1.000000e+00, float %x
%frexp = call { float, i32 } @llvm.frexp.f32.i32(float %sel)
@@ -132,7 +132,7 @@ define float @test_select_frexp_fast_math_select(float %x, i1 %cond) {
; CHECK-SAME: float [[X:%.*]], i1 [[COND:%.*]]) {
; CHECK-NEXT: [[FREXP1:%.*]] = call { float, i32 } @llvm.frexp.f32.i32(float [[X]])
; CHECK-NEXT: [[MANTISSA:%.*]] = extractvalue { float, i32 } [[FREXP1]], 0
-; CHECK-NEXT: [[SELECT_FREXP:%.*]] = select nnan ninf nsz i1 [[COND]], float 5.000000e-01, float [[MANTISSA]]
+; CHECK-NEXT: [[SELECT_FREXP:%.*]] = select i1 [[COND]], float 5.000000e-01, float [[MANTISSA]]
; CHECK-NEXT: ret float [[SELECT_FREXP]]
;
%sel = select nnan ninf nsz i1 %cond, float 1.000000e+00, float %x
diff --git a/llvm/test/Transforms/InstCombine/sub-gep.ll b/llvm/test/Transforms/InstCombine/sub-gep.ll
index 8eeaea1..ee70137 100644
--- a/llvm/test/Transforms/InstCombine/sub-gep.ll
+++ b/llvm/test/Transforms/InstCombine/sub-gep.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=instcombine < %s | FileCheck %s
-target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-p2:32:32"
+target datalayout = "e-p:64:64:64-p1:16:16:16-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-p2:32:32-p3:32:32:32:16"
define i64 @test_inbounds(ptr %base, i64 %idx) {
; CHECK-LABEL: @test_inbounds(
@@ -505,6 +505,23 @@ define i64 @negative_zext_ptrtoint_sub_ptrtoint_as2_nuw(i32 %offset) {
ret i64 %D
}
+define i64 @negative_zext_ptrtoint_sub_zext_ptrtoint_as2_nuw_truncating(i32 %offset) {
+; CHECK-LABEL: @negative_zext_ptrtoint_sub_zext_ptrtoint_as2_nuw_truncating(
+; CHECK-NEXT: [[A:%.*]] = getelementptr nuw bfloat, ptr addrspace(2) @Arr_as2, i32 [[OFFSET:%.*]]
+; CHECK-NEXT: [[A_IDX:%.*]] = ptrtoint ptr addrspace(2) [[A]] to i32
+; CHECK-NEXT: [[E:%.*]] = zext i32 [[A_IDX]] to i64
+; CHECK-NEXT: [[D:%.*]] = zext i16 ptrtoint (ptr addrspace(2) @Arr_as2 to i16) to i64
+; CHECK-NEXT: [[E1:%.*]] = sub nsw i64 [[E]], [[D]]
+; CHECK-NEXT: ret i64 [[E1]]
+;
+ %A = getelementptr nuw bfloat, ptr addrspace(2) @Arr_as2, i32 %offset
+ %B = ptrtoint ptr addrspace(2) %A to i32
+ %C = zext i32 %B to i64
+ %D = zext i16 ptrtoint (ptr addrspace(2) @Arr_as2 to i16) to i64
+ %E = sub i64 %C, %D
+ ret i64 %E
+}
+
define i64 @ptrtoint_sub_zext_ptrtoint_as2_inbounds_local(ptr addrspace(2) %p, i32 %offset) {
; CHECK-LABEL: @ptrtoint_sub_zext_ptrtoint_as2_inbounds_local(
; CHECK-NEXT: [[A:%.*]] = getelementptr inbounds bfloat, ptr addrspace(2) [[P:%.*]], i32 [[OFFSET:%.*]]
@@ -614,6 +631,20 @@ define i64 @negative_zext_ptrtoint_sub_ptrtoint_as2_nuw_local(ptr addrspace(2) %
ret i64 %D
}
+define i64 @zext_ptrtoint_sub_ptrtoint_as3_nuw_local(ptr addrspace(3) %p, i16 %offset) {
+; CHECK-LABEL: @zext_ptrtoint_sub_ptrtoint_as3_nuw_local(
+; CHECK-NEXT: [[SUB:%.*]] = zext i16 [[GEP_IDX:%.*]] to i64
+; CHECK-NEXT: ret i64 [[SUB]]
+;
+ %gep = getelementptr nuw i8, ptr addrspace(3) %p, i16 %offset
+ %gep.int = ptrtoint ptr addrspace(3) %gep to i32
+ %p.int = ptrtoint ptr addrspace(3) %p to i32
+ %gep.int.ext = zext i32 %gep.int to i64
+ %p.int.ext = zext i32 %p.int to i64
+ %sub = sub i64 %gep.int.ext, %p.int.ext
+ ret i64 %sub
+}
+
define i64 @test30(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test30(
; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i64 [[I:%.*]], 2
diff --git a/llvm/test/Transforms/JumpThreading/ddt-crash3.ll b/llvm/test/Transforms/JumpThreading/ddt-crash3.ll
index edaade32..26ba857 100644
--- a/llvm/test/Transforms/JumpThreading/ddt-crash3.ll
+++ b/llvm/test/Transforms/JumpThreading/ddt-crash3.ll
@@ -9,9 +9,9 @@ target triple = "x86_64-unknown-linux-gnu"
@global.2 = external local_unnamed_addr global i64, align 8
; Function Attrs: norecurse noreturn nounwind uwtable
-define void @hoge() local_unnamed_addr #0 {
+define void @hoge() local_unnamed_addr {
; CHECK-LABEL: define void @hoge(
-; CHECK-SAME: ) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: ) local_unnamed_addr {
; CHECK-NEXT: [[BB:.*:]]
; CHECK-NEXT: br label %[[BB1:.*]]
; CHECK: [[BB1]]:
@@ -48,8 +48,6 @@ bb27: ; preds = %bb1
br label %bb26
}
-attributes #0 = { norecurse noreturn nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.ident = !{!0}
!0 = !{!"clang version 7.0.0 "}
diff --git a/llvm/test/Transforms/LICM/volatile-alias.ll b/llvm/test/Transforms/LICM/volatile-alias.ll
index 410f3be..35e8844 100644
--- a/llvm/test/Transforms/LICM/volatile-alias.ll
+++ b/llvm/test/Transforms/LICM/volatile-alias.ll
@@ -10,7 +10,7 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; Function Attrs: nounwind uwtable
-define i32 @foo(ptr dereferenceable(4) nonnull %p, ptr %q, i32 %n) #0 {
+define i32 @foo(ptr dereferenceable(4) nonnull %p, ptr %q, i32 %n) {
entry:
%p.addr = alloca ptr, align 8
%q.addr = alloca ptr, align 8
@@ -51,5 +51,3 @@ for.end: ; preds = %for.cond
%8 = load i32, ptr %s, align 4
ret i32 %8
}
-
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/LoopRotate/noalias.ll b/llvm/test/Transforms/LoopRotate/noalias.ll
index f709bba..d6b2414 100644
--- a/llvm/test/Transforms/LoopRotate/noalias.ll
+++ b/llvm/test/Transforms/LoopRotate/noalias.ll
@@ -146,11 +146,7 @@ for.end: ; preds = %for.cond
}
; Function Attrs: inaccessiblememonly nounwind
-declare void @llvm.experimental.noalias.scope.decl(metadata) #1
-
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { inaccessiblememonly nounwind }
-attributes #2 = { nounwind readnone speculatable }
+declare void @llvm.experimental.noalias.scope.decl(metadata)
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
index 5423368..25ded4b 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
@@ -157,4 +157,4 @@ bb:
br i1 %exitcond, label %._crit_edge.loopexit, label %.lr.ph
}
-attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hawaii" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hawaii" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/pr17473.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/pr17473.ll
index 6c34d1b..a878fc2 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/pr17473.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/pr17473.ll
@@ -62,6 +62,6 @@ for.end: ; preds = %fn3.exit
; Function Attrs: nounwind optsize
declare i32 @printf(ptr nocapture readonly, ...) #1
-attributes #0 = { nounwind optsize ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind optsize "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind optsize ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
+attributes #1 = { nounwind optsize "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #2 = { nounwind optsize }
diff --git a/llvm/test/Transforms/LoopStrengthReduce/pr18165.ll b/llvm/test/Transforms/LoopStrengthReduce/pr18165.ll
index 914ea7a..9c20685 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/pr18165.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/pr18165.ll
@@ -71,8 +71,8 @@ fn1.exit: ; preds = %lor.end.i
; Function Attrs: nounwind optsize
declare i32 @printf(ptr nocapture readonly, ...) #1
-attributes #0 = { nounwind optsize ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind optsize "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind optsize ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
+attributes #1 = { nounwind optsize "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #2 = { nounwind optsize }
!llvm.ident = !{!0}
diff --git a/llvm/test/Transforms/LoopStrengthReduce/two-combinations-bug.ll b/llvm/test/Transforms/LoopStrengthReduce/two-combinations-bug.ll
index 3364465..37e2f68 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/two-combinations-bug.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/two-combinations-bug.ll
@@ -46,7 +46,7 @@ for.body3: ; preds = %for.body3, %for.bod
br i1 %exitcond, label %for.cond.loopexit, label %for.body3
}
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
!0 = !{!1, !2, i64 16}
!1 = !{!"planet", !2, i64 0, !2, i64 8, !2, i64 16, !2, i64 24, !2, i64 32, !2, i64 40, !2, i64 48}
diff --git a/llvm/test/Transforms/LoopUnroll/runtime-epilog-debuginfo.ll b/llvm/test/Transforms/LoopUnroll/runtime-epilog-debuginfo.ll
index ee28aa1..df4dfa6 100644
--- a/llvm/test/Transforms/LoopUnroll/runtime-epilog-debuginfo.ll
+++ b/llvm/test/Transforms/LoopUnroll/runtime-epilog-debuginfo.ll
@@ -76,7 +76,7 @@ lee1.exit: ; preds = %lee1.exit.loopexit,
; Function Attrs: nounwind readnone
declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1
-attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="arm7tdmi" "target-features"="+neon,+strict-align,+vfp3,-crypto,-fp-armv8,-fp16,-vfp4" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="arm7tdmi" "target-features"="+neon,+strict-align,+vfp3,-crypto,-fp-armv8,-fp16,-vfp4" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
!llvm.dbg.cu = !{!0}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll
index 27ca414..199203a 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll
@@ -86,7 +86,8 @@ define i64 @test_two_ivs(ptr %a, ptr %b, i64 %start) #0 {
; CHECK-NEXT: Cost of 0 for VF 16: induction instruction %i.iv = phi i64 [ 0, %entry ], [ %i.iv.next, %for.body ]
; CHECK-NEXT: Cost of 0 for VF 16: induction instruction %j.iv = phi i64 [ %start, %entry ], [ %j.iv.next, %for.body ]
; CHECK-NEXT: Cost of 0 for VF 16: EMIT vp<{{.+}}> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
-; CHECK: Cost for VF 16: 41
+; CHECK: Cost of 1 for VF 16: EXPRESSION vp<%11> = ir<%sum> + partial.reduce.add (mul nuw nsw (ir<%1> zext to i64), (ir<%0> zext to i64))
+; CHECK: Cost for VF 16: 3
; CHECK: LV: Selecting VF: 16
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/maxbandwidth-regpressure.ll b/llvm/test/Transforms/LoopVectorize/AArch64/maxbandwidth-regpressure.ll
index 2d15431..8109d068 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/maxbandwidth-regpressure.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/maxbandwidth-regpressure.ll
@@ -6,8 +6,7 @@ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-none-unknown-elf"
define i32 @dotp(ptr %a, ptr %b) #0 {
-; CHECK-REGS-VP-NOT: LV(REG): Not considering vector loop of width vscale x 16 because it uses too many registers
-; CHECK-REGS-VP: LV: Selecting VF: vscale x 8.
+; CHECK-REGS-VP: LV: Selecting VF: vscale x 16.
;
; CHECK-NOREGS-VP: LV(REG): Not considering vector loop of width vscale x 8 because it uses too many registers
; CHECK-NOREGS-VP: LV(REG): Not considering vector loop of width vscale x 16 because it uses too many registers
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll
index 3dfa6df..287226f 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll
@@ -31,9 +31,9 @@ define i32 @chained_partial_reduce_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-NEON-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; CHECK-NEON-NEXT: [[TMP7:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP8]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP10]])
+; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP9]]
; CHECK-NEON-NEXT: [[TMP13:%.*]] = sub <16 x i32> zeroinitializer, [[TMP12]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP13]])
@@ -52,37 +52,34 @@ define i32 @chained_partial_reduce_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2
; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1
; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64
-; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]]
+; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-SVE: vector.ph:
-; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
+; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ]
+; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ]
; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]]
-; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1
-; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD2]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP14]]
-; CHECK-SVE-NEXT: [[TMP17:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]]
-; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP15]]
-; CHECK-SVE-NEXT: [[TMP19]] = sub <vscale x 4 x i32> [[TMP17]], [[TMP18]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1
+; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]])
+; CHECK-SVE-NEXT: [[TMP6:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP11:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP6]]
+; CHECK-SVE-NEXT: [[TMP12:%.*]] = sub <16 x i32> zeroinitializer, [[TMP11]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP12]])
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-SVE: middle.block:
-; CHECK-SVE-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]])
+; CHECK-SVE-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]])
; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK-SVE: scalar.ph:
@@ -114,10 +111,10 @@ define i32 @chained_partial_reduce_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP9]], align 1
; CHECK-SVE-MAXBW-NEXT: [[TMP13:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP14:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
-; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP16]])
-; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP15]]
+; CHECK-SVE-MAXBW-NEXT: [[TMP11:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
+; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP11]]
; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = sub <vscale x 8 x i32> zeroinitializer, [[TMP17]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP18]])
; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
@@ -184,9 +181,9 @@ define i32 @chained_partial_reduce_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-NEON-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; CHECK-NEON-NEXT: [[TMP7:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP8]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP10]])
+; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP11:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP9]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP11]])
; CHECK-NEON-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
@@ -221,9 +218,9 @@ define i32 @chained_partial_reduce_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1
; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]]
; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]])
+; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-SVE-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP5]]
; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP10]])
; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
@@ -262,10 +259,10 @@ define i32 @chained_partial_reduce_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP9]], align 1
; CHECK-SVE-MAXBW-NEXT: [[TMP13:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP14:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
-; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP16]])
-; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP15]]
+; CHECK-SVE-MAXBW-NEXT: [[TMP11:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
+; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP11]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP17]])
; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -331,11 +328,11 @@ define i32 @chained_partial_reduce_sub_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-NEON-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; CHECK-NEON-NEXT: [[TMP7:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP8]]
; CHECK-NEON-NEXT: [[TMP11:%.*]] = sub nsw <16 x i32> zeroinitializer, [[TMP10]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP11]])
-; CHECK-NEON-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP9]]
+; CHECK-NEON-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-NEON-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP13]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP12]])
; CHECK-NEON-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEON-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -352,37 +349,34 @@ define i32 @chained_partial_reduce_sub_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2
; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1
; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64
-; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]]
+; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-SVE: vector.ph:
-; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
+; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ]
+; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ]
; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]]
-; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1
-; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD2]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP14]]
-; CHECK-SVE-NEXT: [[TMP17:%.*]] = sub <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]]
-; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP15]]
-; CHECK-SVE-NEXT: [[TMP19]] = add <vscale x 4 x i32> [[TMP17]], [[TMP18]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1
+; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]]
+; CHECK-SVE-NEXT: [[TMP6:%.*]] = sub nsw <16 x i32> zeroinitializer, [[TMP5]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]])
+; CHECK-SVE-NEXT: [[TMP11:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP11]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP12]])
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK-SVE: middle.block:
-; CHECK-SVE-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]])
+; CHECK-SVE-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]])
; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK-SVE: scalar.ph:
@@ -414,11 +408,11 @@ define i32 @chained_partial_reduce_sub_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP9]], align 1
; CHECK-SVE-MAXBW-NEXT: [[TMP13:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP14:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
-; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = sub nsw <vscale x 8 x i32> zeroinitializer, [[TMP16]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP17]])
-; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP15]]
+; CHECK-SVE-MAXBW-NEXT: [[TMP12:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
+; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP12]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP18]])
; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -486,11 +480,11 @@ define i32 @chained_partial_reduce_sub_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-NEON-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; CHECK-NEON-NEXT: [[TMP7:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP8]]
; CHECK-NEON-NEXT: [[TMP11:%.*]] = sub nsw <16 x i32> zeroinitializer, [[TMP10]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP11]])
-; CHECK-NEON-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP9]]
+; CHECK-NEON-NEXT: [[TMP16:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-NEON-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP16]]
; CHECK-NEON-NEXT: [[TMP13:%.*]] = sub <16 x i32> zeroinitializer, [[TMP12]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP13]])
; CHECK-NEON-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
@@ -508,37 +502,35 @@ define i32 @chained_partial_reduce_sub_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2
; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1
; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64
-; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]]
+; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-SVE: vector.ph:
-; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
+; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ]
+; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ]
; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]]
-; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1
-; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD2]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP14]]
-; CHECK-SVE-NEXT: [[TMP17:%.*]] = sub <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]]
-; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP15]]
-; CHECK-SVE-NEXT: [[TMP19]] = sub <vscale x 4 x i32> [[TMP17]], [[TMP18]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1
+; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]]
+; CHECK-SVE-NEXT: [[TMP6:%.*]] = sub nsw <16 x i32> zeroinitializer, [[TMP5]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]])
+; CHECK-SVE-NEXT: [[TMP10:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP10]]
+; CHECK-SVE-NEXT: [[TMP13:%.*]] = sub <16 x i32> zeroinitializer, [[TMP12]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP13]])
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-SVE: middle.block:
-; CHECK-SVE-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]])
+; CHECK-SVE-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]])
; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK-SVE: scalar.ph:
@@ -570,11 +562,11 @@ define i32 @chained_partial_reduce_sub_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP9]], align 1
; CHECK-SVE-MAXBW-NEXT: [[TMP13:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP14:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
-; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = sub nsw <vscale x 8 x i32> zeroinitializer, [[TMP16]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP17]])
-; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP15]]
+; CHECK-SVE-MAXBW-NEXT: [[TMP12:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
+; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP12]]
; CHECK-SVE-MAXBW-NEXT: [[TMP19:%.*]] = sub <vscale x 8 x i32> zeroinitializer, [[TMP18]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP19]])
; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
@@ -644,12 +636,12 @@ define i32 @chained_partial_reduce_add_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-NEON-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; CHECK-NEON-NEXT: [[TMP7:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP8]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP10]])
-; CHECK-NEON-NEXT: [[TMP11:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP9]]
+; CHECK-NEON-NEXT: [[TMP15:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-NEON-NEXT: [[TMP11:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP15]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE3:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP11]])
-; CHECK-NEON-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP8]], [[TMP9]]
+; CHECK-NEON-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP8]], [[TMP15]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE4]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE3]], <16 x i32> [[TMP12]])
; CHECK-NEON-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEON-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -683,9 +675,9 @@ define i32 @chained_partial_reduce_add_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1
; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]]
; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]])
+; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-SVE-NEXT: [[TMP11:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP5]]
; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP11]])
; CHECK-SVE-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP4]], [[TMP5]]
@@ -726,12 +718,12 @@ define i32 @chained_partial_reduce_add_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP9]], align 1
; CHECK-SVE-MAXBW-NEXT: [[TMP13:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP14:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
-; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP16]])
-; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP15]]
+; CHECK-SVE-MAXBW-NEXT: [[TMP11:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
+; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP11]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP17]])
-; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP14]], [[TMP15]]
+; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP14]], [[TMP11]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE4]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE3]], <vscale x 8 x i32> [[TMP18]])
; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -802,13 +794,13 @@ define i32 @chained_partial_reduce_sub_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-NEON-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; CHECK-NEON-NEXT: [[TMP7:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP8]]
; CHECK-NEON-NEXT: [[TMP11:%.*]] = sub nsw <16 x i32> zeroinitializer, [[TMP10]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP11]])
-; CHECK-NEON-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP9]]
+; CHECK-NEON-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-NEON-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP13]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE3:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP12]])
-; CHECK-NEON-NEXT: [[TMP14:%.*]] = mul nsw <16 x i32> [[TMP8]], [[TMP9]]
+; CHECK-NEON-NEXT: [[TMP14:%.*]] = mul nsw <16 x i32> [[TMP8]], [[TMP13]]
; CHECK-NEON-NEXT: [[TMP15:%.*]] = sub <16 x i32> zeroinitializer, [[TMP14]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE4]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE3]], <16 x i32> [[TMP15]])
; CHECK-NEON-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
@@ -826,39 +818,37 @@ define i32 @chained_partial_reduce_sub_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2
; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1
; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64
-; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]]
+; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-SVE: vector.ph:
-; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
+; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP21:%.*]], [[VECTOR_BODY]] ]
+; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE4:%.*]], [[VECTOR_BODY]] ]
; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]]
-; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1
-; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD2]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP14]]
-; CHECK-SVE-NEXT: [[TMP17:%.*]] = sub <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]]
-; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP15]]
-; CHECK-SVE-NEXT: [[TMP19:%.*]] = add <vscale x 4 x i32> [[TMP17]], [[TMP18]]
-; CHECK-SVE-NEXT: [[TMP20:%.*]] = mul nsw <vscale x 4 x i32> [[TMP14]], [[TMP15]]
-; CHECK-SVE-NEXT: [[TMP21]] = sub <vscale x 4 x i32> [[TMP19]], [[TMP20]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1
+; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]]
+; CHECK-SVE-NEXT: [[TMP6:%.*]] = sub nsw <16 x i32> zeroinitializer, [[TMP5]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]])
+; CHECK-SVE-NEXT: [[TMP11:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP13:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP11]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP13]])
+; CHECK-SVE-NEXT: [[TMP14:%.*]] = mul nsw <16 x i32> [[TMP4]], [[TMP11]]
+; CHECK-SVE-NEXT: [[TMP10:%.*]] = sub <16 x i32> zeroinitializer, [[TMP14]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE4]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE3]], <16 x i32> [[TMP10]])
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-SVE-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK-SVE: middle.block:
-; CHECK-SVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP21]])
+; CHECK-SVE-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE4]])
; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK-SVE: scalar.ph:
@@ -890,13 +880,13 @@ define i32 @chained_partial_reduce_sub_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP9]], align 1
; CHECK-SVE-MAXBW-NEXT: [[TMP13:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP14:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
-; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = sub nsw <vscale x 8 x i32> zeroinitializer, [[TMP16]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP17]])
-; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP15]]
+; CHECK-SVE-MAXBW-NEXT: [[TMP12:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
+; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP12]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP18]])
-; CHECK-SVE-MAXBW-NEXT: [[TMP19:%.*]] = mul nsw <vscale x 8 x i32> [[TMP14]], [[TMP15]]
+; CHECK-SVE-MAXBW-NEXT: [[TMP19:%.*]] = mul nsw <vscale x 8 x i32> [[TMP14]], [[TMP12]]
; CHECK-SVE-MAXBW-NEXT: [[TMP20:%.*]] = sub <vscale x 8 x i32> zeroinitializer, [[TMP19]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE4]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE3]], <vscale x 8 x i32> [[TMP20]])
; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
@@ -969,9 +959,9 @@ define i32 @chained_partial_reduce_madd_extadd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-NEON-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; CHECK-NEON-NEXT: [[TMP7:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP8]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP10]])
+; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP9]])
; CHECK-NEON-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEON-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -1005,9 +995,9 @@ define i32 @chained_partial_reduce_madd_extadd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1
; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]]
; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]])
+; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP5]])
; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-SVE-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -1045,9 +1035,9 @@ define i32 @chained_partial_reduce_madd_extadd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP9]], align 1
; CHECK-SVE-MAXBW-NEXT: [[TMP13:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP14:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
-; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP16]])
+; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP15]])
; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -1110,8 +1100,8 @@ define i32 @chained_partial_reduce_extadd_extadd(ptr %a, ptr %b, i32 %N) #0 {
; CHECK-NEON-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
; CHECK-NEON-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
; CHECK-NEON-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-NEON-NEXT: [[TMP6:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]])
+; CHECK-NEON-NEXT: [[TMP6:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE2]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP6]])
; CHECK-NEON-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEON-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -1142,8 +1132,8 @@ define i32 @chained_partial_reduce_extadd_extadd(ptr %a, ptr %b, i32 %N) #0 {
; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
; CHECK-SVE-NEXT: [[TMP2:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP2]])
+; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
; CHECK-SVE-NEXT: [[PARTIAL_REDUCE2]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP3]])
; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-SVE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -1178,8 +1168,8 @@ define i32 @chained_partial_reduce_extadd_extadd(ptr %a, ptr %b, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP7]], align 1
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x i8>, ptr [[TMP8]], align 1
; CHECK-SVE-MAXBW-NEXT: [[TMP11:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
-; CHECK-SVE-MAXBW-NEXT: [[TMP12:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP11]])
+; CHECK-SVE-MAXBW-NEXT: [[TMP12:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE2]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP12]])
; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -1240,10 +1230,10 @@ define i32 @chained_partial_reduce_extadd_madd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-NEON-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
; CHECK-NEON-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
; CHECK-NEON-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
-; CHECK-NEON-NEXT: [[TMP7:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-NEON-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP9]])
+; CHECK-NEON-NEXT: [[TMP7:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-NEON-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP8]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP10]])
; CHECK-NEON-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
@@ -1276,10 +1266,10 @@ define i32 @chained_partial_reduce_extadd_madd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1
-; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]])
+; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]]
; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP6]])
; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
@@ -1316,10 +1306,10 @@ define i32 @chained_partial_reduce_extadd_madd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP7]], align 1
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x i8>, ptr [[TMP8]], align 1
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP9]], align 1
-; CHECK-SVE-MAXBW-NEXT: [[TMP13:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
-; CHECK-SVE-MAXBW-NEXT: [[TMP14:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP15]])
+; CHECK-SVE-MAXBW-NEXT: [[TMP13:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
+; CHECK-SVE-MAXBW-NEXT: [[TMP14:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP16]])
; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-constant-ops.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-constant-ops.ll
index b033f60..b430efc 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-constant-ops.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-constant-ops.ll
@@ -467,3 +467,83 @@ loop:
exit:
ret i32 %red.next
}
+
+define i64 @partial_reduction_mul_two_users(i64 %n, ptr %a, i16 %b, i32 %c) {
+; CHECK-LABEL: define i64 @partial_reduction_mul_two_users(
+; CHECK-SAME: i64 [[N:%.*]], ptr [[A:%.*]], i16 [[B:%.*]], i32 [[C:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 8
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 8
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i16> [[BROADCAST_SPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i16> [[BROADCAST_SPLAT]] to <8 x i32>
+; CHECK-NEXT: [[TMP2:%.*]] = mul <8 x i32> [[TMP1]], [[TMP1]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <8 x i16> poison, i16 [[TMP4]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <8 x i16> [[BROADCAST_SPLATINSERT1]], <8 x i16> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = zext <8 x i32> [[TMP2]] to <8 x i64>
+; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i64> @llvm.vector.partial.reduce.add.v4i64.v8i64(<4 x i64> [[VEC_PHI]], <8 x i64> [[TMP3]])
+; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i16> [[BROADCAST_SPLAT2]] to <8 x i32>
+; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i32> [[TMP5]] to <8 x i64>
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[PARTIAL_REDUCE]])
+; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <8 x i64> [[TMP6]], i32 7
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP8]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[RES1:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[LOAD_EXT_EXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[RES2:%.*]] = phi i64 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[LOAD:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B]] to i32
+; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[CONV]], [[CONV]]
+; CHECK-NEXT: [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
+; CHECK-NEXT: [[ADD]] = add i64 [[RES2]], [[MUL_EXT]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[MUL]], [[C]]
+; CHECK-NEXT: [[LOAD_EXT:%.*]] = sext i16 [[LOAD]] to i32
+; CHECK-NEXT: [[LOAD_EXT_EXT]] = sext i32 [[LOAD_EXT]] to i64
+; CHECK-NEXT: [[EXITCOND740_NOT:%.*]] = icmp eq i64 [[IV]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND740_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i64 [ [[ADD]], %[[LOOP]] ], [ [[TMP8]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i64 [[ADD_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %res1 = phi i64 [ 0, %entry ], [ %load.ext.ext, %loop ]
+ %res2 = phi i64 [ 0, %entry ], [ %add, %loop ]
+ %load = load i16, ptr %a, align 2
+ %iv.next = add i64 %iv, 1
+ %conv = sext i16 %b to i32
+ %mul = mul i32 %conv, %conv
+ %mul.ext = zext i32 %mul to i64
+ %add = add i64 %res2, %mul.ext
+ %second_use = or i32 %mul, %c ; the result is unused; it only exists to give %mul a second user, which is what the test needs
+ %load.ext = sext i16 %load to i32
+ %load.ext.ext = sext i32 %load.ext to i64
+ %exitcond740.not = icmp eq i64 %iv, %n
+ br i1 %exitcond740.not, label %exit, label %loop
+
+exit:
+ ret i64 %add
+}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll
index 8ece59a..d8f1a86 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll
@@ -16,10 +16,10 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
-; CHECK-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1
; CHECK-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEXT: [[TMP9:%.*]] = mul <16 x i32> [[TMP8]], [[TMP5]]
; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP9]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
@@ -68,7 +68,6 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 {
; CHECK-NEXT: [[IV_NEXT:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[A]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP1:%.*]] = sext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32>
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -77,11 +76,12 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 {
; CHECK-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <16 x i8> poison, i8 [[TMP2]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT2]], <16 x i8> poison, <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[BROADCAST_SPLAT3]] to <16 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = sext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[IV_NEXT]]
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[IV_NEXT]]
@@ -89,7 +89,7 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 {
; CHECK: vec.epilog.iter.check:
; CHECK-NEXT: [[IND_END6:%.*]] = add i64 [[IDX_NEG]], [[IV_NEXT]]
; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4
-; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
+; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF4:![0-9]+]]
; CHECK: vec.epilog.ph:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT]], [[WHILE_BODY]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP6]], [[WHILE_BODY]] ], [ 0, [[ENTRY]] ]
@@ -112,7 +112,7 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 {
; CHECK-NEXT: [[TMP13]] = add <4 x i32> [[TMP14]], [[VEC_PHI9]]
; CHECK-NEXT: [[INDEX_NEXT14]] = add nuw i64 [[INDEX9]], 4
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT14]], [[N_VEC5]]
-; CHECK-NEXT: br i1 [[TMP12]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP12]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: vec.epilog.middle.block:
; CHECK-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP13]])
; CHECK-NEXT: [[CMP_N15:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC5]]
@@ -136,7 +136,7 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 {
; CHECK-NEXT: [[CMP_IV_NEG:%.*]] = icmp ugt i64 [[IV_NEG]], 0
; CHECK-NEXT: [[CMP_IV:%.*]] = icmp ne i64 [[ACCUM1]], -1
; CHECK-NEXT: [[EXITCOND:%.*]] = and i1 [[CMP_IV_NEG]], [[CMP_IV]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[WHILE_BODY1]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[WHILE_BODY1]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: while.end.loopexit:
; CHECK-NEXT: [[RESULT:%.*]] = phi i32 [ [[ADD]], [[WHILE_BODY1]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ], [ [[TMP15]], [[VEC_EPILOG_MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret void
@@ -495,7 +495,7 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) {
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16)
; CHECK-NEXT: [[TMP181:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP182:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-NEXT: br label [[EXIT:%.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll
index 09b41fb..26e630f 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll
@@ -26,26 +26,26 @@ define i32 @sudot(ptr %a, ptr %b) #0 {
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[TMP6]], i64 [[TMP9]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP6]], align 1
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 16 x i8>, ptr [[TMP10]], align 1
-; CHECK-NEXT: [[TMP11:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
-; CHECK-NEXT: [[TMP7:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD2]] to <vscale x 16 x i32>
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP16:%.*]] = shl nuw i64 [[TMP15]], 4
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP13]], i64 [[TMP16]]
; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 16 x i8>, ptr [[TMP13]], align 1
; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 16 x i8>, ptr [[TMP17]], align 1
-; CHECK-NEXT: [[TMP12:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD3]] to <vscale x 16 x i32>
-; CHECK-NEXT: [[TMP18:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD4]] to <vscale x 16 x i32>
-; CHECK-NEXT: [[TMP14:%.*]] = mul <vscale x 16 x i32> [[TMP12]], [[TMP11]]
-; CHECK-NEXT: [[TMP19:%.*]] = mul <vscale x 16 x i32> [[TMP18]], [[TMP7]]
-; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP14]])
-; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI1]], <vscale x 16 x i32> [[TMP19]])
+; CHECK-NEXT: [[TMP18:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD3]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP11:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP12:%.*]] = mul <vscale x 16 x i32> [[TMP18]], [[TMP11]]
+; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP12]])
+; CHECK-NEXT: [[TMP19:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD4]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP14:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD2]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP20:%.*]] = mul <vscale x 16 x i32> [[TMP19]], [[TMP14]]
+; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI1]], <vscale x 16 x i32> [[TMP20]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[BIN_RDX:%.*]] = add <vscale x 4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
-; CHECK-NEXT: [[TMP20:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]])
+; CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -62,8 +62,8 @@ define i32 @sudot(ptr %a, ptr %b) #0 {
; CHECK-NOI8MM-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-NOI8MM: vector.body:
; CHECK-NOI8MM-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP23:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP20:%.*]], [[VECTOR_BODY]] ]
; CHECK-NOI8MM-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-NOI8MM-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NOI8MM-NEXT: [[TMP9:%.*]] = shl nuw i64 [[TMP8]], 3
@@ -71,25 +71,25 @@ define i32 @sudot(ptr %a, ptr %b) #0 {
; CHECK-NOI8MM-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP6]], align 1
; CHECK-NOI8MM-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP10]], align 1
; CHECK-NOI8MM-NEXT: [[TMP11:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
-; CHECK-NOI8MM-NEXT: [[TMP12:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
+; CHECK-NOI8MM-NEXT: [[TMP7:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-NOI8MM-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-NOI8MM-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NOI8MM-NEXT: [[TMP16:%.*]] = shl nuw i64 [[TMP15]], 3
; CHECK-NOI8MM-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP13]], i64 [[TMP16]]
; CHECK-NOI8MM-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x i8>, ptr [[TMP13]], align 1
; CHECK-NOI8MM-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x i8>, ptr [[TMP17]], align 1
-; CHECK-NOI8MM-NEXT: [[TMP18:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD3]] to <vscale x 8 x i32>
-; CHECK-NOI8MM-NEXT: [[TMP19:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32>
-; CHECK-NOI8MM-NEXT: [[TMP20:%.*]] = mul <vscale x 8 x i32> [[TMP18]], [[TMP11]]
-; CHECK-NOI8MM-NEXT: [[TMP21:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP12]]
-; CHECK-NOI8MM-NEXT: [[TMP22]] = add <vscale x 8 x i32> [[TMP20]], [[VEC_PHI]]
-; CHECK-NOI8MM-NEXT: [[TMP23]] = add <vscale x 8 x i32> [[TMP21]], [[VEC_PHI1]]
+; CHECK-NOI8MM-NEXT: [[TMP12:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD3]] to <vscale x 8 x i32>
+; CHECK-NOI8MM-NEXT: [[TMP21:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32>
+; CHECK-NOI8MM-NEXT: [[TMP14:%.*]] = mul <vscale x 8 x i32> [[TMP12]], [[TMP11]]
+; CHECK-NOI8MM-NEXT: [[TMP22:%.*]] = mul <vscale x 8 x i32> [[TMP21]], [[TMP7]]
+; CHECK-NOI8MM-NEXT: [[TMP18]] = add <vscale x 8 x i32> [[TMP14]], [[VEC_PHI]]
+; CHECK-NOI8MM-NEXT: [[TMP20]] = add <vscale x 8 x i32> [[TMP22]], [[VEC_PHI1]]
; CHECK-NOI8MM-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NOI8MM-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NOI8MM-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-NOI8MM: middle.block:
-; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <vscale x 8 x i32> [[TMP23]], [[TMP22]]
-; CHECK-NOI8MM-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[BIN_RDX]])
+; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <vscale x 8 x i32> [[TMP20]], [[TMP18]]
+; CHECK-NOI8MM-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[BIN_RDX]])
; CHECK-NOI8MM-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-NOI8MM-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
; CHECK-NOI8MM: scalar.ph:
@@ -137,26 +137,26 @@ define i32 @usdot(ptr %a, ptr %b) #0 {
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[TMP6]], i64 [[TMP9]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP6]], align 1
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 16 x i8>, ptr [[TMP10]], align 1
-; CHECK-NEXT: [[TMP11:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
-; CHECK-NEXT: [[TMP7:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD2]] to <vscale x 16 x i32>
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP16:%.*]] = shl nuw i64 [[TMP15]], 4
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP13]], i64 [[TMP16]]
; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 16 x i8>, ptr [[TMP13]], align 1
; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 16 x i8>, ptr [[TMP17]], align 1
-; CHECK-NEXT: [[TMP12:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD3]] to <vscale x 16 x i32>
-; CHECK-NEXT: [[TMP18:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD4]] to <vscale x 16 x i32>
-; CHECK-NEXT: [[TMP14:%.*]] = mul <vscale x 16 x i32> [[TMP12]], [[TMP11]]
-; CHECK-NEXT: [[TMP19:%.*]] = mul <vscale x 16 x i32> [[TMP18]], [[TMP7]]
-; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP14]])
-; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI1]], <vscale x 16 x i32> [[TMP19]])
+; CHECK-NEXT: [[TMP18:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD3]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP11:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP12:%.*]] = mul <vscale x 16 x i32> [[TMP18]], [[TMP11]]
+; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP12]])
+; CHECK-NEXT: [[TMP19:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD4]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP14:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD2]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP20:%.*]] = mul <vscale x 16 x i32> [[TMP19]], [[TMP14]]
+; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI1]], <vscale x 16 x i32> [[TMP20]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[BIN_RDX:%.*]] = add <vscale x 4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
-; CHECK-NEXT: [[TMP20:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]])
+; CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -173,8 +173,8 @@ define i32 @usdot(ptr %a, ptr %b) #0 {
; CHECK-NOI8MM-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-NOI8MM: vector.body:
; CHECK-NOI8MM-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP23:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP20:%.*]], [[VECTOR_BODY]] ]
; CHECK-NOI8MM-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-NOI8MM-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NOI8MM-NEXT: [[TMP9:%.*]] = shl nuw i64 [[TMP8]], 3
@@ -182,25 +182,25 @@ define i32 @usdot(ptr %a, ptr %b) #0 {
; CHECK-NOI8MM-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP6]], align 1
; CHECK-NOI8MM-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP10]], align 1
; CHECK-NOI8MM-NEXT: [[TMP11:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
-; CHECK-NOI8MM-NEXT: [[TMP12:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
+; CHECK-NOI8MM-NEXT: [[TMP7:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-NOI8MM-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-NOI8MM-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NOI8MM-NEXT: [[TMP16:%.*]] = shl nuw i64 [[TMP15]], 3
; CHECK-NOI8MM-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP13]], i64 [[TMP16]]
; CHECK-NOI8MM-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x i8>, ptr [[TMP13]], align 1
; CHECK-NOI8MM-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x i8>, ptr [[TMP17]], align 1
-; CHECK-NOI8MM-NEXT: [[TMP18:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD3]] to <vscale x 8 x i32>
-; CHECK-NOI8MM-NEXT: [[TMP19:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32>
-; CHECK-NOI8MM-NEXT: [[TMP20:%.*]] = mul <vscale x 8 x i32> [[TMP18]], [[TMP11]]
-; CHECK-NOI8MM-NEXT: [[TMP21:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP12]]
-; CHECK-NOI8MM-NEXT: [[TMP22]] = add <vscale x 8 x i32> [[TMP20]], [[VEC_PHI]]
-; CHECK-NOI8MM-NEXT: [[TMP23]] = add <vscale x 8 x i32> [[TMP21]], [[VEC_PHI1]]
+; CHECK-NOI8MM-NEXT: [[TMP12:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD3]] to <vscale x 8 x i32>
+; CHECK-NOI8MM-NEXT: [[TMP21:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32>
+; CHECK-NOI8MM-NEXT: [[TMP14:%.*]] = mul <vscale x 8 x i32> [[TMP12]], [[TMP11]]
+; CHECK-NOI8MM-NEXT: [[TMP22:%.*]] = mul <vscale x 8 x i32> [[TMP21]], [[TMP7]]
+; CHECK-NOI8MM-NEXT: [[TMP18]] = add <vscale x 8 x i32> [[TMP14]], [[VEC_PHI]]
+; CHECK-NOI8MM-NEXT: [[TMP20]] = add <vscale x 8 x i32> [[TMP22]], [[VEC_PHI1]]
; CHECK-NOI8MM-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NOI8MM-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NOI8MM-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-NOI8MM: middle.block:
-; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <vscale x 8 x i32> [[TMP23]], [[TMP22]]
-; CHECK-NOI8MM-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[BIN_RDX]])
+; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <vscale x 8 x i32> [[TMP20]], [[TMP18]]
+; CHECK-NOI8MM-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[BIN_RDX]])
; CHECK-NOI8MM-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-NOI8MM-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
; CHECK-NOI8MM: scalar.ph:
@@ -242,18 +242,18 @@ define i32 @sudot_neon(ptr %a, ptr %b) #1 {
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
-; CHECK-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 16
; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
-; CHECK-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD3]] to <16 x i32>
-; CHECK-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
-; CHECK-NEXT: [[TMP10:%.*]] = mul <16 x i32> [[TMP8]], [[TMP3]]
+; CHECK-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD3]] to <16 x i32>
+; CHECK-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEXT: [[TMP11:%.*]] = mul <16 x i32> [[TMP9]], [[TMP4]]
-; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP10]])
-; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP11]])
+; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP11]])
+; CHECK-NEXT: [[TMP10:%.*]] = sext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
+; CHECK-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-NEXT: [[TMP14:%.*]] = mul <16 x i32> [[TMP10]], [[TMP8]]
+; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP14]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
@@ -337,18 +337,18 @@ define i32 @usdot_neon(ptr %a, ptr %b) #1 {
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
-; CHECK-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 16
; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
-; CHECK-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32>
-; CHECK-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
-; CHECK-NEXT: [[TMP10:%.*]] = mul <16 x i32> [[TMP8]], [[TMP3]]
+; CHECK-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32>
+; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEXT: [[TMP11:%.*]] = mul <16 x i32> [[TMP9]], [[TMP4]]
-; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP10]])
-; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP11]])
+; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP11]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
+; CHECK-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-NEXT: [[TMP14:%.*]] = mul <16 x i32> [[TMP10]], [[TMP8]]
+; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP14]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll
index 801eb81..b847631 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll
@@ -18,10 +18,10 @@ define i32 @dotp(ptr %a, ptr %b) {
; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1
; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = mul <16 x i32> [[TMP6]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP7]])
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
@@ -47,17 +47,17 @@ define i32 @dotp(ptr %a, ptr %b) {
; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP6]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = mul <16 x i32> [[TMP9]], [[TMP4]]
-; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = mul <16 x i32> [[TMP10]], [[TMP5]]
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP11]])
+; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = mul <16 x i32> [[TMP7]], [[TMP10]]
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP12]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -80,10 +80,10 @@ define i32 @dotp(ptr %a, ptr %b) {
; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
-; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-MAXBW-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1
; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = mul <16 x i32> [[TMP6]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP7]])
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
@@ -720,26 +720,26 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) {
; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]]
; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = sext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP15]], [[TMP13]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP16]])
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = sext <16 x i8> [[WIDE_LOAD5]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
+; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = sext <16 x i8> [[WIDE_LOAD5]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[WIDE_LOAD6]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = mul nsw <16 x i32> [[TMP18]], [[TMP20]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE7]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP21]])
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD9:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
+; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP25:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP26:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP25]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE10]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP26]])
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD11:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP28:%.*]] = sext <16 x i8> [[WIDE_LOAD11]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD12:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1
+; CHECK-INTERLEAVE1-NEXT: [[TMP28:%.*]] = sext <16 x i8> [[WIDE_LOAD11]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP30:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP31:%.*]] = mul nsw <16 x i32> [[TMP28]], [[TMP30]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE13]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP31]])
@@ -788,59 +788,59 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) {
; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP38]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD9:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD10:%.*]] = load <16 x i8>, ptr [[TMP16]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = sext <16 x i8> [[WIDE_LOAD10]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = mul nsw <16 x i32> [[TMP17]], [[TMP13]]
+; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = mul nsw <16 x i32> [[TMP18]], [[TMP14]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE1]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI6]], <16 x i32> [[TMP19]])
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE11]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP20]])
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE1]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI6]], <16 x i32> [[TMP20]])
+; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = sext <16 x i8> [[WIDE_LOAD10]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = mul nsw <16 x i32> [[TMP19]], [[TMP17]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE11]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP21]])
; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD12:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD13:%.*]] = load <16 x i8>, ptr [[TMP22]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = sext <16 x i8> [[WIDE_LOAD13]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD14:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD15:%.*]] = load <16 x i8>, ptr [[TMP26]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = sext <16 x i8> [[WIDE_LOAD14]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = sext <16 x i8> [[WIDE_LOAD15]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP27]]
+; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = sext <16 x i8> [[WIDE_LOAD14]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = mul nsw <16 x i32> [[TMP24]], [[TMP28]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP29]])
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP30]])
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP30]])
+; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = sext <16 x i8> [[WIDE_LOAD13]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = sext <16 x i8> [[WIDE_LOAD15]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = mul nsw <16 x i32> [[TMP27]], [[TMP25]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP29]])
; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD18:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD19:%.*]] = load <16 x i8>, ptr [[TMP32]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP33:%.*]] = sext <16 x i8> [[WIDE_LOAD18]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = sext <16 x i8> [[WIDE_LOAD19]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD20:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD21:%.*]] = load <16 x i8>, ptr [[TMP36]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = sext <16 x i8> [[WIDE_LOAD20]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP56:%.*]] = sext <16 x i8> [[WIDE_LOAD21]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP39:%.*]] = mul nsw <16 x i32> [[TMP33]], [[TMP37]]
+; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = sext <16 x i8> [[WIDE_LOAD18]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP56:%.*]] = sext <16 x i8> [[WIDE_LOAD20]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP40:%.*]] = mul nsw <16 x i32> [[TMP34]], [[TMP56]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE7]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP39]])
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP40]])
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE7]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP40]])
+; CHECK-INTERLEAVED-NEXT: [[TMP35:%.*]] = sext <16 x i8> [[WIDE_LOAD19]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP33:%.*]] = sext <16 x i8> [[WIDE_LOAD21]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = mul nsw <16 x i32> [[TMP35]], [[TMP33]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP37]])
; CHECK-INTERLEAVED-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[TMP10]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD24:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD25:%.*]] = load <16 x i8>, ptr [[TMP42]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP43:%.*]] = sext <16 x i8> [[WIDE_LOAD24]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP44:%.*]] = sext <16 x i8> [[WIDE_LOAD25]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP46:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD26:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD27:%.*]] = load <16 x i8>, ptr [[TMP46]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP47:%.*]] = sext <16 x i8> [[WIDE_LOAD26]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP48:%.*]] = sext <16 x i8> [[WIDE_LOAD27]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP49:%.*]] = mul nsw <16 x i32> [[TMP43]], [[TMP47]]
+; CHECK-INTERLEAVED-NEXT: [[TMP44:%.*]] = sext <16 x i8> [[WIDE_LOAD24]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP48:%.*]] = sext <16 x i8> [[WIDE_LOAD26]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP50:%.*]] = mul nsw <16 x i32> [[TMP44]], [[TMP48]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE13]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP49]])
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE10]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP50]])
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE13]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP50]])
+; CHECK-INTERLEAVED-NEXT: [[TMP43:%.*]] = sext <16 x i8> [[WIDE_LOAD25]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP41:%.*]] = sext <16 x i8> [[WIDE_LOAD27]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP45:%.*]] = mul nsw <16 x i32> [[TMP43]], [[TMP41]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE10]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP45]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP51:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP51]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
@@ -884,26 +884,26 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) {
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]]
; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]]
; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
-; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = sext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP15]], [[TMP13]]
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP16]])
; CHECK-MAXBW-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1
-; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = sext <16 x i8> [[WIDE_LOAD5]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
+; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = sext <16 x i8> [[WIDE_LOAD5]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[WIDE_LOAD6]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = mul nsw <16 x i32> [[TMP18]], [[TMP20]]
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE7]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP21]])
; CHECK-MAXBW-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
-; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[WIDE_LOAD9:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
+; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[TMP25:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[TMP26:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP25]]
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE10]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP26]])
; CHECK-MAXBW-NEXT: [[WIDE_LOAD11:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1
-; CHECK-MAXBW-NEXT: [[TMP28:%.*]] = sext <16 x i8> [[WIDE_LOAD11]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[WIDE_LOAD12:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1
+; CHECK-MAXBW-NEXT: [[TMP28:%.*]] = sext <16 x i8> [[WIDE_LOAD11]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[TMP30:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[TMP31:%.*]] = mul nsw <16 x i32> [[TMP28]], [[TMP30]]
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE13]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP31]])
@@ -2025,7 +2025,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) {
; CHECK-INTERLEAVE1-NEXT: [[TMP8]] = add <16 x i32> [[TMP7]], [[VEC_PHI]]
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP8]])
; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP6]], i32 15
@@ -2062,7 +2062,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) {
; CHECK-INTERLEAVED-NEXT: [[TMP14]] = add <16 x i32> [[TMP12]], [[VEC_PHI1]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP14]], [[TMP13]]
; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]])
@@ -2091,7 +2091,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) {
; CHECK-MAXBW-NEXT: [[TMP8]] = add <16 x i32> [[TMP7]], [[VEC_PHI]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-MAXBW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP8]])
; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP6]], i32 15
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll
index 1ace7d4..4636c1b 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll
@@ -18,11 +18,11 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX1]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP16]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX1]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP20]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP3]], [[TMP1]]
+; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP2]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]])
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT1]], 1024
@@ -47,17 +47,17 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP20]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP20]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX1]]
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP28]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP28]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP28]], align 1
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = mul <16 x i32> [[TMP4]], [[TMP5]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]])
; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = mul <16 x i32> [[TMP6]], [[TMP2]]
-; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = mul <16 x i32> [[TMP7]], [[TMP3]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP8]])
+; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = mul <16 x i32> [[TMP7]], [[TMP8]]
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP9]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT1]], 1024
@@ -75,26 +75,26 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK-MAXBW: vector.ph:
; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-MAXBW-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ]
+; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP7]], align 1
-; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP7]], align 1
; CHECK-MAXBW-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x i8>, ptr [[TMP14]], align 1
-; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32>
-; CHECK-MAXBW-NEXT: [[TMP22:%.*]] = mul <vscale x 8 x i32> [[TMP20]], [[TMP13]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI1]], <vscale x 8 x i32> [[TMP22]])
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 16 x i8>, ptr [[TMP14]], align 1
+; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD1]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = mul <vscale x 16 x i32> [[TMP4]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP6]])
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-MAXBW: middle.block:
-; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE5]])
+; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
; CHECK-MAXBW: scalar.ph:
@@ -134,10 +134,10 @@ define i64 @not_dotp_i8_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %b
; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[NEXT_GEP1]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
-; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = mul nuw nsw <16 x i64> [[TMP1]], [[TMP0]]
+; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = mul nuw nsw <16 x i64> [[TMP0]], [[TMP1]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP2]])
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -157,54 +157,78 @@ define i64 @not_dotp_i8_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %b
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE6:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE13:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE14:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
+; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 32
+; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 48
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[NEXT_GEP1]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = mul nuw nsw <16 x i64> [[TMP1]], [[TMP0]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP2]])
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD7:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i32 32
+; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i32 48
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[NEXT_GEP1]], align 1
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD10:%.*]] = load <16 x i8>, ptr [[TMP18]], align 1
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD11:%.*]] = load <16 x i8>, ptr [[TMP19]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul nuw nsw <16 x i64> [[TMP2]], [[TMP3]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP4]])
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD5]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = mul nuw nsw <16 x i64> [[TMP5]], [[TMP6]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE6]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI1]], <16 x i64> [[TMP7]])
+; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = zext <16 x i8> [[WIDE_LOAD10]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext <16 x i8> [[WIDE_LOAD6]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = mul nuw nsw <16 x i64> [[TMP12]], [[TMP13]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE13]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI2]], <16 x i64> [[TMP14]])
+; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext <16 x i8> [[WIDE_LOAD11]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = zext <16 x i8> [[WIDE_LOAD7]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = mul nuw nsw <16 x i64> [[TMP15]], [[TMP16]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE14]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI3]], <16 x i64> [[TMP17]])
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 64
+; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
-; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]])
+; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE6]], [[PARTIAL_REDUCE]]
+; CHECK-INTERLEAVED-NEXT: [[BIN_RDX15:%.*]] = add <2 x i64> [[PARTIAL_REDUCE13]], [[BIN_RDX]]
+; CHECK-INTERLEAVED-NEXT: [[BIN_RDX16:%.*]] = add <2 x i64> [[PARTIAL_REDUCE14]], [[BIN_RDX15]]
+; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX16]])
; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]]
; CHECK-INTERLEAVED: for.exit:
-; CHECK-INTERLEAVED-NEXT: ret i64 [[TMP4]]
+; CHECK-INTERLEAVED-NEXT: ret i64 [[TMP9]]
;
; CHECK-MAXBW-LABEL: define i64 @not_dotp_i8_to_i64_has_neon_dotprod(
; CHECK-MAXBW-SAME: ptr readonly [[A:%.*]], ptr readonly [[B:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-MAXBW-NEXT: entry:
; CHECK-MAXBW-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK-MAXBW: vector.ph:
-; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]]
-; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[B]], i64 [[N_VEC]]
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
+; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-MAXBW-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-MAXBW-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[NEXT_GEP]], align 1
-; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i64>
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[NEXT_GEP1]], align 1
-; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i64>
-; CHECK-MAXBW-NEXT: [[TMP14:%.*]] = mul nuw nsw <vscale x 8 x i64> [[TMP13]], [[TMP11]]
-; CHECK-MAXBW-NEXT: [[TMP15]] = add <vscale x 8 x i64> [[TMP14]], [[VEC_PHI]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
-; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[NEXT_GEP1]], align 1
+; CHECK-MAXBW-NEXT: [[TMP0:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
+; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
+; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = mul nuw nsw <16 x i64> [[TMP0]], [[TMP1]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP2]])
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-MAXBW-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-MAXBW: middle.block:
-; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = call i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64> [[TMP15]])
-; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
-; CHECK-MAXBW: scalar.ph:
+; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]])
+; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]]
+; CHECK-MAXBW: for.exit:
+; CHECK-MAXBW-NEXT: ret i64 [[TMP4]]
;
entry:
br label %for.body
@@ -245,10 +269,10 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %
; CHECK-INTERLEAVE1-NEXT: [[OFFSET_IDX1:%.*]] = mul i64 [[INDEX]], 2
; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[OFFSET_IDX1]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2
-; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i16>, ptr [[NEXT_GEP2]], align 2
-; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <8 x i16> [[WIDE_LOAD3]] to <8 x i64>
-; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = mul nuw nsw <8 x i64> [[TMP1]], [[TMP0]]
+; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = zext <8 x i16> [[WIDE_LOAD3]] to <8 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = mul nuw nsw <8 x i64> [[TMP0]], [[TMP1]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP2]])
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -269,30 +293,50 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE7:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE14:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE15:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2
; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-INTERLEAVED-NEXT: [[OFFSET_IDX2:%.*]] = mul i64 [[INDEX]], 2
; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[B]], i64 [[OFFSET_IDX2]]
; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i32 8
+; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i32 24
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i16>, ptr [[TMP0]], align 2
-; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <8 x i16> [[WIDE_LOAD4]] to <8 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 8
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD7:%.*]] = load <8 x i16>, ptr [[TMP10]], align 2
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD8:%.*]] = load <8 x i16>, ptr [[TMP11]], align 2
+; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 8
+; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 24
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i16>, ptr [[NEXT_GEP3]], align 2
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i16>, ptr [[TMP3]], align 2
-; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <8 x i16> [[WIDE_LOAD5]] to <8 x i64>
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD11:%.*]] = load <8 x i16>, ptr [[TMP18]], align 2
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD12:%.*]] = load <8 x i16>, ptr [[TMP19]], align 2
+; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <8 x i16> [[WIDE_LOAD5]] to <8 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul nuw nsw <8 x i64> [[TMP2]], [[TMP3]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP4]])
; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <8 x i16> [[WIDE_LOAD6]] to <8 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = mul nuw nsw <8 x i64> [[TMP4]], [[TMP1]]
-; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = mul nuw nsw <8 x i64> [[TMP5]], [[TMP2]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP6]])
+; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <8 x i16> [[WIDE_LOAD4]] to <8 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = mul nuw nsw <8 x i64> [[TMP5]], [[TMP6]]
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE7]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI1]], <8 x i64> [[TMP7]])
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = zext <8 x i16> [[WIDE_LOAD11]] to <8 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext <8 x i16> [[WIDE_LOAD7]] to <8 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = mul nuw nsw <8 x i64> [[TMP12]], [[TMP13]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE14]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI2]], <8 x i64> [[TMP14]])
+; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext <8 x i16> [[WIDE_LOAD12]] to <8 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = zext <8 x i16> [[WIDE_LOAD8]] to <8 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = mul nuw nsw <8 x i64> [[TMP15]], [[TMP16]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE15]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI3]], <8 x i64> [[TMP17]])
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE7]], [[PARTIAL_REDUCE]]
-; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]])
+; CHECK-INTERLEAVED-NEXT: [[BIN_RDX16:%.*]] = add <2 x i64> [[PARTIAL_REDUCE14]], [[BIN_RDX]]
+; CHECK-INTERLEAVED-NEXT: [[BIN_RDX17:%.*]] = add <2 x i64> [[PARTIAL_REDUCE15]], [[BIN_RDX16]]
+; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX17]])
; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]]
; CHECK-INTERLEAVED: for.exit:
; CHECK-INTERLEAVED-NEXT: ret i64 [[TMP9]]
@@ -302,36 +346,28 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %
; CHECK-MAXBW-NEXT: entry:
; CHECK-MAXBW-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK-MAXBW: vector.ph:
-; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = mul i64 [[N_VEC]], 2
-; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP6]]
-; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = mul i64 [[N_VEC]], 2
-; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP8]]
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
+; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-MAXBW-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2
; CHECK-MAXBW-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-MAXBW-NEXT: [[OFFSET_IDX1:%.*]] = mul i64 [[INDEX]], 2
; CHECK-MAXBW-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[OFFSET_IDX1]]
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i16>, ptr [[NEXT_GEP]], align 2
-; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = zext <vscale x 4 x i16> [[WIDE_LOAD]] to <vscale x 4 x i64>
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 4 x i16>, ptr [[NEXT_GEP2]], align 2
-; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = zext <vscale x 4 x i16> [[WIDE_LOAD3]] to <vscale x 4 x i64>
-; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = mul nuw nsw <vscale x 4 x i64> [[TMP15]], [[TMP13]]
-; CHECK-MAXBW-NEXT: [[TMP17]] = add <vscale x 4 x i64> [[TMP16]], [[VEC_PHI]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
-; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i16>, ptr [[NEXT_GEP2]], align 2
+; CHECK-MAXBW-NEXT: [[TMP0:%.*]] = zext <8 x i16> [[WIDE_LOAD3]] to <8 x i64>
+; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
+; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = mul nuw nsw <8 x i64> [[TMP0]], [[TMP1]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP2]])
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-MAXBW-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK-MAXBW: middle.block:
-; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> [[TMP17]])
-; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
-; CHECK-MAXBW: scalar.ph:
+; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]])
+; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]]
+; CHECK-MAXBW: for.exit:
+; CHECK-MAXBW-NEXT: ret i64 [[TMP4]]
;
entry:
br label %for.body
@@ -687,7 +723,7 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP138]] = add <16 x i32> [[TMP136]], [[VEC_PHI1]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-MAXBW-NEXT: [[TMP70:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-MAXBW-NEXT: br i1 [[TMP70]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP70]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP71:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP138]])
; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]]
@@ -835,7 +871,7 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP27:%.*]] = add <vscale x 8 x i32> [[TMP25]], [[TMP26]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = mul nuw i32 [[TMP20]], 8
@@ -964,7 +1000,7 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = add <vscale x 8 x i32> [[TMP20]], [[TMP19]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-MAXBW-NEXT: [[TMP24:%.*]] = mul nuw i32 [[TMP23]], 8
@@ -1024,26 +1060,26 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]]
; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = sext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = mul nsw <16 x i32> [[TMP12]], [[TMP23]]
+; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP12]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP13]])
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[WIDE_LOAD5]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
+; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[WIDE_LOAD5]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = sext <16 x i8> [[WIDE_LOAD6]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP14]], [[TMP15]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE7]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP16]])
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD9:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
+; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = mul nsw <16 x i32> [[TMP17]], [[TMP18]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE10]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP19]])
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD11:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[WIDE_LOAD11]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD12:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1
+; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[WIDE_LOAD11]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP22:%.*]] = mul nsw <16 x i32> [[TMP20]], [[TMP21]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE13]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP22]])
@@ -1092,58 +1128,58 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP43]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD9:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD10:%.*]] = load <16 x i8>, ptr [[TMP14]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD10:%.*]] = load <16 x i8>, ptr [[TMP12]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = mul nsw <16 x i32> [[TMP13]], [[TMP14]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI6]], <16 x i32> [[TMP15]])
; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = sext <16 x i8> [[WIDE_LOAD10]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = mul nsw <16 x i32> [[TMP15]], [[TMP12]]
-; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = mul nsw <16 x i32> [[TMP16]], [[TMP13]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI6]], <16 x i32> [[TMP17]])
+; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = mul nsw <16 x i32> [[TMP16]], [[TMP17]]
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE11]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP18]])
; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD12:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD13:%.*]] = load <16 x i8>, ptr [[TMP19]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = sext <16 x i8> [[WIDE_LOAD13]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD14:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD15:%.*]] = load <16 x i8>, ptr [[TMP22]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD14]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = sext <16 x i8> [[WIDE_LOAD15]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = mul nsw <16 x i32> [[TMP20]], [[TMP23]]
-; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = mul nsw <16 x i32> [[TMP21]], [[TMP24]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP25]])
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD15:%.*]] = load <16 x i8>, ptr [[TMP20]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = sext <16 x i8> [[WIDE_LOAD14]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = mul nsw <16 x i32> [[TMP21]], [[TMP22]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP23]])
+; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = sext <16 x i8> [[WIDE_LOAD13]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = sext <16 x i8> [[WIDE_LOAD15]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = mul nsw <16 x i32> [[TMP24]], [[TMP25]]
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP26]])
; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD18:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD19:%.*]] = load <16 x i8>, ptr [[TMP27]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = sext <16 x i8> [[WIDE_LOAD18]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = sext <16 x i8> [[WIDE_LOAD19]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD20:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD21:%.*]] = load <16 x i8>, ptr [[TMP30]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = sext <16 x i8> [[WIDE_LOAD20]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP48:%.*]] = sext <16 x i8> [[WIDE_LOAD21]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP33:%.*]] = mul nsw <16 x i32> [[TMP28]], [[TMP31]]
-; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = mul nsw <16 x i32> [[TMP29]], [[TMP48]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE22]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP33]])
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD21:%.*]] = load <16 x i8>, ptr [[TMP28]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = sext <16 x i8> [[WIDE_LOAD18]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = sext <16 x i8> [[WIDE_LOAD20]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = mul nsw <16 x i32> [[TMP29]], [[TMP30]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE22]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP31]])
+; CHECK-INTERLEAVED-NEXT: [[TMP48:%.*]] = sext <16 x i8> [[WIDE_LOAD19]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP33:%.*]] = sext <16 x i8> [[WIDE_LOAD21]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = mul nsw <16 x i32> [[TMP48]], [[TMP33]]
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE23]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP34]])
; CHECK-INTERLEAVED-NEXT: [[TMP35:%.*]] = getelementptr inbounds i8, ptr [[TMP10]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD24:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD25:%.*]] = load <16 x i8>, ptr [[TMP35]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = sext <16 x i8> [[WIDE_LOAD24]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = sext <16 x i8> [[WIDE_LOAD25]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD26:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD27:%.*]] = load <16 x i8>, ptr [[TMP38]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP39:%.*]] = sext <16 x i8> [[WIDE_LOAD26]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP40:%.*]] = sext <16 x i8> [[WIDE_LOAD27]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP41:%.*]] = mul nsw <16 x i32> [[TMP36]], [[TMP39]]
-; CHECK-INTERLEAVED-NEXT: [[TMP42:%.*]] = mul nsw <16 x i32> [[TMP37]], [[TMP40]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE28]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP41]])
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD27:%.*]] = load <16 x i8>, ptr [[TMP36]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = sext <16 x i8> [[WIDE_LOAD24]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = sext <16 x i8> [[WIDE_LOAD26]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP39:%.*]] = mul nsw <16 x i32> [[TMP37]], [[TMP38]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE28]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP39]])
+; CHECK-INTERLEAVED-NEXT: [[TMP40:%.*]] = sext <16 x i8> [[WIDE_LOAD25]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP41:%.*]] = sext <16 x i8> [[WIDE_LOAD27]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP42:%.*]] = mul nsw <16 x i32> [[TMP40]], [[TMP41]]
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE29]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP42]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -1165,21 +1201,21 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-MAXBW-SAME: i32 [[NUM_OUT:%.*]], i64 [[NUM_IN:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-MAXBW-NEXT: entry:
; CHECK-MAXBW-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
+; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
; CHECK-MAXBW-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUM_IN]], [[TMP1]]
; CHECK-MAXBW-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-MAXBW: vector.ph:
; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[NUM_IN]], [[N_MOD_VF]]
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-MAXBW-NEXT: [[VEC_PHI4:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE16:%.*]], [[VECTOR_BODY]] ]
-; CHECK-MAXBW-NEXT: [[VEC_PHI5:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE17:%.*]], [[VECTOR_BODY]] ]
-; CHECK-MAXBW-NEXT: [[VEC_PHI6:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
-; CHECK-MAXBW-NEXT: [[VEC_PHI7:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE11:%.*]], [[VECTOR_BODY]] ]
+; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE13:%.*]], [[VECTOR_BODY]] ]
+; CHECK-MAXBW-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE10:%.*]], [[VECTOR_BODY]] ]
+; CHECK-MAXBW-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE7:%.*]], [[VECTOR_BODY]] ]
+; CHECK-MAXBW-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]]
; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = or disjoint i64 [[INDEX]], 1
@@ -1191,38 +1227,38 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = or disjoint i64 [[INDEX]], 3
; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP15]]
; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP15]]
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP7]], align 1
-; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 8 x i8>, ptr [[TMP8]], align 1
-; CHECK-MAXBW-NEXT: [[TMP29:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD9]] to <vscale x 8 x i32>
-; CHECK-MAXBW-NEXT: [[TMP31:%.*]] = mul nsw <vscale x 8 x i32> [[TMP29]], [[TMP23]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE11]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI7]], <vscale x 8 x i32> [[TMP31]])
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD12:%.*]] = load <vscale x 8 x i8>, ptr [[TMP10]], align 1
-; CHECK-MAXBW-NEXT: [[TMP37:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD12]] to <vscale x 8 x i32>
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD14:%.*]] = load <vscale x 8 x i8>, ptr [[TMP11]], align 1
-; CHECK-MAXBW-NEXT: [[TMP43:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD14]] to <vscale x 8 x i32>
-; CHECK-MAXBW-NEXT: [[TMP45:%.*]] = mul nsw <vscale x 8 x i32> [[TMP37]], [[TMP43]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI6]], <vscale x 8 x i32> [[TMP45]])
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD18:%.*]] = load <vscale x 8 x i8>, ptr [[TMP13]], align 1
-; CHECK-MAXBW-NEXT: [[TMP51:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD18]] to <vscale x 8 x i32>
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD20:%.*]] = load <vscale x 8 x i8>, ptr [[TMP14]], align 1
-; CHECK-MAXBW-NEXT: [[TMP57:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD20]] to <vscale x 8 x i32>
-; CHECK-MAXBW-NEXT: [[TMP59:%.*]] = mul nsw <vscale x 8 x i32> [[TMP51]], [[TMP57]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE17]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI5]], <vscale x 8 x i32> [[TMP59]])
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD24:%.*]] = load <vscale x 8 x i8>, ptr [[TMP16]], align 1
-; CHECK-MAXBW-NEXT: [[TMP65:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD24]] to <vscale x 8 x i32>
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD26:%.*]] = load <vscale x 8 x i8>, ptr [[TMP17]], align 1
-; CHECK-MAXBW-NEXT: [[TMP71:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD26]] to <vscale x 8 x i32>
-; CHECK-MAXBW-NEXT: [[TMP73:%.*]] = mul nsw <vscale x 8 x i32> [[TMP65]], [[TMP71]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE16]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI4]], <vscale x 8 x i32> [[TMP73]])
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP7]], align 1
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 16 x i8>, ptr [[TMP8]], align 1
+; CHECK-MAXBW-NEXT: [[TMP27:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD4]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP32:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP33:%.*]] = mul nsw <vscale x 16 x i32> [[TMP27]], [[TMP32]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI3]], <vscale x 16 x i32> [[TMP33]])
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 16 x i8>, ptr [[TMP10]], align 1
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 16 x i8>, ptr [[TMP11]], align 1
+; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD5]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD6]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = mul nsw <vscale x 16 x i32> [[TMP18]], [[TMP19]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE7]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI2]], <vscale x 16 x i32> [[TMP20]])
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 16 x i8>, ptr [[TMP13]], align 1
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 16 x i8>, ptr [[TMP14]], align 1
+; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD8]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP22:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD9]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = mul nsw <vscale x 16 x i32> [[TMP21]], [[TMP22]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE10]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI1]], <vscale x 16 x i32> [[TMP23]])
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD11:%.*]] = load <vscale x 16 x i8>, ptr [[TMP16]], align 1
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD12:%.*]] = load <vscale x 16 x i8>, ptr [[TMP17]], align 1
+; CHECK-MAXBW-NEXT: [[TMP24:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD11]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP25:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD12]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP26:%.*]] = mul nsw <vscale x 16 x i32> [[TMP24]], [[TMP25]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE13]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP26]])
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP74:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP74]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP74]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK-MAXBW: middle.block:
-; CHECK-MAXBW-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE16]])
-; CHECK-MAXBW-NEXT: [[TMP40:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE17]])
-; CHECK-MAXBW-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]])
-; CHECK-MAXBW-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE11]])
+; CHECK-MAXBW-NEXT: [[TMP28:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE13]])
+; CHECK-MAXBW-NEXT: [[TMP29:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE10]])
+; CHECK-MAXBW-NEXT: [[TMP30:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE7]])
+; CHECK-MAXBW-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[NUM_IN]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK-MAXBW: scalar.ph:
@@ -1390,7 +1426,7 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP9]])
; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = xor i1 [[TMP19]], true
-; CHECK-MAXBW-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT: br label [[EXIT:%.*]]
@@ -1525,7 +1561,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP24]] = add <vscale x 8 x i32> [[TMP22]], [[VEC_PHI1]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[TMP24]])
; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32()
@@ -1565,110 +1601,93 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: entry:
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK-INTERLEAVE1: vector.ph:
-; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 41, [[TMP3]]
-; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 41, [[N_MOD_VF]]
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
-; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, ptr [[TMP7]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD]] to <vscale x 2 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = add nuw nsw i64 [[INDEX]], 1
; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 2 x i8>, ptr [[TMP11]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD1]] to <vscale x 2 x i64>
-; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP13]], [[TMP9]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP15]] = add <vscale x 2 x i64> [[VEC_PHI]], [[TMP14]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1
+; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = mul nuw nsw <16 x i64> [[TMP3]], [[TMP4]]
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP5]])
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
-; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP15]])
-; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH:%.*]]
+; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]])
+; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK-INTERLEAVE1: scalar.ph:
;
; CHECK-INTERLEAVED-LABEL: define i64 @dotp_cost_disagreement(
; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-INTERLEAVED-NEXT: entry:
-; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 41, [[TMP1]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK-INTERLEAVED: vector.ph:
-; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 41, [[TMP3]]
-; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 41, [[N_MOD_VF]]
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP24:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP25:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
-; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = shl nuw i64 [[TMP9]], 1
-; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP7]], i64 [[TMP10]]
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, ptr [[TMP7]], align 1
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 2 x i8>, ptr [[TMP11]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD]] to <vscale x 2 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD2]] to <vscale x 2 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP7]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = add nuw nsw i64 [[INDEX]], 1
; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[TMP14]]
-; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = shl nuw i64 [[TMP17]], 1
-; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP15]], i64 [[TMP18]]
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 2 x i8>, ptr [[TMP15]], align 1
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 2 x i8>, ptr [[TMP19]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD3]] to <vscale x 2 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD4]] to <vscale x 2 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP20]], [[TMP12]]
-; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP21]], [[TMP13]]
-; CHECK-INTERLEAVED-NEXT: [[TMP24]] = add <vscale x 2 x i64> [[VEC_PHI]], [[TMP22]]
-; CHECK-INTERLEAVED-NEXT: [[TMP25]] = add <vscale x 2 x i64> [[VEC_PHI1]], [[TMP23]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
-; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP15]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP15]], align 1
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = mul nuw nsw <16 x i64> [[TMP5]], [[TMP6]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP13]])
+; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = mul nuw nsw <16 x i64> [[TMP8]], [[TMP9]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI1]], <16 x i64> [[TMP10]])
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
+; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
-; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 2 x i64> [[TMP25]], [[TMP24]]
-; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[BIN_RDX]])
-; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
+; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]])
+; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK-INTERLEAVED: scalar.ph:
;
; CHECK-MAXBW-LABEL: define i64 @dotp_cost_disagreement(
; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-MAXBW-NEXT: entry:
; CHECK-MAXBW-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
+; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
; CHECK-MAXBW-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 41, [[TMP1]]
; CHECK-MAXBW-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-MAXBW: vector.ph:
; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 41, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 41, [[N_MOD_VF]]
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
+; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP7]], align 1
-; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i64>
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP7]], align 1
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = add nuw nsw i64 [[INDEX]], 1
; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[TMP10]]
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x i8>, ptr [[TMP11]], align 1
-; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i64>
-; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = mul nuw nsw <vscale x 8 x i64> [[TMP8]], [[TMP9]]
-; CHECK-MAXBW-NEXT: [[TMP14]] = add <vscale x 8 x i64> [[VEC_PHI]], [[TMP13]]
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 16 x i8>, ptr [[TMP11]], align 1
+; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD1]] to <vscale x 16 x i64>
+; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i64>
+; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = mul nuw nsw <vscale x 16 x i64> [[TMP12]], [[TMP8]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i64> @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64(<vscale x 2 x i64> [[VEC_PHI]], <vscale x 16 x i64> [[TMP9]])
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK-MAXBW: middle.block:
-; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = call i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64> [[TMP14]])
+; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK-MAXBW: scalar.ph:
@@ -1971,7 +1990,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 {
; CHECK-MAXBW-NEXT: [[TMP11]] = add <vscale x 4 x i64> [[TMP10]], [[VEC_PHI]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> [[TMP11]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
@@ -2104,7 +2123,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 {
; CHECK-MAXBW-NEXT: [[TMP11]] = add <vscale x 4 x i64> [[TMP10]], [[VEC_PHI]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> [[TMP11]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
@@ -2160,10 +2179,10 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[NEXT_GEP1]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
-; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = mul nuw nsw <16 x i64> [[TMP6]], [[TMP5]]
+; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = mul nuw nsw <16 x i64> [[TMP5]], [[TMP6]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP10]])
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -2181,10 +2200,10 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-INTERLEAVED-NEXT: br i1 [[CMP]], label [[FOR_BODY_PREHEADER:%.*]], label [[EXIT:%.*]]
; CHECK-INTERLEAVED: for.body.preheader:
; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
-; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16
+; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 32
; CHECK-INTERLEAVED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-INTERLEAVED: vector.ph:
-; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 16
+; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 32
; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]]
@@ -2194,19 +2213,29 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ [[TMP4]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE6:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[NEXT_GEP2]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = mul nuw nsw <16 x i64> [[TMP6]], [[TMP5]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP10]])
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[NEXT_GEP2]], align 1
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = mul nuw nsw <16 x i64> [[TMP13]], [[TMP15]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP16]])
+; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = zext <16 x i8> [[WIDE_LOAD5]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = mul nuw nsw <16 x i64> [[TMP10]], [[TMP11]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE6]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI1]], <16 x i64> [[TMP12]])
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
-; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]])
+; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE6]], [[PARTIAL_REDUCE]]
+; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]])
; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK-INTERLEAVED: scalar.ph:
@@ -2219,35 +2248,35 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-MAXBW: for.body.preheader:
; CHECK-MAXBW-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
+; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
; CHECK-MAXBW-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
; CHECK-MAXBW-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-MAXBW: vector.ph:
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
+; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32
; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]]
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[N_VEC]]
-; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = insertelement <vscale x 8 x i64> zeroinitializer, i64 [[COST]], i32 0
+; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = insertelement <vscale x 2 x i64> zeroinitializer, i64 [[COST]], i32 0
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x i64> [ [[TMP10]], [[VECTOR_PH]] ], [ [[TMP20:%.*]], [[VECTOR_BODY]] ]
+; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ [[TMP12]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-MAXBW-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-MAXBW-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[NEXT_GEP]], align 1
-; CHECK-MAXBW-NEXT: [[TMP14:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i64>
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[NEXT_GEP1]], align 1
-; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i64>
-; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = mul nuw nsw <vscale x 8 x i64> [[TMP16]], [[TMP14]]
-; CHECK-MAXBW-NEXT: [[TMP20]] = add <vscale x 8 x i64> [[TMP17]], [[VEC_PHI]]
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[NEXT_GEP]], align 1
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 16 x i8>, ptr [[NEXT_GEP1]], align 1
+; CHECK-MAXBW-NEXT: [[TMP14:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD2]] to <vscale x 16 x i64>
+; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i64>
+; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = mul nuw nsw <vscale x 16 x i64> [[TMP14]], [[TMP10]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i64> @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64(<vscale x 2 x i64> [[VEC_PHI]], <vscale x 16 x i64> [[TMP11]])
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; CHECK-MAXBW: middle.block:
-; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = call i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64> [[TMP20]])
+; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label [[EXIT_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK-MAXBW: scalar.ph:
@@ -2319,17 +2348,16 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum,
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP0]], [[VECTOR_PH]] ], [ [[TMP36:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ [[TMP1]], [[VECTOR_PH]] ], [ [[TMP33:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ [[TMP2]], [[VECTOR_PH]] ], [ [[TMP30:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[TMP27:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI4:%.*]] = phi <4 x i32> [ [[TMP4]], [[VECTOR_PH]] ], [ [[TMP24:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI5:%.*]] = phi <4 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[TMP21:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI6:%.*]] = phi <4 x i32> [ [[TMP6]], [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI7:%.*]] = phi <4 x i32> [ [[TMP7]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP0]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE21:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ [[TMP1]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE20:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ [[TMP2]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE19:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE18:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI4:%.*]] = phi <4 x i32> [ [[TMP4]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE17:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI5:%.*]] = phi <4 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE16:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI6:%.*]] = phi <4 x i32> [ [[TMP6]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE15:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI7:%.*]] = phi <4 x i32> [ [[TMP7]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = shl nsw i64 [[INDEX]], 3
; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[TMP11]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_VEC:%.*]] = load <128 x i8>, ptr [[TMP12]], align 1
@@ -2341,42 +2369,43 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum,
; CHECK-INTERLEAVE1-NEXT: [[STRIDED_VEC12:%.*]] = shufflevector <128 x i8> [[WIDE_VEC]], <128 x i8> poison, <16 x i32> <i32 5, i32 13, i32 21, i32 29, i32 37, i32 45, i32 53, i32 61, i32 69, i32 77, i32 85, i32 93, i32 101, i32 109, i32 117, i32 125>
; CHECK-INTERLEAVE1-NEXT: [[STRIDED_VEC13:%.*]] = shufflevector <128 x i8> [[WIDE_VEC]], <128 x i8> poison, <16 x i32> <i32 6, i32 14, i32 22, i32 30, i32 38, i32 46, i32 54, i32 62, i32 70, i32 78, i32 86, i32 94, i32 102, i32 110, i32 118, i32 126>
; CHECK-INTERLEAVE1-NEXT: [[STRIDED_VEC14:%.*]] = shufflevector <128 x i8> [[WIDE_VEC]], <128 x i8> poison, <16 x i32> <i32 7, i32 15, i32 23, i32 31, i32 39, i32 47, i32 55, i32 63, i32 71, i32 79, i32 87, i32 95, i32 103, i32 111, i32 119, i32 127>
-; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[STRIDED_VEC]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = mul nsw <16 x i32> [[TMP13]], [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP15]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP14]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP29:%.*]] = sext <16 x i8> [[STRIDED_VEC8]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP29]], [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP18]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI6]], <16 x i32> [[TMP16]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP35:%.*]] = sext <16 x i8> [[STRIDED_VEC]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP44:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = mul nsw <16 x i32> [[TMP35]], [[TMP44]]
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP13]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[STRIDED_VEC8]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP14]], [[TMP44]]
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE15]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI6]], <16 x i32> [[TMP16]])
; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[STRIDED_VEC9]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP31:%.*]] = mul nsw <16 x i32> [[TMP17]], [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP31]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = sext <16 x i8> [[STRIDED_VEC10]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = mul nsw <16 x i32> [[TMP19]], [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP24]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP20]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP32:%.*]] = sext <16 x i8> [[STRIDED_VEC11]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP22:%.*]] = mul nsw <16 x i32> [[TMP32]], [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP27]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP22]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[STRIDED_VEC12]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP34:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP30]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP34]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP25:%.*]] = sext <16 x i8> [[STRIDED_VEC13]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP26:%.*]] = mul nsw <16 x i32> [[TMP25]], [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP33]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP26]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP35:%.*]] = sext <16 x i8> [[STRIDED_VEC14]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP28:%.*]] = mul nsw <16 x i32> [[TMP35]], [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP36]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP28]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = mul nsw <16 x i32> [[TMP17]], [[TMP44]]
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP19]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[STRIDED_VEC10]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP22:%.*]] = mul nsw <16 x i32> [[TMP20]], [[TMP44]]
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP22]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[STRIDED_VEC11]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP25:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP44]]
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE18]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP25]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP26:%.*]] = sext <16 x i8> [[STRIDED_VEC12]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP28:%.*]] = mul nsw <16 x i32> [[TMP26]], [[TMP44]]
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE19]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP28]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP29:%.*]] = sext <16 x i8> [[STRIDED_VEC13]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP31:%.*]] = mul nsw <16 x i32> [[TMP29]], [[TMP44]]
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE20]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP31]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP32:%.*]] = sext <16 x i8> [[STRIDED_VEC14]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP34:%.*]] = mul nsw <16 x i32> [[TMP32]], [[TMP44]]
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP34]])
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP37]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
-; CHECK-INTERLEAVE1-NEXT: [[TMP38:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP36]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP33]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP40:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP30]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP27]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP24]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP43:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP21]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP18]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP45:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP15]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP36:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE21]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP45:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE20]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP38:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE19]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE18]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP40:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE17]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE16]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE15]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP43:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_COND_FOR_COND_CLEANUP_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK-INTERLEAVE1: scalar.ph:
@@ -2429,7 +2458,6 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum,
; CHECK-INTERLEAVED-NEXT: [[VEC_PHI7:%.*]] = phi <4 x i32> [ [[TMP7]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = shl nsw i64 [[INDEX]], 3
; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[TMP11]]
; CHECK-INTERLEAVED-NEXT: [[WIDE_VEC:%.*]] = load <128 x i8>, ptr [[TMP12]], align 1
@@ -2441,42 +2469,43 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum,
; CHECK-INTERLEAVED-NEXT: [[STRIDED_VEC12:%.*]] = shufflevector <128 x i8> [[WIDE_VEC]], <128 x i8> poison, <16 x i32> <i32 5, i32 13, i32 21, i32 29, i32 37, i32 45, i32 53, i32 61, i32 69, i32 77, i32 85, i32 93, i32 101, i32 109, i32 117, i32 125>
; CHECK-INTERLEAVED-NEXT: [[STRIDED_VEC13:%.*]] = shufflevector <128 x i8> [[WIDE_VEC]], <128 x i8> poison, <16 x i32> <i32 6, i32 14, i32 22, i32 30, i32 38, i32 46, i32 54, i32 62, i32 70, i32 78, i32 86, i32 94, i32 102, i32 110, i32 118, i32 126>
; CHECK-INTERLEAVED-NEXT: [[STRIDED_VEC14:%.*]] = shufflevector <128 x i8> [[WIDE_VEC]], <128 x i8> poison, <16 x i32> <i32 7, i32 15, i32 23, i32 31, i32 39, i32 47, i32 55, i32 63, i32 71, i32 79, i32 87, i32 95, i32 103, i32 111, i32 119, i32 127>
-; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[STRIDED_VEC]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = mul nsw <16 x i32> [[TMP13]], [[TMP10]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP14]])
-; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = sext <16 x i8> [[STRIDED_VEC8]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP15]], [[TMP10]]
+; CHECK-INTERLEAVED-NEXT: [[TMP35:%.*]] = sext <16 x i8> [[STRIDED_VEC]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP44:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = mul nsw <16 x i32> [[TMP35]], [[TMP44]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP13]])
+; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[STRIDED_VEC8]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP14]], [[TMP44]]
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE15]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI6]], <16 x i32> [[TMP16]])
; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[STRIDED_VEC9]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = mul nsw <16 x i32> [[TMP17]], [[TMP10]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP18]])
-; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = sext <16 x i8> [[STRIDED_VEC10]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = mul nsw <16 x i32> [[TMP19]], [[TMP10]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP20]])
-; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = sext <16 x i8> [[STRIDED_VEC11]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = mul nsw <16 x i32> [[TMP21]], [[TMP10]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE18]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP22]])
-; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[STRIDED_VEC12]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP10]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE19]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP24]])
-; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = sext <16 x i8> [[STRIDED_VEC13]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = mul nsw <16 x i32> [[TMP25]], [[TMP10]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE20]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP26]])
-; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = sext <16 x i8> [[STRIDED_VEC14]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = mul nsw <16 x i32> [[TMP27]], [[TMP10]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP28]])
+; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = mul nsw <16 x i32> [[TMP17]], [[TMP44]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP19]])
+; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[STRIDED_VEC10]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = mul nsw <16 x i32> [[TMP20]], [[TMP44]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP22]])
+; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[STRIDED_VEC11]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP44]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE18]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP25]])
+; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = sext <16 x i8> [[STRIDED_VEC12]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = mul nsw <16 x i32> [[TMP26]], [[TMP44]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE19]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP28]])
+; CHECK-INTERLEAVED-NEXT: [[TMP45:%.*]] = sext <16 x i8> [[STRIDED_VEC13]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = mul nsw <16 x i32> [[TMP45]], [[TMP44]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE20]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP31]])
+; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = sext <16 x i8> [[STRIDED_VEC14]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = mul nsw <16 x i32> [[TMP32]], [[TMP44]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP34]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
-; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE21]])
-; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE20]])
-; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE19]])
-; CHECK-INTERLEAVED-NEXT: [[TMP33:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE18]])
-; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE17]])
-; CHECK-INTERLEAVED-NEXT: [[TMP35:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE16]])
-; CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE15]])
-; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
+; CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE21]])
+; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE20]])
+; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE19]])
+; CHECK-INTERLEAVED-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE18]])
+; CHECK-INTERLEAVED-NEXT: [[TMP40:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE17]])
+; CHECK-INTERLEAVED-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE16]])
+; CHECK-INTERLEAVED-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE15]])
+; CHECK-INTERLEAVED-NEXT: [[TMP43:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_COND_FOR_COND_CLEANUP_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK-INTERLEAVED: scalar.ph:
@@ -2529,7 +2558,6 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum,
; CHECK-MAXBW-NEXT: [[VEC_PHI7:%.*]] = phi <4 x i32> [ [[TMP7]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
-; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = shl nsw i64 [[INDEX]], 3
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[TMP11]]
; CHECK-MAXBW-NEXT: [[WIDE_VEC:%.*]] = load <128 x i8>, ptr [[TMP12]], align 1
@@ -2541,42 +2569,43 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum,
; CHECK-MAXBW-NEXT: [[STRIDED_VEC12:%.*]] = shufflevector <128 x i8> [[WIDE_VEC]], <128 x i8> poison, <16 x i32> <i32 5, i32 13, i32 21, i32 29, i32 37, i32 45, i32 53, i32 61, i32 69, i32 77, i32 85, i32 93, i32 101, i32 109, i32 117, i32 125>
; CHECK-MAXBW-NEXT: [[STRIDED_VEC13:%.*]] = shufflevector <128 x i8> [[WIDE_VEC]], <128 x i8> poison, <16 x i32> <i32 6, i32 14, i32 22, i32 30, i32 38, i32 46, i32 54, i32 62, i32 70, i32 78, i32 86, i32 94, i32 102, i32 110, i32 118, i32 126>
; CHECK-MAXBW-NEXT: [[STRIDED_VEC14:%.*]] = shufflevector <128 x i8> [[WIDE_VEC]], <128 x i8> poison, <16 x i32> <i32 7, i32 15, i32 23, i32 31, i32 39, i32 47, i32 55, i32 63, i32 71, i32 79, i32 87, i32 95, i32 103, i32 111, i32 119, i32 127>
-; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[STRIDED_VEC]] to <16 x i32>
-; CHECK-MAXBW-NEXT: [[TMP14:%.*]] = mul nsw <16 x i32> [[TMP13]], [[TMP10]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP14]])
-; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = sext <16 x i8> [[STRIDED_VEC8]] to <16 x i32>
-; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP15]], [[TMP10]]
+; CHECK-MAXBW-NEXT: [[TMP35:%.*]] = sext <16 x i8> [[STRIDED_VEC]] to <16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP44:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = mul nsw <16 x i32> [[TMP35]], [[TMP44]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP13]])
+; CHECK-MAXBW-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[STRIDED_VEC8]] to <16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP14]], [[TMP44]]
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE15]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI6]], <16 x i32> [[TMP16]])
; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[STRIDED_VEC9]] to <16 x i32>
-; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <16 x i32> [[TMP17]], [[TMP10]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP18]])
-; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = sext <16 x i8> [[STRIDED_VEC10]] to <16 x i32>
-; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = mul nsw <16 x i32> [[TMP19]], [[TMP10]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP20]])
-; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = sext <16 x i8> [[STRIDED_VEC11]] to <16 x i32>
-; CHECK-MAXBW-NEXT: [[TMP22:%.*]] = mul nsw <16 x i32> [[TMP21]], [[TMP10]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE18]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP22]])
-; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[STRIDED_VEC12]] to <16 x i32>
-; CHECK-MAXBW-NEXT: [[TMP24:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP10]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE19]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP24]])
-; CHECK-MAXBW-NEXT: [[TMP25:%.*]] = sext <16 x i8> [[STRIDED_VEC13]] to <16 x i32>
-; CHECK-MAXBW-NEXT: [[TMP26:%.*]] = mul nsw <16 x i32> [[TMP25]], [[TMP10]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE20]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP26]])
-; CHECK-MAXBW-NEXT: [[TMP27:%.*]] = sext <16 x i8> [[STRIDED_VEC14]] to <16 x i32>
-; CHECK-MAXBW-NEXT: [[TMP28:%.*]] = mul nsw <16 x i32> [[TMP27]], [[TMP10]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP28]])
+; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = mul nsw <16 x i32> [[TMP17]], [[TMP44]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP19]])
+; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[STRIDED_VEC10]] to <16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP22:%.*]] = mul nsw <16 x i32> [[TMP20]], [[TMP44]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP22]])
+; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[STRIDED_VEC11]] to <16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP25:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP44]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE18]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP25]])
+; CHECK-MAXBW-NEXT: [[TMP26:%.*]] = sext <16 x i8> [[STRIDED_VEC12]] to <16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP28:%.*]] = mul nsw <16 x i32> [[TMP26]], [[TMP44]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE19]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP28]])
+; CHECK-MAXBW-NEXT: [[TMP45:%.*]] = sext <16 x i8> [[STRIDED_VEC13]] to <16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP31:%.*]] = mul nsw <16 x i32> [[TMP45]], [[TMP44]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE20]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP31]])
+; CHECK-MAXBW-NEXT: [[TMP32:%.*]] = sext <16 x i8> [[STRIDED_VEC14]] to <16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP34:%.*]] = mul nsw <16 x i32> [[TMP32]], [[TMP44]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP34]])
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-MAXBW-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; CHECK-MAXBW: middle.block:
-; CHECK-MAXBW-NEXT: [[TMP30:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE21]])
-; CHECK-MAXBW-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE20]])
-; CHECK-MAXBW-NEXT: [[TMP32:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE19]])
-; CHECK-MAXBW-NEXT: [[TMP33:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE18]])
-; CHECK-MAXBW-NEXT: [[TMP34:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE17]])
-; CHECK-MAXBW-NEXT: [[TMP35:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE16]])
-; CHECK-MAXBW-NEXT: [[TMP36:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE15]])
-; CHECK-MAXBW-NEXT: [[TMP37:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
+; CHECK-MAXBW-NEXT: [[TMP36:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE21]])
+; CHECK-MAXBW-NEXT: [[TMP37:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE20]])
+; CHECK-MAXBW-NEXT: [[TMP38:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE19]])
+; CHECK-MAXBW-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE18]])
+; CHECK-MAXBW-NEXT: [[TMP40:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE17]])
+; CHECK-MAXBW-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE16]])
+; CHECK-MAXBW-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE15]])
+; CHECK-MAXBW-NEXT: [[TMP43:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label [[FOR_COND_FOR_COND_CLEANUP_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK-MAXBW: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-interleave.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-interleave.ll
index b308b92..bd9fae6 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-interleave.ll
@@ -23,12 +23,12 @@ define i32 @partial_reduce_with_non_constant_start_value(ptr %src, i32 %rdx.star
; IC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16
; IC2-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
; IC2-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
-; IC2-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; IC2-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
-; IC2-NEXT: [[TMP6:%.*]] = mul nuw nsw <16 x i32> [[TMP4]], [[TMP4]]
+; IC2-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; IC2-NEXT: [[TMP7:%.*]] = mul nuw nsw <16 x i32> [[TMP5]], [[TMP5]]
-; IC2-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]])
-; IC2-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP7]])
+; IC2-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP7]])
+; IC2-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; IC2-NEXT: [[TMP6:%.*]] = mul nuw nsw <16 x i32> [[TMP8]], [[TMP8]]
+; IC2-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP6]])
; IC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; IC2-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; IC2-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -80,18 +80,18 @@ define i32 @partial_reduce_with_non_constant_start_value(ptr %src, i32 %rdx.star
; IC4-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; IC4-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1
; IC4-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
-; IC4-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; IC4-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; IC4-NEXT: [[TMP13:%.*]] = mul nuw nsw <16 x i32> [[TMP9]], [[TMP9]]
+; IC4-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP13]])
; IC4-NEXT: [[TMP7:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
-; IC4-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD5]] to <16 x i32>
-; IC4-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD6]] to <16 x i32>
-; IC4-NEXT: [[TMP10:%.*]] = mul nuw nsw <16 x i32> [[TMP6]], [[TMP6]]
; IC4-NEXT: [[TMP11:%.*]] = mul nuw nsw <16 x i32> [[TMP7]], [[TMP7]]
-; IC4-NEXT: [[TMP12:%.*]] = mul nuw nsw <16 x i32> [[TMP8]], [[TMP8]]
-; IC4-NEXT: [[TMP13:%.*]] = mul nuw nsw <16 x i32> [[TMP9]], [[TMP9]]
-; IC4-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP10]])
; IC4-NEXT: [[PARTIAL_REDUCE7]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP11]])
+; IC4-NEXT: [[TMP10:%.*]] = zext <16 x i8> [[WIDE_LOAD5]] to <16 x i32>
+; IC4-NEXT: [[TMP12:%.*]] = mul nuw nsw <16 x i32> [[TMP10]], [[TMP10]]
; IC4-NEXT: [[PARTIAL_REDUCE8]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP12]])
-; IC4-NEXT: [[PARTIAL_REDUCE9]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP13]])
+; IC4-NEXT: [[TMP14:%.*]] = zext <16 x i8> [[WIDE_LOAD6]] to <16 x i32>
+; IC4-NEXT: [[TMP16:%.*]] = mul nuw nsw <16 x i32> [[TMP14]], [[TMP14]]
+; IC4-NEXT: [[PARTIAL_REDUCE9]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP16]])
; IC4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 64
; IC4-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; IC4-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll
index 7bb4715..6dae09e 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll
@@ -18,11 +18,11 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP3]], [[TMP1]]
+; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP2]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = sub <16 x i32> zeroinitializer, [[TMP4]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]])
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
@@ -48,19 +48,19 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP7]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP14]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP14]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP14]], align 1
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = mul <16 x i32> [[TMP6]], [[TMP2]]
-; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = mul <16 x i32> [[TMP15]], [[TMP3]]
-; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = sub <16 x i32> zeroinitializer, [[TMP8]]
-; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = sub <16 x i32> zeroinitializer, [[TMP9]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP10]])
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = mul <16 x i32> [[TMP4]], [[TMP5]]
+; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = sub <16 x i32> zeroinitializer, [[TMP6]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP15]])
+; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = mul <16 x i32> [[TMP8]], [[TMP9]]
+; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = sub <16 x i32> zeroinitializer, [[TMP10]]
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP11]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -78,27 +78,27 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: br label [[ENTRY:%.*]]
; CHECK-MAXBW: vector.ph:
; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-MAXBW-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[ENTRY]] ], [ [[PARTIAL_REDUCE:%.*]], [[FOR_BODY]] ]
+; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[ENTRY]] ], [ [[PARTIAL_REDUCE:%.*]], [[FOR_BODY]] ]
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]]
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP7]], align 1
-; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP7]], align 1
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[B]], i64 [[IV]]
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x i8>, ptr [[TMP10]], align 1
-; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
-; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = mul <vscale x 8 x i32> [[TMP12]], [[TMP9]]
-; CHECK-MAXBW-NEXT: [[TMP14:%.*]] = sub <vscale x 8 x i32> zeroinitializer, [[TMP13]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP14]])
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 16 x i8>, ptr [[TMP10]], align 1
+; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD1]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = mul <vscale x 16 x i32> [[TMP4]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = sub <vscale x 16 x i32> zeroinitializer, [[TMP6]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP8]])
; CHECK-MAXBW-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-MAXBW: middle.block:
-; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]])
+; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
; CHECK-MAXBW: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll
index 70532ad..46ec858 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll
@@ -45,8 +45,8 @@ define i32 @zext_add_reduc_i8_i32_sve(ptr %a) #0 {
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]])
+; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP4]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -138,8 +138,8 @@ define i32 @zext_add_reduc_i8_i32_neon(ptr %a) #2 {
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]])
+; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP4]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -227,8 +227,8 @@ define i64 @zext_add_reduc_i8_i64(ptr %a) #0 {
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP4]])
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI1]], <16 x i64> [[TMP5]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -321,8 +321,8 @@ define i64 @zext_add_reduc_i16_i64(ptr %a) #0 {
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i16>, ptr [[TMP3]], align 2
; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <8 x i16> [[WIDE_LOAD2]] to <8 x i64>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP4]])
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <8 x i16> [[WIDE_LOAD2]] to <8 x i64>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI1]], <8 x i64> [[TMP5]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -421,12 +421,12 @@ define i32 @zext_add_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 {
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = zext <16 x i8> [[WIDE_LOAD5]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext <16 x i8> [[WIDE_LOAD6]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]])
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP5]])
+; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = zext <16 x i8> [[WIDE_LOAD5]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE8]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP10]])
+; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext <16 x i8> [[WIDE_LOAD6]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE9]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP7]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 64
; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -811,8 +811,8 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 {
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]])
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP5]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -1000,11 +1000,10 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 {
; CHECK-INTERLEAVE1: vector.ph:
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], 16
; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[C]], i64 0
-; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = add i32 [[D]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[A]], i32 0
-; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[C]], i64 0
+; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
; CHECK-INTERLEAVE1-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ]
@@ -1012,7 +1011,8 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 {
; CHECK-INTERLEAVE1-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[D]], [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]]
; CHECK-INTERLEAVE1-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP4]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]])
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
@@ -1031,23 +1031,23 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 {
; CHECK-INTERLEAVED: vector.ph:
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP2]], 32
; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP2]], [[N_MOD_VF]]
+; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = add i32 [[D]], [[N_VEC]]
+; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[A]], i32 0
; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[C]], i64 0
; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
-; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = add i32 [[D]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[A]], i32 0
-; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE2:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[D]], [[VEC_PHI1]]
; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]]
; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 16
; CHECK-INTERLEAVED-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP4]], align 1
; CHECK-INTERLEAVED-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP6]], align 1
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]])
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE2]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP3]])
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]])
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE2]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP5]])
; CHECK-INTERLEAVED-NEXT: [[TMP22]] = add nuw i32 [[VEC_PHI1]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP22]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
@@ -1071,11 +1071,10 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 {
; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 16
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], [[TMP4]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[C]], i64 0
-; CHECK-MAXBW-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = add i32 [[D]], [[N_VEC]]
; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[A]], i32 0
-; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = zext <vscale x 16 x i8> [[BROADCAST_SPLAT]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[C]], i64 0
+; CHECK-MAXBW-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
; CHECK-MAXBW-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ]
@@ -1083,6 +1082,7 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 {
; CHECK-MAXBW-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[D]], [[INDEX]]
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]]
; CHECK-MAXBW-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP10]], align 1
+; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = zext <vscale x 16 x i8> [[BROADCAST_SPLAT]] to <vscale x 16 x i32>
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP9]])
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]]
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
@@ -1164,12 +1164,12 @@ define i64 @sext_reduction_i32_to_i64(ptr %arr, i64 %n) #1 {
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i32>, ptr [[TMP14]], align 4
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = sext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = sext <4 x i32> [[WIDE_LOAD4]] to <4 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = sext <4 x i32> [[WIDE_LOAD5]] to <4 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = sext <4 x i32> [[WIDE_LOAD6]] to <4 x i64>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v4i64(<2 x i64> [[VEC_PHI]], <4 x i64> [[TMP15]])
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = sext <4 x i32> [[WIDE_LOAD4]] to <4 x i64>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE7]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v4i64(<2 x i64> [[VEC_PHI1]], <4 x i64> [[TMP5]])
+; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = sext <4 x i32> [[WIDE_LOAD5]] to <4 x i64>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE8]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v4i64(<2 x i64> [[VEC_PHI2]], <4 x i64> [[TMP6]])
+; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = sext <4 x i32> [[WIDE_LOAD6]] to <4 x i64>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE9]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v4i64(<2 x i64> [[VEC_PHI3]], <4 x i64> [[TMP7]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr33053.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr33053.ll
index ebf4a4f..ab1486d 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/pr33053.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr33053.ll
@@ -37,7 +37,7 @@ for.end: ; preds = %for.body
ret i32 %conv27
}
-attributes #0 = { norecurse nounwind readonly "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { norecurse nounwind readonly "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" }
!llvm.ident = !{!0}
!0 = !{!"clang"}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/reg-usage.ll b/llvm/test/Transforms/LoopVectorize/AArch64/reg-usage.ll
index 25ee100..70685c1 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/reg-usage.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/reg-usage.ll
@@ -192,7 +192,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) {
; CHECK: LV(REG): VF = 16
; CHECK-NEXT: LV(REG): Found max usage: 2 item
; CHECK-NEXT: LV(REG): RegisterClass: Generic::ScalarRC, 9 registers
-; CHECK-NEXT: LV(REG): RegisterClass: Generic::VectorRC, 12 registers
+; CHECK-NEXT: LV(REG): RegisterClass: Generic::VectorRC, 6 registers
; CHECK-NEXT: LV(REG): Found invariant usage: 1 item
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
index 786a2aa..28d2a27 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
@@ -1630,4 +1630,4 @@ for.body: ; preds = %for.body, %entry
}
attributes #1 = { "target-features"="+sve" vscale_range(1, 16) }
-attributes #0 = { "unsafe-fp-math"="true" "target-features"="+sve" vscale_range(1, 16) }
+attributes #0 = { "target-features"="+sve" vscale_range(1, 16) }
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-constant-ops.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-constant-ops.ll
index ae8dc2d..005ca8c 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-constant-ops.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-constant-ops.ll
@@ -175,18 +175,28 @@ define void @test_add_double_same_var_args_1(ptr %res, ptr noalias %A, ptr noali
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i64 [[TMP0]]
-; CHECK-NEXT: [[STRIDED_VEC1:%.*]] = load <2 x double>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[STRIDED_VEC4:%.*]] = load <2 x double>, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <4 x double>, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <4 x double> [[WIDE_VEC]], <4 x double> poison, <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <4 x double> [[WIDE_VEC]], <4 x double> poison, <2 x i32> <i32 1, i32 3>
+; CHECK-NEXT: [[WIDE_VEC2:%.*]] = load <4 x double>, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <4 x double> [[WIDE_VEC2]], <4 x double> poison, <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <4 x double> [[WIDE_VEC2]], <4 x double> poison, <2 x i32> <i32 1, i32 3>
+; CHECK-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP4:%.*]] = fadd <2 x double> [[STRIDED_VEC3]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP5:%.*]] = fadd <2 x double> [[STRIDED_VEC1]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP6:%.*]] = fadd <2 x double> [[STRIDED_VEC4]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RES]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RES]], i64 [[TMP0]]
-; CHECK-NEXT: store <2 x double> [[TMP5]], ptr [[TMP7]], align 4
-; CHECK-NEXT: store <2 x double> [[TMP6]], ptr [[TMP8]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x double> [[TMP9]], <4 x double> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT: store <4 x double> [[INTERLEAVED_VEC]], ptr [[TMP7]], align 4
+; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x double> [[TMP4]], <2 x double> [[TMP6]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[INTERLEAVED_VEC5:%.*]] = shufflevector <4 x double> [[TMP10]], <4 x double> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT: store <4 x double> [[INTERLEAVED_VEC5]], ptr [[TMP8]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
; CHECK-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -227,18 +237,28 @@ define void @test_add_double_same_var_args_2(ptr %res, ptr noalias %A, ptr noali
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i64 [[TMP0]]
-; CHECK-NEXT: [[STRIDED_VEC1:%.*]] = load <2 x double>, ptr [[TMP1]], align 4
-; CHECK-NEXT: [[STRIDED_VEC4:%.*]] = load <2 x double>, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <4 x double>, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <4 x double> [[WIDE_VEC]], <4 x double> poison, <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <4 x double> [[WIDE_VEC]], <4 x double> poison, <2 x i32> <i32 1, i32 3>
+; CHECK-NEXT: [[WIDE_VEC2:%.*]] = load <4 x double>, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[STRIDED_VEC3:%.*]] = shufflevector <4 x double> [[WIDE_VEC2]], <4 x double> poison, <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[STRIDED_VEC4:%.*]] = shufflevector <4 x double> [[WIDE_VEC2]], <4 x double> poison, <2 x i32> <i32 1, i32 3>
+; CHECK-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[BROADCAST_SPLAT]], [[STRIDED_VEC]]
+; CHECK-NEXT: [[TMP4:%.*]] = fadd <2 x double> [[BROADCAST_SPLAT]], [[STRIDED_VEC3]]
; CHECK-NEXT: [[TMP5:%.*]] = fadd <2 x double> [[BROADCAST_SPLAT]], [[STRIDED_VEC1]]
; CHECK-NEXT: [[TMP6:%.*]] = fadd <2 x double> [[BROADCAST_SPLAT]], [[STRIDED_VEC4]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RES]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RES]], i64 [[TMP0]]
-; CHECK-NEXT: store <2 x double> [[TMP5]], ptr [[TMP7]], align 4
-; CHECK-NEXT: store <2 x double> [[TMP6]], ptr [[TMP8]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x double> [[TMP9]], <4 x double> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT: store <4 x double> [[INTERLEAVED_VEC]], ptr [[TMP7]], align 4
+; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <2 x double> [[TMP4]], <2 x double> [[TMP6]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[INTERLEAVED_VEC5:%.*]] = shufflevector <4 x double> [[TMP10]], <4 x double> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT: store <4 x double> [[INTERLEAVED_VEC5]], ptr [[TMP8]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
; CHECK-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll
index b23702d..2a19402 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-cost.ll
@@ -319,46 +319,46 @@ define void @single_fmul_used_by_each_member(ptr noalias %A, ptr noalias %B, ptr
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP21:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP22:%.*]] = add i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP21:%.*]] = add i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP22:%.*]] = add i64 [[INDEX]], 6
; CHECK-NEXT: [[TMP23:%.*]] = getelementptr double, ptr [[A]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP33:%.*]] = getelementptr double, ptr [[A]], i64 [[TMP21]]
-; CHECK-NEXT: [[TMP37:%.*]] = getelementptr double, ptr [[A]], i64 [[TMP20]]
-; CHECK-NEXT: [[TMP39:%.*]] = getelementptr double, ptr [[A]], i64 [[TMP22]]
-; CHECK-NEXT: [[TMP24:%.*]] = load double, ptr [[TMP23]], align 8
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x double> poison, double [[TMP24]], i64 0
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT1]], <2 x double> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP25:%.*]] = load double, ptr [[TMP33]], align 8
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT12:%.*]] = insertelement <2 x double> poison, double [[TMP25]], i64 0
-; CHECK-NEXT: [[WIDE_LOAD12:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT12]], <2 x double> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP26:%.*]] = load double, ptr [[TMP37]], align 8
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT14:%.*]] = insertelement <2 x double> poison, double [[TMP26]], i64 0
-; CHECK-NEXT: [[WIDE_LOAD13:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT14]], <2 x double> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP27:%.*]] = load double, ptr [[TMP39]], align 8
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT16:%.*]] = insertelement <2 x double> poison, double [[TMP27]], i64 0
-; CHECK-NEXT: [[WIDE_LOAD14:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT16]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr double, ptr [[TMP23]], i32 2
+; CHECK-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[TMP23]], i32 4
+; CHECK-NEXT: [[TMP27:%.*]] = getelementptr double, ptr [[TMP23]], i32 6
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP23]], align 8
+; CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <2 x double>, ptr [[TMP25]], align 8
+; CHECK-NEXT: [[WIDE_LOAD13:%.*]] = load <2 x double>, ptr [[TMP26]], align 8
+; CHECK-NEXT: [[WIDE_LOAD14:%.*]] = load <2 x double>, ptr [[TMP27]], align 8
; CHECK-NEXT: [[TMP28:%.*]] = fmul <2 x double> [[WIDE_LOAD]], splat (double 5.000000e+00)
; CHECK-NEXT: [[TMP29:%.*]] = fmul <2 x double> [[WIDE_LOAD12]], splat (double 5.000000e+00)
; CHECK-NEXT: [[TMP30:%.*]] = fmul <2 x double> [[WIDE_LOAD13]], splat (double 5.000000e+00)
; CHECK-NEXT: [[TMP31:%.*]] = fmul <2 x double> [[WIDE_LOAD14]], splat (double 5.000000e+00)
; CHECK-NEXT: [[TMP32:%.*]] = getelementptr { double, double }, ptr [[B]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP33:%.*]] = getelementptr { double, double }, ptr [[B]], i64 [[TMP20]]
; CHECK-NEXT: [[TMP34:%.*]] = getelementptr { double, double }, ptr [[B]], i64 [[TMP21]]
-; CHECK-NEXT: [[TMP38:%.*]] = getelementptr { double, double }, ptr [[B]], i64 [[TMP20]]
; CHECK-NEXT: [[TMP35:%.*]] = getelementptr { double, double }, ptr [[B]], i64 [[TMP22]]
-; CHECK-NEXT: store <2 x double> [[TMP28]], ptr [[TMP32]], align 8
-; CHECK-NEXT: store <2 x double> [[TMP29]], ptr [[TMP34]], align 8
-; CHECK-NEXT: store <2 x double> [[TMP30]], ptr [[TMP38]], align 8
-; CHECK-NEXT: store <2 x double> [[TMP31]], ptr [[TMP35]], align 8
+; CHECK-NEXT: [[TMP36:%.*]] = shufflevector <2 x double> [[TMP28]], <2 x double> [[TMP28]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x double> [[TMP36]], <4 x double> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT: store <4 x double> [[INTERLEAVED_VEC]], ptr [[TMP32]], align 8
+; CHECK-NEXT: [[TMP37:%.*]] = shufflevector <2 x double> [[TMP29]], <2 x double> [[TMP29]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[INTERLEAVED_VEC15:%.*]] = shufflevector <4 x double> [[TMP37]], <4 x double> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT: store <4 x double> [[INTERLEAVED_VEC15]], ptr [[TMP33]], align 8
+; CHECK-NEXT: [[TMP38:%.*]] = shufflevector <2 x double> [[TMP30]], <2 x double> [[TMP30]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[INTERLEAVED_VEC16:%.*]] = shufflevector <4 x double> [[TMP38]], <4 x double> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT: store <4 x double> [[INTERLEAVED_VEC16]], ptr [[TMP34]], align 8
+; CHECK-NEXT: [[TMP39:%.*]] = shufflevector <2 x double> [[TMP31]], <2 x double> [[TMP31]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[INTERLEAVED_VEC17:%.*]] = shufflevector <4 x double> [[TMP39]], <4 x double> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT: store <4 x double> [[INTERLEAVED_VEC17]], ptr [[TMP35]], align 8
; CHECK-NEXT: [[TMP40:%.*]] = getelementptr { double, double }, ptr [[C]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP42:%.*]] = getelementptr { double, double }, ptr [[C]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP41:%.*]] = getelementptr { double, double }, ptr [[C]], i64 [[TMP20]]
+; CHECK-NEXT: [[TMP42:%.*]] = getelementptr { double, double }, ptr [[C]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP43:%.*]] = getelementptr { double, double }, ptr [[C]], i64 [[TMP22]]
-; CHECK-NEXT: store <2 x double> [[TMP28]], ptr [[TMP40]], align 8
-; CHECK-NEXT: store <2 x double> [[TMP29]], ptr [[TMP42]], align 8
-; CHECK-NEXT: store <2 x double> [[TMP30]], ptr [[TMP41]], align 8
-; CHECK-NEXT: store <2 x double> [[TMP31]], ptr [[TMP43]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: store <4 x double> [[INTERLEAVED_VEC]], ptr [[TMP40]], align 8
+; CHECK-NEXT: store <4 x double> [[INTERLEAVED_VEC15]], ptr [[TMP41]], align 8
+; CHECK-NEXT: store <4 x double> [[INTERLEAVED_VEC16]], ptr [[TMP42]], align 8
+; CHECK-NEXT: store <4 x double> [[INTERLEAVED_VEC17]], ptr [[TMP43]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP44:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP44]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -435,7 +435,7 @@ exit:
ret void
}
-; We should interleave by 2 after narrowing interleave groups to saturate
+; FIXME: We should interleave by 2 after narrowing interleave groups to saturate
; load/store units.
define void @test_interleave_after_narrowing(i32 %n, ptr %x, ptr noalias %y) {
; CHECK-LABEL: define void @test_interleave_after_narrowing(
@@ -447,18 +447,12 @@ define void @test_interleave_after_narrowing(i32 %n, ptr %x, ptr noalias %y) {
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 4
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[X]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[X]], i64 [[TMP5]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP0]], align 4
-; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x float>, ptr [[TMP7]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = fneg <4 x float> [[WIDE_LOAD]]
-; CHECK-NEXT: [[TMP4:%.*]] = fneg <4 x float> [[WIDE_LOAD1]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[Y]], i64 [[OFFSET_IDX]]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[Y]], i64 [[TMP5]]
; CHECK-NEXT: store <4 x float> [[TMP1]], ptr [[TMP2]], align 4
-; CHECK-NEXT: store <4 x float> [[TMP4]], ptr [[TMP6]], align 4
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 256
; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll
index 2865495..c261760 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-remove-loop-region.ll
@@ -13,8 +13,12 @@ define void @load_store_interleave_group_tc_2(ptr noalias %data) {
; VF2: [[VECTOR_PH]]:
; VF2-NEXT: br label %[[VECTOR_BODY:.*]]
; VF2: [[VECTOR_BODY]]:
-; VF2-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[DATA]], align 8
-; VF2-NEXT: store <2 x i64> [[WIDE_LOAD]], ptr [[DATA]], align 8
+; VF2-NEXT: [[WIDE_VEC:%.*]] = load <4 x i64>, ptr [[DATA]], align 8
+; VF2-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <4 x i64> [[WIDE_VEC]], <4 x i64> poison, <2 x i32> <i32 0, i32 2>
+; VF2-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <4 x i64> [[WIDE_VEC]], <4 x i64> poison, <2 x i32> <i32 1, i32 3>
+; VF2-NEXT: [[TMP2:%.*]] = shufflevector <2 x i64> [[STRIDED_VEC]], <2 x i64> [[STRIDED_VEC1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; VF2-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x i64> [[TMP2]], <4 x i64> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; VF2-NEXT: store <4 x i64> [[INTERLEAVED_VEC]], ptr [[DATA]], align 8
; VF2-NEXT: br label %[[MIDDLE_BLOCK:.*]]
; VF2: [[MIDDLE_BLOCK]]:
; VF2-NEXT: br label %[[EXIT:.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll
index 305a692..b63e03d 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-scalable.ll
@@ -1,81 +1,29 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "^scalar.ph:" --version 5
-; RUN: opt -p loop-vectorize -force-vector-interleave=1 -S -mcpu=neoverse-512tvb %s | FileCheck --check-prefixes=IC1 %s
-; RUN: opt -p loop-vectorize -S -mcpu=neoverse-512tvb %s | FileCheck --check-prefixes=CHECK %s
+; RUN: opt -p loop-vectorize -force-vector-interleave=1 -S -mcpu=neoverse-512tvb %s | FileCheck --check-prefixes=CHECK %s
target triple = "aarch64-unknown-linux"
define void @load_store_interleave_group(ptr noalias %data) {
-; IC1-LABEL: define void @load_store_interleave_group(
-; IC1-SAME: ptr noalias [[DATA:%.*]]) #[[ATTR0:[0-9]+]] {
-; IC1-NEXT: [[ENTRY:.*:]]
-; IC1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; IC1-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 1
-; IC1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP1]]
-; IC1-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; IC1: [[VECTOR_PH]]:
-; IC1-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; IC1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; IC1-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP3]]
-; IC1-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; IC1-NEXT: br label %[[VECTOR_BODY:.*]]
-; IC1: [[VECTOR_BODY]]:
-; IC1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IC1-NEXT: [[TMP4:%.*]] = shl nsw i64 [[INDEX]], 1
-; IC1-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP4]]
-; IC1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP5]], align 8
-; IC1-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD]], ptr [[TMP5]], align 8
-; IC1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; IC1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IC1-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; IC1: [[MIDDLE_BLOCK]]:
-; IC1-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]]
-; IC1-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
-; IC1: [[SCALAR_PH]]:
-;
; CHECK-LABEL: define void @load_store_interleave_group(
; CHECK-SAME: ptr noalias [[DATA:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = shl nuw i64 [[TMP4]], 3
+; CHECK-NEXT: [[TMP5:%.*]] = shl nuw i64 [[TMP4]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP5]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP2]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP2]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP20]], 1
-; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], [[TMP6]]
-; CHECK-NEXT: [[TMP24:%.*]] = mul i64 [[TMP2]], 2
-; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[TMP24]], 0
-; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 1
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], [[TMP10]]
-; CHECK-NEXT: [[TMP12:%.*]] = mul i64 [[TMP2]], 3
-; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP12]], 0
-; CHECK-NEXT: [[TMP14:%.*]] = mul i64 [[TMP13]], 1
-; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], [[TMP14]]
; CHECK-NEXT: [[TMP0:%.*]] = shl nsw i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP17:%.*]] = shl nsw i64 [[TMP7]], 1
-; CHECK-NEXT: [[TMP18:%.*]] = shl nsw i64 [[TMP11]], 1
-; CHECK-NEXT: [[TMP19:%.*]] = shl nsw i64 [[TMP15]], 1
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP0]]
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP17]]
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP18]]
-; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP19]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP1]], align 8
-; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 2 x i64>, ptr [[TMP21]], align 8
-; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 2 x i64>, ptr [[TMP22]], align 8
-; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 2 x i64>, ptr [[TMP23]], align 8
; CHECK-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD]], ptr [[TMP1]], align 8
-; CHECK-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD1]], ptr [[TMP21]], align 8
-; CHECK-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD2]], ptr [[TMP22]], align 8
-; CHECK-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD3]], ptr [[TMP23]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP16]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -105,82 +53,27 @@ exit:
}
define void @test_2xi64_unary_op_load_interleave_group(ptr noalias %data, ptr noalias %factor) {
-; IC1-LABEL: define void @test_2xi64_unary_op_load_interleave_group(
-; IC1-SAME: ptr noalias [[DATA:%.*]], ptr noalias [[FACTOR:%.*]]) #[[ATTR0]] {
-; IC1-NEXT: [[ENTRY:.*:]]
-; IC1-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; IC1-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 1
-; IC1-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1111, [[TMP1]]
-; IC1-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
-; IC1: [[VECTOR_PH]]:
-; IC1-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; IC1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; IC1-NEXT: [[N_MOD_VF:%.*]] = urem i64 1111, [[TMP3]]
-; IC1-NEXT: [[N_VEC:%.*]] = sub i64 1111, [[N_MOD_VF]]
-; IC1-NEXT: br label %[[VECTOR_BODY:.*]]
-; IC1: [[VECTOR_BODY]]:
-; IC1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; IC1-NEXT: [[TMP4:%.*]] = shl nsw i64 [[INDEX]], 1
-; IC1-NEXT: [[TMP5:%.*]] = getelementptr inbounds double, ptr [[DATA]], i64 [[TMP4]]
-; IC1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x double>, ptr [[TMP5]], align 8
-; IC1-NEXT: [[TMP6:%.*]] = fneg <vscale x 2 x double> [[WIDE_LOAD]]
-; IC1-NEXT: store <vscale x 2 x double> [[TMP6]], ptr [[TMP5]], align 8
-; IC1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
-; IC1-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; IC1-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
-; IC1: [[MIDDLE_BLOCK]]:
-; IC1-NEXT: [[CMP_N:%.*]] = icmp eq i64 1111, [[N_VEC]]
-; IC1-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
-; IC1: [[SCALAR_PH]]:
-;
; CHECK-LABEL: define void @test_2xi64_unary_op_load_interleave_group(
; CHECK-SAME: ptr noalias [[DATA:%.*]], ptr noalias [[FACTOR:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP5:%.*]] = shl nuw i64 [[TMP4]], 3
+; CHECK-NEXT: [[TMP5:%.*]] = shl nuw i64 [[TMP4]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1111, [[TMP5]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1111, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1111, [[N_MOD_VF]]
-; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP2]], 4
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[TMP2]], 0
-; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP20]], 1
-; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[INDEX]], [[TMP6]]
-; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP2]], 2
-; CHECK-NEXT: [[TMP28:%.*]] = add i64 [[TMP8]], 0
-; CHECK-NEXT: [[TMP29:%.*]] = mul i64 [[TMP28]], 1
-; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], [[TMP29]]
-; CHECK-NEXT: [[TMP12:%.*]] = mul i64 [[TMP2]], 3
-; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[TMP12]], 0
-; CHECK-NEXT: [[TMP14:%.*]] = mul i64 [[TMP13]], 1
-; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], [[TMP14]]
; CHECK-NEXT: [[TMP0:%.*]] = shl nsw i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP17:%.*]] = shl nsw i64 [[TMP24]], 1
-; CHECK-NEXT: [[TMP18:%.*]] = shl nsw i64 [[TMP11]], 1
-; CHECK-NEXT: [[TMP19:%.*]] = shl nsw i64 [[TMP15]], 1
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds double, ptr [[DATA]], i64 [[TMP0]]
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds double, ptr [[DATA]], i64 [[TMP17]]
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds double, ptr [[DATA]], i64 [[TMP18]]
-; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds double, ptr [[DATA]], i64 [[TMP19]]
; CHECK-NEXT: [[TMP7:%.*]] = load <vscale x 2 x double>, ptr [[TMP1]], align 8
-; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 2 x double>, ptr [[TMP21]], align 8
-; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 2 x double>, ptr [[TMP22]], align 8
-; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 2 x double>, ptr [[TMP23]], align 8
; CHECK-NEXT: [[TMP9:%.*]] = fneg <vscale x 2 x double> [[TMP7]]
-; CHECK-NEXT: [[TMP25:%.*]] = fneg <vscale x 2 x double> [[WIDE_LOAD1]]
-; CHECK-NEXT: [[TMP26:%.*]] = fneg <vscale x 2 x double> [[WIDE_LOAD2]]
-; CHECK-NEXT: [[TMP27:%.*]] = fneg <vscale x 2 x double> [[WIDE_LOAD3]]
; CHECK-NEXT: store <vscale x 2 x double> [[TMP9]], ptr [[TMP1]], align 8
-; CHECK-NEXT: store <vscale x 2 x double> [[TMP25]], ptr [[TMP21]], align 8
-; CHECK-NEXT: store <vscale x 2 x double> [[TMP26]], ptr [[TMP22]], align 8
-; CHECK-NEXT: store <vscale x 2 x double> [[TMP27]], ptr [[TMP23]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP16]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP2]]
; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll
index abfb44d..d290f2d 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory-unroll.ll
@@ -60,26 +60,32 @@ define void @test_2xi64_with_wide_load(ptr noalias %data, ptr noalias %factor) {
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[FACTOR]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[FACTOR]], i64 [[TMP0]]
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 8
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[TMP2]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP3:%.*]] = load i64, ptr [[TMP4]], align 8
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <2 x i64> poison, i64 [[TMP3]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT2]], <2 x i64> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 2
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = load <2 x i64>, ptr [[TMP1]], align 8
+; CHECK-NEXT: [[BROADCAST_SPLAT3:%.*]] = load <2 x i64>, ptr [[TMP3]], align 8
; CHECK-NEXT: [[TMP6:%.*]] = shl nsw i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP7:%.*]] = shl nsw i64 [[TMP0]], 1
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP6]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[DATA]], i64 [[TMP7]]
-; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = load <2 x i64>, ptr [[TMP8]], align 8
-; CHECK-NEXT: [[STRIDED_VEC5:%.*]] = load <2 x i64>, ptr [[TMP9]], align 8
+; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <4 x i64>, ptr [[TMP8]], align 8
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = shufflevector <4 x i64> [[WIDE_VEC]], <4 x i64> poison, <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = shufflevector <4 x i64> [[WIDE_VEC]], <4 x i64> poison, <2 x i32> <i32 1, i32 3>
+; CHECK-NEXT: [[WIDE_VEC3:%.*]] = load <4 x i64>, ptr [[TMP9]], align 8
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = shufflevector <4 x i64> [[WIDE_VEC3]], <4 x i64> poison, <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[STRIDED_VEC5:%.*]] = shufflevector <4 x i64> [[WIDE_VEC3]], <4 x i64> poison, <2 x i32> <i32 1, i32 3>
+; CHECK-NEXT: [[TMP10:%.*]] = mul <2 x i64> [[BROADCAST_SPLAT]], [[WIDE_LOAD]]
+; CHECK-NEXT: [[TMP11:%.*]] = mul <2 x i64> [[BROADCAST_SPLAT3]], [[WIDE_LOAD1]]
; CHECK-NEXT: [[TMP15:%.*]] = mul <2 x i64> [[BROADCAST_SPLAT]], [[STRIDED_VEC2]]
; CHECK-NEXT: [[TMP16:%.*]] = mul <2 x i64> [[BROADCAST_SPLAT3]], [[STRIDED_VEC5]]
-; CHECK-NEXT: store <2 x i64> [[TMP15]], ptr [[TMP8]], align 8
-; CHECK-NEXT: store <2 x i64> [[TMP16]], ptr [[TMP9]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP17:%.*]] = shufflevector <2 x i64> [[TMP10]], <2 x i64> [[TMP15]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x i64> [[TMP17]], <4 x i64> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT: store <4 x i64> [[INTERLEAVED_VEC]], ptr [[TMP8]], align 8
+; CHECK-NEXT: [[TMP18:%.*]] = shufflevector <2 x i64> [[TMP11]], <2 x i64> [[TMP16]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[INTERLEAVED_VEC6:%.*]] = shufflevector <4 x i64> [[TMP18]], <4 x i64> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT: store <4 x i64> [[INTERLEAVED_VEC6]], ptr [[TMP9]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory.ll b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory.ll
index f2e689c..75980ba 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/transform-narrow-interleave-to-widen-memory.ll
@@ -328,8 +328,10 @@ define void @same_live_in_store_interleave_group(i64 %x, ptr noalias %dst) {
; VF2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; VF2-NEXT: [[TMP0:%.*]] = shl nsw i64 [[INDEX]], 1
; VF2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[TMP0]]
-; VF2-NEXT: store <2 x i64> [[BROADCAST_SPLAT]], ptr [[TMP1]], align 8
-; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 1
+; VF2-NEXT: [[TMP2:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLAT]], <2 x i64> [[BROADCAST_SPLAT]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; VF2-NEXT: [[INTERLEAVED_VEC:%.*]] = shufflevector <4 x i64> [[TMP2]], <4 x i64> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; VF2-NEXT: store <4 x i64> [[INTERLEAVED_VEC]], ptr [[TMP1]], align 8
+; VF2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VF2-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
; VF2-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; VF2: [[MIDDLE_BLOCK]]:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll b/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll
index d4e5dea..49f663f 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll
@@ -23,18 +23,15 @@ define i32 @print_partial_reduction(ptr %a, ptr %b) {
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION ir<0>, vp<[[CAN_IV_NEXT:%.+]]>
-; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<[[ACC:%.+]]> = phi vp<[[RDX_START]]>, ir<[[REDUCE:%.+]]> (VF scaled by 1/4)
+; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<[[ACC:%.+]]> = phi vp<[[RDX_START]]>, vp<[[REDUCE:%.+]]> (VF scaled by 1/4)
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>, vp<[[VF]]>
; CHECK-NEXT: CLONE ir<%gep.a> = getelementptr ir<%a>, vp<[[STEPS]]>
; CHECK-NEXT: vp<[[PTR_A:%.+]]> = vector-pointer ir<%gep.a>
; CHECK-NEXT: WIDEN ir<%load.a> = load vp<[[PTR_A]]>
-; CHECK-NEXT: WIDEN-CAST ir<%ext.a> = zext ir<%load.a> to i32
; CHECK-NEXT: CLONE ir<%gep.b> = getelementptr ir<%b>, vp<[[STEPS]]>
; CHECK-NEXT: vp<[[PTR_B:%.+]]> = vector-pointer ir<%gep.b>
; CHECK-NEXT: WIDEN ir<%load.b> = load vp<[[PTR_B]]>
-; CHECK-NEXT: WIDEN-CAST ir<%ext.b> = zext ir<%load.b> to i32
-; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%ext.b>, ir<%ext.a>
-; CHECK-NEXT: PARTIAL-REDUCE ir<[[REDUCE]]> = add ir<[[ACC]]>, ir<%mul>
+; CHECK-NEXT: EXPRESSION vp<[[REDUCE]]> = ir<[[ACC]]> + partial.reduce.add (mul (ir<%load.b> zext to i32), (ir<%load.a> zext to i32))
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT]]> = add nuw vp<[[CAN_IV]]>, vp<[[VFxUF]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]>, vp<[[VEC_TC]]>
; CHECK-NEXT: No successors
@@ -42,7 +39,7 @@ define i32 @print_partial_reduction(ptr %a, ptr %b) {
; CHECK-NEXT: Successor(s): middle.block
; CHECK-EMPTY:
; CHECK-NEXT: middle.block:
-; CHECK-NEXT: EMIT vp<[[RED_RESULT:%.+]]> = compute-reduction-result ir<[[ACC]]>, ir<[[REDUCE]]>
+; CHECK-NEXT: EMIT vp<[[RED_RESULT:%.+]]> = compute-reduction-result ir<[[ACC]]>, vp<[[REDUCE]]>
; CHECK-NEXT: EMIT vp<[[CMP:%.+]]> = icmp eq ir<1024>, vp<[[VEC_TC]]>
; CHECK-NEXT: EMIT branch-on-cond vp<[[CMP]]>
; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph
@@ -89,10 +86,10 @@ define i32 @print_partial_reduction(ptr %a, ptr %b) {
; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%accum> = phi vp<[[RDX_START]]>, ir<%add> (VF scaled by 1/4)
; CHECK-NEXT: CLONE ir<%gep.a> = getelementptr ir<%a>, vp<[[EP_IV]]>
; CHECK-NEXT: WIDEN ir<%load.a> = load ir<%gep.a>
-; CHECK-NEXT: WIDEN-CAST ir<%ext.a> = zext ir<%load.a> to i32
; CHECK-NEXT: CLONE ir<%gep.b> = getelementptr ir<%b>, vp<[[EP_IV]]>
; CHECK-NEXT: WIDEN ir<%load.b> = load ir<%gep.b>
; CHECK-NEXT: WIDEN-CAST ir<%ext.b> = zext ir<%load.b> to i32
+; CHECK-NEXT: WIDEN-CAST ir<%ext.a> = zext ir<%load.a> to i32
; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%ext.b>, ir<%ext.a>
; CHECK-NEXT: PARTIAL-REDUCE ir<%add> = add ir<%accum>, ir<%mul>
; CHECK-NEXT: EMIT vp<[[EP_IV_NEXT:%.+]]> = add nuw vp<[[EP_IV]]>, ir<16>
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/arm-ieee-vectorize.ll b/llvm/test/Transforms/LoopVectorize/ARM/arm-ieee-vectorize.ll
index 0f398a6..2579918 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/arm-ieee-vectorize.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/arm-ieee-vectorize.ll
@@ -327,5 +327,5 @@ for.end: ; preds = %for.body, %entry
declare float @fabsf(float)
-attributes #1 = { nounwind readnone "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a8" "target-features"="+dsp,+neon,+vfp3" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind readnone "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a8" "target-features"="+dsp,+neon,+vfp3" "unsafe-fp-math"="true" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a8" "target-features"="+dsp,+neon,+vfp3" "use-soft-float"="false" }
+attributes #2 = { nounwind readnone "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a8" "target-features"="+dsp,+neon,+vfp3" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll
index 0a9b1e0..61e3a18 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll
@@ -60,10 +60,10 @@ define i32 @vqdot(ptr %a, ptr %b) #0 {
; ZVQDOTQ-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 1 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; ZVQDOTQ-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP6]], align 1
-; ZVQDOTQ-NEXT: [[TMP8:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
; ZVQDOTQ-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; ZVQDOTQ-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1
; ZVQDOTQ-NEXT: [[TMP11:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
+; ZVQDOTQ-NEXT: [[TMP8:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
; ZVQDOTQ-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]]
; ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 1 x i32> @llvm.vector.partial.reduce.add.nxv1i32.nxv4i32(<vscale x 1 x i32> [[VEC_PHI]], <vscale x 4 x i32> [[TMP12]])
; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
@@ -125,17 +125,17 @@ define i32 @vqdot(ptr %a, ptr %b) #0 {
; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1
-; FIXED-ZVQDOTQ-NEXT: [[TMP3:%.*]] = sext <8 x i8> [[WIDE_LOAD]] to <8 x i32>
-; FIXED-ZVQDOTQ-NEXT: [[TMP4:%.*]] = sext <8 x i8> [[WIDE_LOAD2]] to <8 x i32>
; FIXED-ZVQDOTQ-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1
; FIXED-ZVQDOTQ-NEXT: [[TMP8:%.*]] = sext <8 x i8> [[WIDE_LOAD3]] to <8 x i32>
-; FIXED-ZVQDOTQ-NEXT: [[TMP9:%.*]] = sext <8 x i8> [[WIDE_LOAD4]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP3:%.*]] = sext <8 x i8> [[WIDE_LOAD]] to <8 x i32>
; FIXED-ZVQDOTQ-NEXT: [[TMP10:%.*]] = mul <8 x i32> [[TMP8]], [[TMP3]]
-; FIXED-ZVQDOTQ-NEXT: [[TMP11:%.*]] = mul <8 x i32> [[TMP9]], [[TMP4]]
; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI]], <8 x i32> [[TMP10]])
+; FIXED-ZVQDOTQ-NEXT: [[TMP9:%.*]] = sext <8 x i8> [[WIDE_LOAD4]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP12:%.*]] = sext <8 x i8> [[WIDE_LOAD2]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP11:%.*]] = mul <8 x i32> [[TMP9]], [[TMP12]]
; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI1]], <8 x i32> [[TMP11]])
; FIXED-ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; FIXED-ZVQDOTQ-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -222,10 +222,10 @@ define i32 @vqdotu(ptr %a, ptr %b) #0 {
; ZVQDOTQ-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 1 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; ZVQDOTQ-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP6]], align 1
-; ZVQDOTQ-NEXT: [[TMP8:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
; ZVQDOTQ-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; ZVQDOTQ-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1
; ZVQDOTQ-NEXT: [[TMP11:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
+; ZVQDOTQ-NEXT: [[TMP8:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
; ZVQDOTQ-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]]
; ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 1 x i32> @llvm.vector.partial.reduce.add.nxv1i32.nxv4i32(<vscale x 1 x i32> [[VEC_PHI]], <vscale x 4 x i32> [[TMP12]])
; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
@@ -287,17 +287,17 @@ define i32 @vqdotu(ptr %a, ptr %b) #0 {
; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1
-; FIXED-ZVQDOTQ-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[WIDE_LOAD]] to <8 x i32>
-; FIXED-ZVQDOTQ-NEXT: [[TMP4:%.*]] = zext <8 x i8> [[WIDE_LOAD2]] to <8 x i32>
; FIXED-ZVQDOTQ-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1
; FIXED-ZVQDOTQ-NEXT: [[TMP8:%.*]] = zext <8 x i8> [[WIDE_LOAD3]] to <8 x i32>
-; FIXED-ZVQDOTQ-NEXT: [[TMP9:%.*]] = zext <8 x i8> [[WIDE_LOAD4]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[WIDE_LOAD]] to <8 x i32>
; FIXED-ZVQDOTQ-NEXT: [[TMP10:%.*]] = mul <8 x i32> [[TMP8]], [[TMP3]]
-; FIXED-ZVQDOTQ-NEXT: [[TMP11:%.*]] = mul <8 x i32> [[TMP9]], [[TMP4]]
; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI]], <8 x i32> [[TMP10]])
+; FIXED-ZVQDOTQ-NEXT: [[TMP9:%.*]] = zext <8 x i8> [[WIDE_LOAD4]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP12:%.*]] = zext <8 x i8> [[WIDE_LOAD2]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP11:%.*]] = mul <8 x i32> [[TMP9]], [[TMP12]]
; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI1]], <8 x i32> [[TMP11]])
; FIXED-ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; FIXED-ZVQDOTQ-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -384,10 +384,10 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 {
; ZVQDOTQ-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 1 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; ZVQDOTQ-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP6]], align 1
-; ZVQDOTQ-NEXT: [[TMP8:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
; ZVQDOTQ-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; ZVQDOTQ-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1
; ZVQDOTQ-NEXT: [[TMP11:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
+; ZVQDOTQ-NEXT: [[TMP8:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
; ZVQDOTQ-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]]
; ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 1 x i32> @llvm.vector.partial.reduce.add.nxv1i32.nxv4i32(<vscale x 1 x i32> [[VEC_PHI]], <vscale x 4 x i32> [[TMP12]])
; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
@@ -449,18 +449,18 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 {
; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1
-; FIXED-ZVQDOTQ-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[WIDE_LOAD]] to <8 x i32>
-; FIXED-ZVQDOTQ-NEXT: [[TMP4:%.*]] = zext <8 x i8> [[WIDE_LOAD2]] to <8 x i32>
; FIXED-ZVQDOTQ-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1
-; FIXED-ZVQDOTQ-NEXT: [[TMP8:%.*]] = sext <8 x i8> [[WIDE_LOAD3]] to <8 x i32>
-; FIXED-ZVQDOTQ-NEXT: [[TMP9:%.*]] = sext <8 x i8> [[WIDE_LOAD4]] to <8 x i32>
-; FIXED-ZVQDOTQ-NEXT: [[TMP10:%.*]] = mul <8 x i32> [[TMP8]], [[TMP3]]
+; FIXED-ZVQDOTQ-NEXT: [[TMP9:%.*]] = sext <8 x i8> [[WIDE_LOAD3]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP4:%.*]] = zext <8 x i8> [[WIDE_LOAD]] to <8 x i32>
; FIXED-ZVQDOTQ-NEXT: [[TMP11:%.*]] = mul <8 x i32> [[TMP9]], [[TMP4]]
-; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI]], <8 x i32> [[TMP10]])
-; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI1]], <8 x i32> [[TMP11]])
+; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI]], <8 x i32> [[TMP11]])
+; FIXED-ZVQDOTQ-NEXT: [[TMP10:%.*]] = sext <8 x i8> [[WIDE_LOAD4]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP8:%.*]] = zext <8 x i8> [[WIDE_LOAD2]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP14:%.*]] = mul <8 x i32> [[TMP10]], [[TMP8]]
+; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI1]], <8 x i32> [[TMP14]])
; FIXED-ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; FIXED-ZVQDOTQ-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
@@ -545,10 +545,10 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 {
; ZVQDOTQ-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 1 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; ZVQDOTQ-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP6]], align 1
-; ZVQDOTQ-NEXT: [[TMP8:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
; ZVQDOTQ-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; ZVQDOTQ-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1
; ZVQDOTQ-NEXT: [[TMP11:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
+; ZVQDOTQ-NEXT: [[TMP8:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
; ZVQDOTQ-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]]
; ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 1 x i32> @llvm.vector.partial.reduce.add.nxv1i32.nxv4i32(<vscale x 1 x i32> [[VEC_PHI]], <vscale x 4 x i32> [[TMP12]])
; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
@@ -610,18 +610,18 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 {
; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1
-; FIXED-ZVQDOTQ-NEXT: [[TMP3:%.*]] = sext <8 x i8> [[WIDE_LOAD]] to <8 x i32>
-; FIXED-ZVQDOTQ-NEXT: [[TMP4:%.*]] = sext <8 x i8> [[WIDE_LOAD2]] to <8 x i32>
; FIXED-ZVQDOTQ-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1
-; FIXED-ZVQDOTQ-NEXT: [[TMP8:%.*]] = zext <8 x i8> [[WIDE_LOAD3]] to <8 x i32>
-; FIXED-ZVQDOTQ-NEXT: [[TMP9:%.*]] = zext <8 x i8> [[WIDE_LOAD4]] to <8 x i32>
-; FIXED-ZVQDOTQ-NEXT: [[TMP10:%.*]] = mul <8 x i32> [[TMP8]], [[TMP3]]
+; FIXED-ZVQDOTQ-NEXT: [[TMP9:%.*]] = zext <8 x i8> [[WIDE_LOAD3]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP4:%.*]] = sext <8 x i8> [[WIDE_LOAD]] to <8 x i32>
; FIXED-ZVQDOTQ-NEXT: [[TMP11:%.*]] = mul <8 x i32> [[TMP9]], [[TMP4]]
-; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI]], <8 x i32> [[TMP10]])
-; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI1]], <8 x i32> [[TMP11]])
+; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI]], <8 x i32> [[TMP11]])
+; FIXED-ZVQDOTQ-NEXT: [[TMP10:%.*]] = zext <8 x i8> [[WIDE_LOAD4]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP8:%.*]] = sext <8 x i8> [[WIDE_LOAD2]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP14:%.*]] = mul <8 x i32> [[TMP10]], [[TMP8]]
+; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI1]], <8 x i32> [[TMP14]])
; FIXED-ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; FIXED-ZVQDOTQ-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/WebAssembly/memory-interleave.ll b/llvm/test/Transforms/LoopVectorize/WebAssembly/memory-interleave.ll
index e42e2c7..b26e9cf 100644
--- a/llvm/test/Transforms/LoopVectorize/WebAssembly/memory-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/WebAssembly/memory-interleave.ll
@@ -1779,7 +1779,7 @@ for.body: ; preds = %entry, %for.body
; CHECK: LV: Scalar loop costs: 24
; CHECK: LV: Vector loop of width 2 costs: 33
; CHECK: LV: Vector loop of width 4 costs: 30
-; CHECK: LV: Selecting VF: 4
+; CHECK: LV: Selecting VF: 1
define hidden void @four_floats_same_op(ptr noundef readonly captures(none) %a, ptr noundef readonly captures(none) %b, ptr noundef writeonly captures(none) %res, i32 noundef %N) {
entry:
%cmp45.not = icmp eq i32 %N, 0
diff --git a/llvm/test/Transforms/LoopVectorize/WebAssembly/partial-reduce-accumulate.ll b/llvm/test/Transforms/LoopVectorize/WebAssembly/partial-reduce-accumulate.ll
new file mode 100644
index 0000000..2338da5
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/WebAssembly/partial-reduce-accumulate.ll
@@ -0,0 +1,126 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -mattr=+simd128 -passes=loop-vectorize %s -S | FileCheck %s
+; RUN: opt -mattr=+simd128 -passes=loop-vectorize -vectorizer-maximize-bandwidth %s -S | FileCheck %s --check-prefix=CHECK-MAX-BANDWIDTH
+
+target triple = "wasm32"
+
+define hidden i32 @accumulate_add_u8_u8(ptr noundef readonly %a, ptr noundef readonly %b, i32 noundef %N) {
+; CHECK-LABEL: define hidden i32 @accumulate_add_u8_u8(
+; CHECK-SAME: ptr noundef readonly [[A:%.*]], ptr noundef readonly [[B:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N]], 4
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i32 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP2]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i8> [[WIDE_LOAD]] to <4 x i32>
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i32 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i8>, ptr [[TMP5]], align 1
+; CHECK-NEXT: [[TMP6:%.*]] = zext <4 x i8> [[WIDE_LOAD1]] to <4 x i32>
+; CHECK-NEXT: [[TMP7:%.*]] = add <4 x i32> [[VEC_PHI]], [[TMP3]]
+; CHECK-NEXT: [[TMP8]] = add <4 x i32> [[TMP7]], [[TMP6]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_COND_CLEANUP]]:
+; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[ADD3:%.*]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[INC:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[ADD3]], %[[FOR_BODY]] ], [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i32 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP11]] to i32
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i32 [[IV]]
+; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
+; CHECK-NEXT: [[CONV2:%.*]] = zext i8 [[TMP12]] to i32
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[RED]], [[CONV]]
+; CHECK-NEXT: [[ADD3]] = add i32 [[ADD]], [[CONV2]]
+; CHECK-NEXT: [[INC]] = add nuw i32 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+;
+; CHECK-MAX-BANDWIDTH-LABEL: define hidden i32 @accumulate_add_u8_u8(
+; CHECK-MAX-BANDWIDTH-SAME: ptr noundef readonly [[A:%.*]], ptr noundef readonly [[B:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-MAX-BANDWIDTH-NEXT: [[ENTRY:.*]]:
+; CHECK-MAX-BANDWIDTH-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 16
+; CHECK-MAX-BANDWIDTH-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-MAX-BANDWIDTH: [[VECTOR_PH]]:
+; CHECK-MAX-BANDWIDTH-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N]], 16
+; CHECK-MAX-BANDWIDTH-NEXT: [[N_VEC:%.*]] = sub i32 [[N]], [[N_MOD_VF]]
+; CHECK-MAX-BANDWIDTH-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK-MAX-BANDWIDTH: [[VECTOR_BODY]]:
+; CHECK-MAX-BANDWIDTH-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-MAX-BANDWIDTH-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE2:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-MAX-BANDWIDTH-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i32 [[INDEX]]
+; CHECK-MAX-BANDWIDTH-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
+; CHECK-MAX-BANDWIDTH-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i32 [[INDEX]]
+; CHECK-MAX-BANDWIDTH-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
+; CHECK-MAX-BANDWIDTH-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-MAX-BANDWIDTH-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]])
+; CHECK-MAX-BANDWIDTH-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-MAX-BANDWIDTH-NEXT: [[PARTIAL_REDUCE2]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP6]])
+; CHECK-MAX-BANDWIDTH-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
+; CHECK-MAX-BANDWIDTH-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-MAX-BANDWIDTH-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-MAX-BANDWIDTH: [[MIDDLE_BLOCK]]:
+; CHECK-MAX-BANDWIDTH-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE2]])
+; CHECK-MAX-BANDWIDTH-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
+; CHECK-MAX-BANDWIDTH-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
+; CHECK-MAX-BANDWIDTH: [[SCALAR_PH]]:
+; CHECK-MAX-BANDWIDTH-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-MAX-BANDWIDTH-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-MAX-BANDWIDTH-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK-MAX-BANDWIDTH: [[FOR_COND_CLEANUP]]:
+; CHECK-MAX-BANDWIDTH-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[ADD3:%.*]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
+; CHECK-MAX-BANDWIDTH-NEXT: ret i32 [[RESULT_0_LCSSA]]
+; CHECK-MAX-BANDWIDTH: [[FOR_BODY]]:
+; CHECK-MAX-BANDWIDTH-NEXT: [[IV:%.*]] = phi i32 [ [[INC:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-MAX-BANDWIDTH-NEXT: [[RED:%.*]] = phi i32 [ [[ADD3]], %[[FOR_BODY]] ], [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ]
+; CHECK-MAX-BANDWIDTH-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i32 [[IV]]
+; CHECK-MAX-BANDWIDTH-NEXT: [[TMP11:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-MAX-BANDWIDTH-NEXT: [[CONV:%.*]] = zext i8 [[TMP11]] to i32
+; CHECK-MAX-BANDWIDTH-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i32 [[IV]]
+; CHECK-MAX-BANDWIDTH-NEXT: [[TMP12:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
+; CHECK-MAX-BANDWIDTH-NEXT: [[CONV2:%.*]] = zext i8 [[TMP12]] to i32
+; CHECK-MAX-BANDWIDTH-NEXT: [[ADD:%.*]] = add i32 [[RED]], [[CONV]]
+; CHECK-MAX-BANDWIDTH-NEXT: [[ADD3]] = add i32 [[ADD]], [[CONV2]]
+; CHECK-MAX-BANDWIDTH-NEXT: [[INC]] = add nuw i32 [[IV]], 1
+; CHECK-MAX-BANDWIDTH-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
+; CHECK-MAX-BANDWIDTH-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+;
+entry:
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret i32 %add3
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %red = phi i32 [ %add3, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw i8, ptr %a, i32 %iv
+ %0 = load i8, ptr %arrayidx, align 1
+ %conv = zext i8 %0 to i32
+ %arrayidx1 = getelementptr inbounds nuw i8, ptr %b, i32 %iv
+ %1 = load i8, ptr %arrayidx1, align 1
+ %conv2 = zext i8 %1 to i32
+ %add = add i32 %red, %conv
+ %add3 = add i32 %add, %conv2
+ %inc = add nuw i32 %iv, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
diff --git a/llvm/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll b/llvm/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll
index 9168ebf..08d39ea 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll
@@ -214,7 +214,7 @@ for.end15: ; preds = %for.end.us, %entry
ret void
}
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "use-soft-float"="false" }
!3 = !{!4, !5}
!4 = !{!4}
diff --git a/llvm/test/Transforms/LoopVectorize/X86/int128_no_gather.ll b/llvm/test/Transforms/LoopVectorize/X86/int128_no_gather.ll
index 44e9d3e..b7f8be1 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/int128_no_gather.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/int128_no_gather.ll
@@ -71,6 +71,6 @@ declare i32 @printf(ptr, ...) #1
; Function Attrs: nounwind
declare i32 @puts(ptr nocapture readonly) #2
-attributes #0 = { noinline nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="skylake-avx512" "target-features"="+adx,+aes,+avx,+avx2,+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl,+bmi,+bmi2,+clflushopt,+clwb,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+mpx,+pclmul,+pku,+popcnt,+rdrnd,+rdseed,+rtm,+sgx,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsavec,+xsaveopt,+xsaves" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="skylake-avx512" "target-features"="+adx,+aes,+avx,+avx2,+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl,+bmi,+bmi2,+clflushopt,+clwb,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+mpx,+pclmul,+pku,+popcnt,+rdrnd,+rdseed,+rtm,+sgx,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsavec,+xsaveopt,+xsaves" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { noinline nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="skylake-avx512" "target-features"="+adx,+aes,+avx,+avx2,+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl,+bmi,+bmi2,+clflushopt,+clwb,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+mpx,+pclmul,+pku,+popcnt,+rdrnd,+rdseed,+rtm,+sgx,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsavec,+xsaveopt,+xsaves" "use-soft-float"="false" }
+attributes #1 = { nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="skylake-avx512" "target-features"="+adx,+aes,+avx,+avx2,+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl,+bmi,+bmi2,+clflushopt,+clwb,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+mpx,+pclmul,+pku,+popcnt,+rdrnd,+rdseed,+rtm,+sgx,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsavec,+xsaveopt,+xsaves" "use-soft-float"="false" }
attributes #2 = { nounwind }
diff --git a/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll b/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll
index f066000..52e90e4 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll
@@ -362,4 +362,4 @@ for.body:
br i1 %exitcond, label %for.cond.cleanup, label %for.body
}
-attributes #0 = { "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "unsafe-fp-math"="false" }
+attributes #0 = { "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" }
diff --git a/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll b/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll
index 005696a..e405fe7 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll
@@ -335,4 +335,4 @@ for.body: ; preds = %for.body.preheader,
br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit99
}
-attributes #0 = { norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="broadwell" "target-features"="+adx,+aes,+avx,+avx2,+avx512cd,+avx512f,+bmi,+bmi2,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+evex512,+lzcnt,+mmx,+movbe,+pclmul,+popcnt,+rdrnd,+rdseed,+rtm,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt,-vzeroupper" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="broadwell" "target-features"="+adx,+aes,+avx,+avx2,+avx512cd,+avx512f,+bmi,+bmi2,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+evex512,+lzcnt,+mmx,+movbe,+pclmul,+popcnt,+rdrnd,+rdseed,+rtm,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt,-vzeroupper" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/LoopVectorize/X86/tail_folding_and_assume_safety.ll b/llvm/test/Transforms/LoopVectorize/X86/tail_folding_and_assume_safety.ll
index b98a2ea..c7550ca 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/tail_folding_and_assume_safety.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/tail_folding_and_assume_safety.ll
@@ -143,7 +143,7 @@ for.inc:
br i1 %exitcond, label %for.cond.cleanup, label %for.body, !llvm.loop !11
}
-attributes #0 = { norecurse nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { norecurse nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Transforms/LoopVectorize/X86/transform-narrow-interleave-to-widen-memory.ll b/llvm/test/Transforms/LoopVectorize/X86/transform-narrow-interleave-to-widen-memory.ll
index f4d80af..2a3ce03 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/transform-narrow-interleave-to-widen-memory.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/transform-narrow-interleave-to-widen-memory.ll
@@ -8,69 +8,15 @@ target triple = "x86_64-unknown-linux"
define void @test_4xi64(ptr noalias %data, ptr noalias %factor, i64 noundef %n) {
; CHECK-LABEL: define void @test_4xi64(
; CHECK-SAME: ptr noalias [[DATA:%.*]], ptr noalias [[FACTOR:%.*]], i64 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] {
-; CHECK-NEXT: [[ITER_CHECK:.*]]:
+; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH:.*]], label %[[VECTOR_MAIN_LOOP_ITER_CHECK:.*]]
-; CHECK: [[VECTOR_MAIN_LOOP_ITER_CHECK]]:
-; CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[N]], 16
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK1]], label %[[VEC_EPILOG_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[N_MOD_VF1:%.*]] = urem i64 [[N]], 16
-; CHECK-NEXT: [[N_VEC1:%.*]] = sub i64 [[N]], [[N_MOD_VF1]]
-; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
-; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT1:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 1
-; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 3
-; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i64, ptr [[FACTOR]], i64 [[INDEX]]
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i64, ptr [[FACTOR]], i64 [[TMP0]]
-; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i64, ptr [[FACTOR]], i64 [[TMP1]]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[FACTOR]], i64 [[TMP2]]
-; CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr [[TMP20]], align 8
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i64> poison, i64 [[TMP7]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT1]], <4 x i64> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP8:%.*]] = load i64, ptr [[TMP21]], align 8
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT5:%.*]] = insertelement <4 x i64> poison, i64 [[TMP8]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT5]], <4 x i64> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP22]], align 8
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT7:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT8:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT7]], <4 x i64> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT9:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT10:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT9]], <4 x i64> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds { i64, i64, i64, i64 }, ptr [[DATA]], i64 [[INDEX]], i32 0
-; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds { i64, i64, i64, i64 }, ptr [[DATA]], i64 [[TMP0]], i32 0
-; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds { i64, i64, i64, i64 }, ptr [[DATA]], i64 [[TMP1]], i32 0
-; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds { i64, i64, i64, i64 }, ptr [[DATA]], i64 [[TMP2]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP11]], align 8
-; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i64>, ptr [[TMP12]], align 8
-; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x i64>, ptr [[TMP13]], align 8
-; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i64>, ptr [[TMP23]], align 8
-; CHECK-NEXT: [[TMP15:%.*]] = mul <4 x i64> [[BROADCAST_SPLAT1]], [[WIDE_LOAD1]]
-; CHECK-NEXT: [[TMP16:%.*]] = mul <4 x i64> [[BROADCAST_SPLAT6]], [[WIDE_LOAD2]]
-; CHECK-NEXT: [[TMP17:%.*]] = mul <4 x i64> [[BROADCAST_SPLAT8]], [[WIDE_LOAD3]]
-; CHECK-NEXT: [[TMP18:%.*]] = mul <4 x i64> [[BROADCAST_SPLAT10]], [[WIDE_LOAD4]]
-; CHECK-NEXT: store <4 x i64> [[TMP15]], ptr [[TMP11]], align 8
-; CHECK-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP12]], align 8
-; CHECK-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP13]], align 8
-; CHECK-NEXT: store <4 x i64> [[TMP18]], ptr [[TMP23]], align 8
-; CHECK-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT1]], [[N_VEC1]]
-; CHECK-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
-; CHECK: [[MIDDLE_BLOCK]]:
-; CHECK-NEXT: [[CMP_N1:%.*]] = icmp eq i64 [[N]], [[N_VEC1]]
-; CHECK-NEXT: br i1 [[CMP_N1]], label %[[EXIT:.*]], label %[[VEC_EPILOG_ITER_CHECK:.*]]
-; CHECK: [[VEC_EPILOG_ITER_CHECK]]:
-; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF1]], 4
-; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label %[[VEC_EPILOG_SCALAR_PH]], label %[[VEC_EPILOG_PH]]
-; CHECK: [[VEC_EPILOG_PH]]:
-; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC1]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[VECTOR_MAIN_LOOP_ITER_CHECK]] ]
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
-; CHECK-NEXT: br label %[[VEC_EPILOG_VECTOR_BODY:.*]]
-; CHECK: [[VEC_EPILOG_VECTOR_BODY]]:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], %[[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VEC_EPILOG_VECTOR_BODY]] ]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[FACTOR]], i64 [[IV]]
; CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[TMP5]], i64 0
@@ -81,15 +27,15 @@ define void @test_4xi64(ptr noalias %data, ptr noalias %factor, i64 noundef %n)
; CHECK-NEXT: store <4 x i64> [[TMP4]], ptr [[TMP3]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 1
; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP14]], label %[[VEC_EPILOG_MIDDLE_BLOCK:.*]], label %[[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
-; CHECK: [[VEC_EPILOG_MIDDLE_BLOCK]]:
+; CHECK-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT]], label %[[VEC_EPILOG_SCALAR_PH]]
-; CHECK: [[VEC_EPILOG_SCALAR_PH]]:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC1]], %[[VEC_EPILOG_ITER_CHECK]] ], [ 0, %[[ITER_CHECK]] ]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
-; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[VEC_EPILOG_SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[DATA_2:%.*]] = getelementptr inbounds i64, ptr [[FACTOR]], i64 [[IV1]]
; CHECK-NEXT: [[L_2:%.*]] = load i64, ptr [[DATA_2]], align 8
; CHECK-NEXT: [[DATA_0:%.*]] = getelementptr inbounds { i64, i64, i64, i64 }, ptr [[DATA]], i64 [[IV1]], i32 0
@@ -110,7 +56,7 @@ define void @test_4xi64(ptr noalias %data, ptr noalias %factor, i64 noundef %n)
; CHECK-NEXT: store i64 [[MUL_3]], ptr [[DATA_3]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -171,7 +117,7 @@ define void @test_2xi64(ptr noalias %data, ptr noalias %factor, i64 noundef %n)
; CHECK-NEXT: store <8 x i64> [[INTERLEAVED_VEC]], ptr [[TMP4]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 4
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
@@ -194,7 +140,7 @@ define void @test_2xi64(ptr noalias %data, ptr noalias %factor, i64 noundef %n)
; CHECK-NEXT: store i64 [[MUL_1]], ptr [[DATA_1]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -249,7 +195,7 @@ define void @test_2xi64_interleave_loads_order_flipped(ptr noalias %data, ptr no
; CHECK-NEXT: store <8 x i64> [[INTERLEAVED_VEC]], ptr [[TMP4]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 4
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
@@ -272,7 +218,7 @@ define void @test_2xi64_interleave_loads_order_flipped(ptr noalias %data, ptr no
; CHECK-NEXT: store i64 [[MUL_1]], ptr [[DATA_1]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -327,7 +273,7 @@ define void @test_2xi64_store_order_flipped_1(ptr noalias %data, ptr noalias %fa
; CHECK-NEXT: store <8 x i64> [[INTERLEAVED_VEC]], ptr [[TMP4]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 4
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
@@ -350,7 +296,7 @@ define void @test_2xi64_store_order_flipped_1(ptr noalias %data, ptr noalias %fa
; CHECK-NEXT: store i64 [[MUL_0]], ptr [[DATA_1]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -405,7 +351,7 @@ define void @test_2xi64_store_order_flipped_2(ptr noalias %data, ptr noalias %fa
; CHECK-NEXT: store <8 x i64> [[INTERLEAVED_VEC]], ptr [[TMP4]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 4
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
@@ -428,7 +374,7 @@ define void @test_2xi64_store_order_flipped_2(ptr noalias %data, ptr noalias %fa
; CHECK-NEXT: store i64 [[MUL_1]], ptr [[DATA_0]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -489,7 +435,7 @@ define void @test_2xi64_different_loads_feeding_fmul(ptr noalias %data, ptr noal
; CHECK-NEXT: store <8 x i64> [[INTERLEAVED_VEC]], ptr [[TMP16]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 4
; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
@@ -513,7 +459,7 @@ define void @test_2xi64_different_loads_feeding_fmul(ptr noalias %data, ptr noal
; CHECK-NEXT: store i64 [[MUL_1]], ptr [[DATA_1]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -573,7 +519,7 @@ define void @test_3xi64(ptr noalias %data, ptr noalias %factor, i64 noundef %n)
; CHECK-NEXT: store <12 x i64> [[INTERLEAVED_VEC]], ptr [[TMP3]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 4
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
@@ -598,7 +544,7 @@ define void @test_3xi64(ptr noalias %data, ptr noalias %factor, i64 noundef %n)
; CHECK-NEXT: store i64 [[MUL_2]], ptr [[DATA_2]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -707,7 +653,7 @@ define void @test_3xi32(ptr noalias %data, ptr noalias %factor, i64 noundef %n)
; CHECK-NEXT: store <24 x i32> [[INTERLEAVED_VEC]], ptr [[TMP5]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 8
; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: br label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
@@ -731,7 +677,7 @@ define void @test_3xi32(ptr noalias %data, ptr noalias %factor, i64 noundef %n)
; CHECK-NEXT: store i32 [[MUL_2]], ptr [[DATA_2]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
@@ -765,20 +711,19 @@ exit:
; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
-; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]}
-; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META2]], [[META1]]}
-; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]}
-; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META2]], [[META1]]}
-; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]}
-; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META2]], [[META1]]}
-; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]]}
-; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META2]], [[META1]]}
-; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]], [[META2]]}
-; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META2]], [[META1]]}
-; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]], [[META2]]}
-; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META2]], [[META1]]}
-; CHECK: [[LOOP15]] = distinct !{[[LOOP15]], [[META1]], [[META2]]}
-; CHECK: [[LOOP16]] = distinct !{[[LOOP16]], [[META2]], [[META1]]}
-; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]], [[META2]]}
-; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META2]], [[META1]]}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
+; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
+; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
+; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]}
+; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
+; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META2]], [[META1]]}
+; CHECK: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
+; CHECK: [[LOOP11]] = distinct !{[[LOOP11]], [[META2]], [[META1]]}
+; CHECK: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]}
+; CHECK: [[LOOP13]] = distinct !{[[LOOP13]], [[META2]], [[META1]]}
+; CHECK: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]], [[META2]]}
+; CHECK: [[LOOP15]] = distinct !{[[LOOP15]], [[META2]], [[META1]]}
+; CHECK: [[LOOP16]] = distinct !{[[LOOP16]], [[META1]], [[META2]]}
+; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META2]], [[META1]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-profitable.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-profitable.ll
index 2c187ca..9471e4a 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-profitable.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-profitable.ll
@@ -72,7 +72,7 @@ for.end: ; preds = %for.body, %entry
ret void, !dbg !27
}
-attributes #0 = { nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+mmx,+sse,+sse2" "use-soft-float"="false" }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!7, !8}
diff --git a/llvm/test/Transforms/LoopVectorize/diag-missing-instr-debug-loc.ll b/llvm/test/Transforms/LoopVectorize/diag-missing-instr-debug-loc.ll
index 8ce2a97..0656de4 100644
--- a/llvm/test/Transforms/LoopVectorize/diag-missing-instr-debug-loc.ll
+++ b/llvm/test/Transforms/LoopVectorize/diag-missing-instr-debug-loc.ll
@@ -50,7 +50,7 @@ for.body: ; preds = %for.body.preheader,
br i1 %exitcond, label %for.cond.cleanup, label %for.body, !dbg !9, !llvm.loop !18
}
-attributes #0 = { norecurse nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { norecurse nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3,+x87" "use-soft-float"="false" }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4}
diff --git a/llvm/test/Transforms/LoopVectorize/diag-with-hotness-info-2.ll b/llvm/test/Transforms/LoopVectorize/diag-with-hotness-info-2.ll
index 65bd36c..5e51624f 100644
--- a/llvm/test/Transforms/LoopVectorize/diag-with-hotness-info-2.ll
+++ b/llvm/test/Transforms/LoopVectorize/diag-with-hotness-info-2.ll
@@ -134,7 +134,7 @@ for.cond.cleanup:
ret void, !dbg !44
}
-attributes #0 = { norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3,+x87" "use-soft-float"="false" }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4, !5}
diff --git a/llvm/test/Transforms/LoopVectorize/diag-with-hotness-info.ll b/llvm/test/Transforms/LoopVectorize/diag-with-hotness-info.ll
index 4b7b714..77ec95a 100644
--- a/llvm/test/Transforms/LoopVectorize/diag-with-hotness-info.ll
+++ b/llvm/test/Transforms/LoopVectorize/diag-with-hotness-info.ll
@@ -145,7 +145,7 @@ for.body: ; preds = %entry, %for.body
br i1 %exitcond, label %for.cond.cleanup, label %for.body, !dbg !43, !llvm.loop !55
}
-attributes #0 = { norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3,+x87" "use-soft-float"="false" }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4, !5}
diff --git a/llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll b/llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll
index 5cf99b8..3487331 100644
--- a/llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll
+++ b/llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll
@@ -135,7 +135,7 @@ thread-pre-split5: ; preds = %.lr.ph
ret void
}
-attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
!llvm.ident = !{!0}
diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
index 8efe29a..fc2e233 100644
--- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
@@ -783,7 +783,7 @@ for.body: ; preds = %for.body, %entry
@SA = common global i32 0, align 4
@SB = common global float 0.000000e+00, align 4
-define void @int_float_struct(ptr nocapture readonly %A) #0 {
+define void @int_float_struct(ptr nocapture readonly %A) {
; CHECK-LABEL: @int_float_struct(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[VECTOR_PH:%.*]]
@@ -1546,5 +1546,3 @@ loop:
end:
ret void
}
-
-attributes #0 = { "unsafe-fp-math"="true" }
diff --git a/llvm/test/Transforms/LoopVectorize/metadata-width.ll b/llvm/test/Transforms/LoopVectorize/metadata-width.ll
index ddf9029..22243d9 100644
--- a/llvm/test/Transforms/LoopVectorize/metadata-width.ll
+++ b/llvm/test/Transforms/LoopVectorize/metadata-width.ll
@@ -89,7 +89,7 @@ for.end: ; preds = %for.body, %entry
ret void
}
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "use-soft-float"="false" }
!0 = !{!0, !1, !5}
!1 = !{!"llvm.loop.vectorize.width", i32 8}
diff --git a/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll b/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll
index a1fc1b8..87b5a0b 100644
--- a/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll
+++ b/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll
@@ -56,4 +56,4 @@ for.end: ; preds = %for.body
ret i32 0
}
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/LoopVectorize/runtime-check-address-space.ll b/llvm/test/Transforms/LoopVectorize/runtime-check-address-space.ll
index 705bbab..64ae730 100644
--- a/llvm/test/Transforms/LoopVectorize/runtime-check-address-space.ll
+++ b/llvm/test/Transforms/LoopVectorize/runtime-check-address-space.ll
@@ -218,4 +218,4 @@ for.end: ; preds = %for.body, %entry
ret void
}
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/LoopVectorize/runtime-check-readonly-address-space.ll b/llvm/test/Transforms/LoopVectorize/runtime-check-readonly-address-space.ll
index 1effb10..872a3929 100644
--- a/llvm/test/Transforms/LoopVectorize/runtime-check-readonly-address-space.ll
+++ b/llvm/test/Transforms/LoopVectorize/runtime-check-readonly-address-space.ll
@@ -129,4 +129,4 @@ for.end: ; preds = %for.body
ret void
}
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/LoopVectorize/scev-exitlim-crash.ll b/llvm/test/Transforms/LoopVectorize/scev-exitlim-crash.ll
index 8224d6b..137e098 100644
--- a/llvm/test/Transforms/LoopVectorize/scev-exitlim-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/scev-exitlim-crash.ll
@@ -104,8 +104,8 @@ for.end26: ; preds = %for.cond4.for.end26
}
declare i32 @fn2(double) #1
-attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "use-soft-float"="false" }
!0 = !{!"int", !1}
!1 = !{!"omnipotent char", !2}
diff --git a/llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll b/llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll
index e2f5007..cc187de 100644
--- a/llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll
+++ b/llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll
@@ -35,11 +35,7 @@ if.end5: ; preds = %if.then, %entry
ret i1 %rez.0
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture)
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(ptr nocapture) #1
-
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { argmemonly nounwind }
-attributes #2 = { nounwind }
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/MergeICmps/X86/pr41917.ll b/llvm/test/Transforms/MergeICmps/X86/pr41917.ll
index d4fd34a..d8065e8 100644
--- a/llvm/test/Transforms/MergeICmps/X86/pr41917.ll
+++ b/llvm/test/Transforms/MergeICmps/X86/pr41917.ll
@@ -7,13 +7,13 @@ target triple = "i386-pc-windows-msvc19.11.0"
%class.a = type { i32, i32, i32, i32, i32 }
; Function Attrs: nounwind optsize
-define dso_local zeroext i1 @pr41917(ptr byval(%class.a) nocapture readonly align 4 %g, ptr byval(%class.a) nocapture readonly align 4 %p2) local_unnamed_addr #0 {
+define dso_local zeroext i1 @pr41917(ptr byval(%class.a) nocapture readonly align 4 %g, ptr byval(%class.a) nocapture readonly align 4 %p2) local_unnamed_addr {
; CHECK-LABEL: @pr41917(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call zeroext i1 @f2() #[[ATTR3:[0-9]+]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call zeroext i1 @f2()
; CHECK-NEXT: br i1 [[CALL]], label [[LAND_RHS:%.*]], label %"land.end+land.rhs3"
; CHECK: land.rhs:
-; CHECK-NEXT: [[CALL1:%.*]] = tail call zeroext i1 @f2() #[[ATTR3]]
+; CHECK-NEXT: [[CALL1:%.*]] = tail call zeroext i1 @f2()
; CHECK-NEXT: br label %"land.end+land.rhs3"
; CHECK: "land.end+land.rhs3":
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_A:%.*]], ptr [[G:%.*]], i32 0, i32 1
@@ -25,11 +25,11 @@ define dso_local zeroext i1 @pr41917(ptr byval(%class.a) nocapture readonly alig
; CHECK-NEXT: ret i1 [[TMP2]]
;
entry:
- %call = tail call zeroext i1 @f2() #2
+ %call = tail call zeroext i1 @f2()
br i1 %call, label %land.rhs, label %land.end
land.rhs: ; preds = %entry
- %call1 = tail call zeroext i1 @f2() #2
+ %call1 = tail call zeroext i1 @f2()
br label %land.end
land.end: ; preds = %land.rhs, %entry
@@ -53,11 +53,7 @@ land.end6: ; preds = %land.rhs3, %land.en
ret i1 %4
}
-declare dso_local zeroext i1 @f2() local_unnamed_addr #1
-
-attributes #0 = { nounwind optsize "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { optsize "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind optsize }
+declare dso_local zeroext i1 @f2() local_unnamed_addr
!llvm.module.flags = !{!0, !1}
!llvm.ident = !{!2}
diff --git a/llvm/test/Transforms/NewGVN/basic-cyclic-opt.ll b/llvm/test/Transforms/NewGVN/basic-cyclic-opt.ll
index 1cf9fd9..12b4184 100644
--- a/llvm/test/Transforms/NewGVN/basic-cyclic-opt.ll
+++ b/llvm/test/Transforms/NewGVN/basic-cyclic-opt.ll
@@ -4,7 +4,7 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
;; Function Attrs: nounwind ssp uwtable
;; We should eliminate the sub, and one of the phi nodes
-define void @vnum_test1(ptr %data) #0 {
+define void @vnum_test1(ptr %data) {
; CHECK-LABEL: @vnum_test1(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i32, ptr [[DATA:%.*]], i64 3
@@ -74,7 +74,7 @@ bb19: ; preds = %bb4
;; We should eliminate the sub, one of the phi nodes, prove the store of the sub
;; and the load of data are equivalent, that the load always produces constant 0, and
;; delete the load replacing it with constant 0.
-define i32 @vnum_test2(ptr %data) #0 {
+define i32 @vnum_test2(ptr %data) {
; CHECK-LABEL: @vnum_test2(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i32, ptr [[DATA:%.*]], i64 3
@@ -142,11 +142,10 @@ bb21: ; preds = %bb4
ret i32 %p.0
}
-
; Function Attrs: nounwind ssp uwtable
;; Same as test 2, with a conditional store of m-n, so it has to also discover
;; that data ends up with the same value no matter what branch is taken.
-define i32 @vnum_test3(ptr %data) #0 {
+define i32 @vnum_test3(ptr %data) {
; CHECK-LABEL: @vnum_test3(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i32, ptr [[DATA:%.*]], i64 3
@@ -305,7 +304,6 @@ bb3: ; preds = %bb2
%tmp3 = sub i32 %tmp, %phi2
ret i32 %tmp3
}
-attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
!llvm.ident = !{!0, !0, !0}
diff --git a/llvm/test/Transforms/NewGVN/cond_br2-xfail.ll b/llvm/test/Transforms/NewGVN/cond_br2-xfail.ll
index 8b2d662..b6da86b 100644
--- a/llvm/test/Transforms/NewGVN/cond_br2-xfail.ll
+++ b/llvm/test/Transforms/NewGVN/cond_br2-xfail.ll
@@ -10,7 +10,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
%"union.llvm::SmallVectorBase::U" = type { x86_fp80 }
; Function Attrs: ssp uwtable
-define void @_Z4testv() #0 personality ptr @__gxx_personality_v0 {
+define void @_Z4testv() personality ptr @__gxx_personality_v0 {
; CHECK: @_Z4testv()
; CHECK: invoke.cont:
; CHECK: br i1 true, label %new.notnull.i11, label %if.end.i14
@@ -18,7 +18,7 @@ define void @_Z4testv() #0 personality ptr @__gxx_personality_v0 {
entry:
%sv = alloca %"class.llvm::SmallVector", align 16
- call void @llvm.lifetime.start.p0(ptr %sv) #1
+ call void @llvm.lifetime.start.p0(ptr %sv)
%FirstEl.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", ptr %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 3
store ptr %FirstEl.i.i.i.i.i.i, ptr %sv, align 16, !tbaa !4
%EndX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", ptr %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 1
@@ -83,11 +83,11 @@ invoke.cont3: ; preds = %invoke.cont2
br i1 %cmp.i.i.i.i19, label %_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21, label %if.then.i.i.i20
if.then.i.i.i20: ; preds = %invoke.cont3
- call void @free(ptr %5) #1
+ call void @free(ptr %5)
br label %_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21
_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21: ; preds = %invoke.cont3, %if.then.i.i.i20
- call void @llvm.lifetime.end.p0(ptr %sv) #1
+ call void @llvm.lifetime.end.p0(ptr %sv)
ret void
lpad: ; preds = %if.end.i14, %if.end.i, %invoke.cont2
@@ -98,7 +98,7 @@ lpad: ; preds = %if.end.i14, %if.end
br i1 %cmp.i.i.i.i, label %eh.resume, label %if.then.i.i.i
if.then.i.i.i: ; preds = %lpad
- call void @free(ptr %7) #1
+ call void @free(ptr %7)
br label %eh.resume
eh.resume: ; preds = %if.then.i.i.i, %lpad
@@ -106,24 +106,19 @@ eh.resume: ; preds = %if.then.i.i.i, %lpa
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare i32 @__gxx_personality_v0(...)
-declare void @_Z1gRN4llvm11SmallVectorIiLj8EEE(ptr) #2
+declare void @_Z1gRN4llvm11SmallVectorIiLj8EEE(ptr)
; Function Attrs: nounwind
-declare void @llvm.lifetime.end.p0(ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture)
-declare void @_ZN4llvm15SmallVectorBase8grow_podEmm(ptr, i64, i64) #2
+declare void @_ZN4llvm15SmallVectorBase8grow_podEmm(ptr, i64, i64)
; Function Attrs: nounwind
-declare void @free(ptr nocapture) #3
-
-attributes #0 = { ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind }
-attributes #2 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+declare void @free(ptr nocapture)
!0 = !{!"any pointer", !1}
!1 = !{!"omnipotent char", !2}
diff --git a/llvm/test/Transforms/NewGVN/equivalent-phi.ll b/llvm/test/Transforms/NewGVN/equivalent-phi.ll
index ba4fc14..388b941 100644
--- a/llvm/test/Transforms/NewGVN/equivalent-phi.ll
+++ b/llvm/test/Transforms/NewGVN/equivalent-phi.ll
@@ -8,7 +8,7 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
;; one set of indexing calculations and a load
; Function Attrs: nounwind ssp uwtable
-define i32 @bar(i32 %arg, i32 %arg1, i32 %arg2) #0 {
+define i32 @bar(i32 %arg, i32 %arg1, i32 %arg2) {
; CHECK-LABEL: @bar(
; CHECK-NEXT: bb:
; CHECK-NEXT: br label [[BB3:%.*]]
@@ -59,8 +59,6 @@ bb20: ; preds = %bb17
ret i32 %tmp14
}
-attributes #0 = { nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Transforms/NewGVN/memory-handling.ll b/llvm/test/Transforms/NewGVN/memory-handling.ll
index f83d145..71e9041 100644
--- a/llvm/test/Transforms/NewGVN/memory-handling.ll
+++ b/llvm/test/Transforms/NewGVN/memory-handling.ll
@@ -282,10 +282,10 @@ declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1) #2
; Function Attrs: inlinehint nounwind readonly uwtable
declare i32 @tolower(i32) local_unnamed_addr #3
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind uwtable }
+attributes #1 = { nounwind readnone }
attributes #2 = { argmemonly nounwind }
-attributes #3 = { inlinehint nounwind readonly uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #3 = { inlinehint nounwind readonly uwtable }
attributes #4 = { nounwind readnone }
attributes #5 = { nounwind readonly }
diff --git a/llvm/test/Transforms/NewGVN/pr31483.ll b/llvm/test/Transforms/NewGVN/pr31483.ll
index 82e9a2a..c1fb836 100644
--- a/llvm/test/Transforms/NewGVN/pr31483.ll
+++ b/llvm/test/Transforms/NewGVN/pr31483.ll
@@ -6,7 +6,7 @@ target datalayout = "E-m:e-i64:64-n32:64"
;; Ensure we do not believe the indexing increments are unreachable due to incorrect memory
;; equivalence detection. In PR31483, we were deleting those blocks as unreachable
; Function Attrs: nounwind
-define signext i32 @ham(ptr %arg, ptr %arg1) #0 {
+define signext i32 @ham(ptr %arg, ptr %arg1) {
; CHECK-LABEL: @ham(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = alloca ptr, align 8
@@ -89,12 +89,7 @@ bb23: ; preds = %bb2
ret i32 undef
}
-declare signext i32 @zot(ptr, ...) #1
+declare signext i32 @zot(ptr, ...)
; Function Attrs: nounwind
-declare void @llvm.va_end(ptr) #2
-
-attributes #0 = { nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64" "target-features"="+altivec,-bpermd,-crypto,-direct-move,-extdiv,-power8-vector,-vsx" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64" "target-features"="+altivec,-bpermd,-crypto,-direct-move,-extdiv,-power8-vector,-vsx" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind }
-
+declare void @llvm.va_end(ptr)
diff --git a/llvm/test/Transforms/NewGVN/pr31501.ll b/llvm/test/Transforms/NewGVN/pr31501.ll
index 353c693..b2ba42b 100644
--- a/llvm/test/Transforms/NewGVN/pr31501.ll
+++ b/llvm/test/Transforms/NewGVN/pr31501.ll
@@ -49,9 +49,9 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
%struct.wombat.28 = type <{ ptr, i8, i8, [6 x i8] }>
; Function Attrs: norecurse nounwind ssp uwtable
-define weak_odr hidden ptr @quux(ptr %arg, ptr %arg1) local_unnamed_addr #0 align 2 {
+define weak_odr hidden ptr @quux(ptr %arg, ptr %arg1) local_unnamed_addr align 2 {
; CHECK-LABEL: define weak_odr hidden ptr @quux(
-; CHECK-SAME: ptr [[ARG:%.*]], ptr [[ARG1:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] align 2 {
+; CHECK-SAME: ptr [[ARG:%.*]], ptr [[ARG1:%.*]]) local_unnamed_addr align 2 {
; CHECK-NEXT: [[BB:.*]]:
; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds [[STRUCT_BARNEY:%.*]], ptr [[ARG]], i64 0, i32 3, i32 0, i32 0, i32 0
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 8, !tbaa [[ANYPTR_TBAA2:![0-9]+]]
@@ -112,8 +112,6 @@ bb21: ; preds = %bb19, %bb
ret ptr %tmp22
}
-attributes #0 = { norecurse nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Transforms/NewGVN/pr33187.ll b/llvm/test/Transforms/NewGVN/pr33187.ll
index 969f172..61eb7a5 100644
--- a/llvm/test/Transforms/NewGVN/pr33187.ll
+++ b/llvm/test/Transforms/NewGVN/pr33187.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
;; Ensure we don't change after value numbering by accidentally deleting the wrong expression.
; RUN: opt -passes=newgvn -S %s | FileCheck %s
-define void @fn1(i1 %arg) local_unnamed_addr #0 {
+define void @fn1(i1 %arg) local_unnamed_addr {
; CHECK-LABEL: @fn1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_COND_PREHEADER:%.*]]
@@ -89,8 +89,7 @@ if.end18: ; preds = %L, %while.body12
br label %while.cond10
}
-
-define void @hoge() local_unnamed_addr #0 {
+define void @hoge() local_unnamed_addr {
; CHECK-LABEL: @hoge(
; CHECK-NEXT: bb:
; CHECK-NEXT: br label [[BB1:%.*]]
@@ -108,9 +107,6 @@ bb1: ; preds = %bb1, %bb
br label %bb1
}
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
-
define void @a(i1 %arg) {
; CHECK-LABEL: @a(
; CHECK-NEXT: b:
@@ -143,4 +139,3 @@ f: ; preds = %d
%e = getelementptr i8, ptr %i, i64 1
br label %d
}
-
diff --git a/llvm/test/Transforms/NewGVN/pr33305.ll b/llvm/test/Transforms/NewGVN/pr33305.ll
index e742f14..ff645f8 100644
--- a/llvm/test/Transforms/NewGVN/pr33305.ll
+++ b/llvm/test/Transforms/NewGVN/pr33305.ll
@@ -16,9 +16,9 @@ target triple = "x86_64-apple-macosx10.12.0"
@str.2 = private unnamed_addr constant [8 x i8] c"Screwed\00"
; Function Attrs: nounwind optsize ssp uwtable
-define i32 @main() local_unnamed_addr #0 {
+define i32 @main() local_unnamed_addr {
; CHECK-LABEL: define i32 @main(
-; CHECK-SAME: ) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: ) local_unnamed_addr {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[DOTPR_I:%.*]] = load i32, ptr @c, align 4, !tbaa [[INT_TBAA3:![0-9]+]]
; CHECK-NEXT: [[CMP13_I:%.*]] = icmp slt i32 [[DOTPR_I]], 1
@@ -77,7 +77,7 @@ define i32 @main() local_unnamed_addr #0 {
; CHECK-NEXT: br i1 [[TOBOOL]], label %[[IF_END:.*]], label %[[IF_THEN:.*]]
; CHECK: [[IF_THEN]]:
; CHECK-NEXT: [[PUTS2:%.*]] = tail call i32 @puts(ptr @str.2)
-; CHECK-NEXT: tail call void @abort() #[[ATTR3:[0-9]+]]
+; CHECK-NEXT: tail call void @abort()
; CHECK-NEXT: unreachable
; CHECK: [[IF_END]]:
; CHECK-NEXT: [[PUTS:%.*]] = tail call i32 @puts(ptr @str)
@@ -153,7 +153,7 @@ fn1.exit: ; preds = %if.then.i, %for.end
if.then: ; preds = %fn1.exit
%puts2 = tail call i32 @puts(ptr @str.2)
- tail call void @abort() #3
+ tail call void @abort()
unreachable
if.end: ; preds = %fn1.exit
@@ -162,15 +162,10 @@ if.end: ; preds = %fn1.exit
}
; Function Attrs: noreturn nounwind optsize
-declare void @abort() local_unnamed_addr #1
+declare void @abort() local_unnamed_addr
; Function Attrs: nounwind
-declare i32 @puts(ptr nocapture readonly) local_unnamed_addr #2
-
-attributes #0 = { nounwind optsize ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { noreturn nounwind optsize "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind }
-attributes #3 = { noreturn nounwind optsize }
+declare i32 @puts(ptr nocapture readonly) local_unnamed_addr
!llvm.module.flags = !{!0, !1}
!llvm.ident = !{!2}
diff --git a/llvm/test/Transforms/NewGVN/pr34430.ll b/llvm/test/Transforms/NewGVN/pr34430.ll
index 490ba433..62d5770 100644
--- a/llvm/test/Transforms/NewGVN/pr34430.ll
+++ b/llvm/test/Transforms/NewGVN/pr34430.ll
@@ -4,7 +4,7 @@
source_filename = "bugpoint-output-e4c7d0f.bc"
; Make sure we still properly resolve phi cycles when they involve predicateinfo copies of phis.
-define void @hoge(i1 %arg) local_unnamed_addr #0 {
+define void @hoge(i1 %arg) local_unnamed_addr {
; CHECK-LABEL: @hoge(
; CHECK-NEXT: bb:
; CHECK-NEXT: br i1 %arg, label [[BB6:%.*]], label [[BB1:%.*]]
@@ -41,8 +41,6 @@ bb6: ; preds = %bb4, %bb2, %bb1, %b
br label %bb4
}
-attributes #0 = { norecurse noreturn nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.ident = !{!0}
!0 = !{!"clang version 6.0.0"}
diff --git a/llvm/test/Transforms/NewGVN/pr34452.ll b/llvm/test/Transforms/NewGVN/pr34452.ll
index 48bdd88..7c38147 100644
--- a/llvm/test/Transforms/NewGVN/pr34452.ll
+++ b/llvm/test/Transforms/NewGVN/pr34452.ll
@@ -6,9 +6,9 @@ source_filename = "bugpoint-output-09f7a24.bc"
@WHOLELINE = external local_unnamed_addr global i32, align 4
; Function Attrs: nounwind uwtable
-define void @sgrep() local_unnamed_addr #0 {
+define void @sgrep() local_unnamed_addr {
; CHECK-LABEL: define void @sgrep(
-; CHECK-SAME: ) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: ) local_unnamed_addr {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @WHOLELINE, align 4, !tbaa [[INT_TBAA1:![0-9]+]]
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP0]], 0
@@ -36,10 +36,7 @@ while.body.us: ; preds = %while.body.us, %ent
}
; Function Attrs: nounwind readnone speculatable
-declare { i64, i1 } @llvm.sadd.with.overflow.i64(i64, i64) #1
-
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "polly-optimized" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone speculatable }
+declare { i64, i1 } @llvm.sadd.with.overflow.i64(i64, i64)
!llvm.ident = !{!0}
diff --git a/llvm/test/Transforms/OpenMP/dead_use.ll b/llvm/test/Transforms/OpenMP/dead_use.ll
index 1c4b2c6..ad0f91c 100644
--- a/llvm/test/Transforms/OpenMP/dead_use.ll
+++ b/llvm/test/Transforms/OpenMP/dead_use.ll
@@ -6,9 +6,9 @@
@0 = private unnamed_addr global %struct.ident_t { i32 0, i32 2, i32 0, i32 0, ptr @.str }, align 8
; Function Attrs: nounwind uwtable
-define dso_local i32 @b() #0 {
+define dso_local i32 @b() {
; CHECK-LABEL: define dso_local i32 @b(
-; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: ) {
; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @a()
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP1]], align 4
@@ -21,9 +21,9 @@ define dso_local i32 @b() #0 {
}
; Function Attrs: nounwind uwtable
-define internal i32 @a() #0 {
+define internal i32 @a() {
; CHECK-LABEL: define internal i32 @a(
-; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-SAME: ) {
; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @b()
; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB0:[0-9]+]], i32 0, ptr @.omp_outlined.)
@@ -38,9 +38,9 @@ define internal i32 @a() #0 {
}
; Function Attrs: norecurse nounwind uwtable
-define internal void @.omp_outlined.(ptr noalias %0, ptr noalias %1) #1 {
+define internal void @.omp_outlined.(ptr noalias %0, ptr noalias %1) {
; CHECK-LABEL: define internal void @.omp_outlined.(
-; CHECK-SAME: ptr noalias [[TMP0:%.*]], ptr noalias [[TMP1:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-SAME: ptr noalias [[TMP0:%.*]], ptr noalias [[TMP1:%.*]]) {
; CHECK-NEXT: [[TMP3:%.*]] = alloca ptr, align 8
; CHECK-NEXT: [[TMP4:%.*]] = alloca ptr, align 8
; CHECK-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 8, !tbaa [[ANYPTR_TBAA2:![0-9]+]]
@@ -55,11 +55,7 @@ define internal void @.omp_outlined.(ptr noalias %0, ptr noalias %1) #1 {
}
; Function Attrs: nounwind
-declare !callback !6 void @__kmpc_fork_call(ptr, i32, ptr, ...) #2
-
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { norecurse nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind }
+declare !callback !6 void @__kmpc_fork_call(ptr, i32, ptr, ...)
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Transforms/OpenMP/icv_remarks.ll b/llvm/test/Transforms/OpenMP/icv_remarks.ll
index f76d487..3caa56f 100644
--- a/llvm/test/Transforms/OpenMP/icv_remarks.ll
+++ b/llvm/test/Transforms/OpenMP/icv_remarks.ll
@@ -14,55 +14,48 @@ target triple = "x86_64-unknown-linux-gnu"
; CHECK-DAG: remark: icv_remarks.c:12:0: OpenMP ICV nthreads Value: IMPLEMENTATION_DEFINED
; CHECK-DAG: remark: icv_remarks.c:12:0: OpenMP ICV active_levels Value: 0
; CHECK-DAG: remark: icv_remarks.c:12:0: OpenMP ICV cancel Value: 0
-define dso_local void @foo(i32 %a) local_unnamed_addr #0 !dbg !17 {
+define dso_local void @foo(i32 %a) local_unnamed_addr !dbg !17 {
entry:
%.kmpc_loc.addr = alloca %struct.ident_t, align 8
call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(24) %.kmpc_loc.addr, ptr nonnull align 8 dereferenceable(24) @0, i64 16, i1 false)
call void @llvm.dbg.value(metadata i32 %a, metadata !19, metadata !DIExpression()), !dbg !21
- tail call void @omp_set_num_threads(i32 %a) #1, !dbg !22
- %call = tail call i32 @omp_get_max_threads() #1, !dbg !23
+ tail call void @omp_set_num_threads(i32 %a), !dbg !22
+ %call = tail call i32 @omp_get_max_threads(), !dbg !23
call void @llvm.dbg.value(metadata i32 %call, metadata !20, metadata !DIExpression()), !dbg !21
- tail call void @use(i32 %call) #1, !dbg !24
+ tail call void @use(i32 %call), !dbg !24
%0 = getelementptr inbounds %struct.ident_t, ptr %.kmpc_loc.addr, i64 0, i32 4, !dbg !25
store ptr @1, ptr %0, align 8, !dbg !25, !tbaa !26
- call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull %.kmpc_loc.addr, i32 0, ptr @.omp_outlined.) #1, !dbg !25
+ call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull %.kmpc_loc.addr, i32 0, ptr @.omp_outlined.), !dbg !25
ret void, !dbg !32
}
-declare !dbg !4 dso_local void @omp_set_num_threads(i32) local_unnamed_addr #1
+declare !dbg !4 dso_local void @omp_set_num_threads(i32) local_unnamed_addr
-declare !dbg !9 dso_local i32 @omp_get_max_threads() local_unnamed_addr #1
+declare !dbg !9 dso_local i32 @omp_get_max_threads() local_unnamed_addr
-declare !dbg !12 dso_local void @use(i32) local_unnamed_addr #2
+declare !dbg !12 dso_local void @use(i32) local_unnamed_addr
; CHECK-DAG: remark: icv_remarks.c:18:0: OpenMP ICV nthreads Value: IMPLEMENTATION_DEFINED
; CHECK-DAG: remark: icv_remarks.c:18:0: OpenMP ICV active_levels Value: 0
; CHECK-DAG: remark: icv_remarks.c:18:0: OpenMP ICV cancel Value: 0
-define internal void @.omp_outlined.(ptr noalias nocapture readnone %.global_tid., ptr noalias nocapture readnone %.bound_tid.) #3 !dbg !33 {
+define internal void @.omp_outlined.(ptr noalias nocapture readnone %.global_tid., ptr noalias nocapture readnone %.bound_tid.) !dbg !33 {
entry:
call void @llvm.dbg.value(metadata ptr %.global_tid., metadata !41, metadata !DIExpression()), !dbg !43
call void @llvm.dbg.value(metadata ptr %.bound_tid., metadata !42, metadata !DIExpression()), !dbg !43
- call void @llvm.dbg.value(metadata ptr undef, metadata !44, metadata !DIExpression()) #1, !dbg !50
- call void @llvm.dbg.value(metadata ptr undef, metadata !47, metadata !DIExpression()) #1, !dbg !50
- tail call void @omp_set_num_threads(i32 10) #1, !dbg !52
- %call.i = tail call i32 @omp_get_max_threads() #1, !dbg !53
- call void @llvm.dbg.value(metadata i32 %call.i, metadata !48, metadata !DIExpression()) #1, !dbg !54
- tail call void @use(i32 %call.i) #1, !dbg !55
+ call void @llvm.dbg.value(metadata ptr undef, metadata !44, metadata !DIExpression()), !dbg !50
+ call void @llvm.dbg.value(metadata ptr undef, metadata !47, metadata !DIExpression()), !dbg !50
+ tail call void @omp_set_num_threads(i32 10), !dbg !52
+ %call.i = tail call i32 @omp_get_max_threads(), !dbg !53
+ call void @llvm.dbg.value(metadata i32 %call.i, metadata !48, metadata !DIExpression()), !dbg !54
+ tail call void @use(i32 %call.i), !dbg !55
ret void, !dbg !56
}
-declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #4
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
-declare !callback !57 dso_local void @__kmpc_fork_call(ptr, i32, ptr, ...) local_unnamed_addr #1
+declare !callback !57 dso_local void @__kmpc_fork_call(ptr, i32, ptr, ...) local_unnamed_addr
-declare void @llvm.dbg.value(metadata, metadata, metadata) #5
-
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind }
-attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { norecurse nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #4 = { argmemonly nounwind willreturn }
-attributes #5 = { nounwind readnone speculatable willreturn }
+declare void @llvm.dbg.value(metadata, metadata, metadata)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!13, !14, !15, !59}
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll
index 0a1efda..eb1cc5e 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll
@@ -9,7 +9,6 @@
; CHECK-NOT: remark: {{.*}}
; CHECK: !{!"branch_weights", i32 0, i32 200000}
-
; ModuleID = 'misexpect-branch-correct.c'
source_filename = "misexpect-branch-correct.c"
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -19,14 +18,14 @@ target triple = "x86_64-unknown-linux-gnu"
@outer_loop = constant i32 2000, align 4
; Function Attrs: nounwind
-define i32 @bar() #0 {
+define i32 @bar() {
entry:
%rando = alloca i32, align 4
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(ptr %rando) #4
+ call void @llvm.lifetime.start.p0(ptr %rando)
%call = call i32 (...) @buzz()
store i32 %call, ptr %rando, align 4, !tbaa !3
- call void @llvm.lifetime.start.p0(ptr %x) #4
+ call void @llvm.lifetime.start.p0(ptr %x)
store i32 0, ptr %x, align 4, !tbaa !3
%0 = load i32, ptr %rando, align 4, !tbaa !3
%rem = srem i32 %0, 200000
@@ -52,31 +51,25 @@ if.else: ; preds = %entry
if.end: ; preds = %if.else, %if.then
%2 = load i32, ptr %x, align 4, !tbaa !3
- call void @llvm.lifetime.end.p0(ptr %x) #4
- call void @llvm.lifetime.end.p0(ptr %rando) #4
+ call void @llvm.lifetime.end.p0(ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %rando)
ret i32 %2
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare i32 @buzz(...) #2
+declare i32 @buzz(...)
; Function Attrs: nounwind readnone willreturn
-declare i64 @llvm.expect.i64(i64, i64) #3
+declare i64 @llvm.expect.i64(i64, i64)
-declare i32 @baz(i32) #2
+declare i32 @baz(i32)
-declare i32 @foo(i32) #2
+declare i32 @foo(i32)
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(ptr nocapture) #1
-
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { argmemonly nounwind willreturn }
-attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { nounwind readnone willreturn }
-attributes #4 = { nounwind }
+declare void @llvm.lifetime.end.p0(ptr nocapture)
!llvm.module.flags = !{!0, !1}
!llvm.ident = !{!2}
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll
index 68b233e..3390825 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll
@@ -16,14 +16,14 @@ target triple = "x86_64-unknown-linux-gnu"
@outer_loop = constant i32 2000, align 4
; Function Attrs: nounwind
-define i32 @bar() #0 !dbg !6 {
+define i32 @bar() !dbg !6 {
entry:
%rando = alloca i32, align 4
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(ptr %rando) #4, !dbg !9
+ call void @llvm.lifetime.start.p0(ptr %rando), !dbg !9
%call = call i32 (...) @buzz(), !dbg !9
store i32 %call, ptr %rando, align 4, !dbg !9, !tbaa !10
- call void @llvm.lifetime.start.p0(ptr %x) #4, !dbg !14
+ call void @llvm.lifetime.start.p0(ptr %x), !dbg !14
store i32 0, ptr %x, align 4, !dbg !14, !tbaa !10
%0 = load i32, ptr %rando, align 4, !dbg !15, !tbaa !10
%rem = srem i32 %0, 200000, !dbg !15
@@ -49,31 +49,25 @@ if.else: ; preds = %entry
if.end: ; preds = %if.else, %if.then
%2 = load i32, ptr %x, align 4, !dbg !19, !tbaa !10
- call void @llvm.lifetime.end.p0(ptr %x) #4, !dbg !20
- call void @llvm.lifetime.end.p0(ptr %rando) #4, !dbg !20
+ call void @llvm.lifetime.end.p0(ptr %x), !dbg !20
+ call void @llvm.lifetime.end.p0(ptr %rando), !dbg !20
ret i32 %2, !dbg !19
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare i32 @buzz(...) #2
+declare i32 @buzz(...)
; Function Attrs: nounwind readnone willreturn
-declare i64 @llvm.expect.i64(i64, i64) #3
+declare i64 @llvm.expect.i64(i64, i64)
-declare i32 @baz(i32) #2
+declare i32 @baz(i32)
-declare i32 @foo(i32) #2
+declare i32 @foo(i32)
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(ptr nocapture) #1
-
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { argmemonly nounwind willreturn }
-attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { nounwind readnone willreturn }
-attributes #4 = { nounwind }
+declare void @llvm.lifetime.end.p0(ptr nocapture)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4}
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll
index 2f188f5..d34708c 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll
@@ -19,7 +19,6 @@
; DISABLED-NOT: warning: <unknown>:0:0: 19.98%
; DISABLED-NOT: remark: <unknown>:0:0: Potential performance regression from use of the llvm.expect intrinsic: Annotation was correct on 19.98% (399668 / 2000000) of profiled executions.
-
; ModuleID = 'misexpect-branch.c'
source_filename = "misexpect-branch.c"
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -29,14 +28,14 @@ target triple = "x86_64-unknown-linux-gnu"
@outer_loop = constant i32 2000, align 4
; Function Attrs: nounwind
-define i32 @bar() #0 {
+define i32 @bar() {
entry:
%rando = alloca i32, align 4
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(ptr %rando) #4
+ call void @llvm.lifetime.start.p0(ptr %rando)
%call = call i32 (...) @buzz()
store i32 %call, ptr %rando, align 4, !tbaa !3
- call void @llvm.lifetime.start.p0(ptr %x) #4
+ call void @llvm.lifetime.start.p0(ptr %x)
store i32 0, ptr %x, align 4, !tbaa !3
%0 = load i32, ptr %rando, align 4, !tbaa !3
%rem = srem i32 %0, 200000
@@ -62,31 +61,25 @@ if.else: ; preds = %entry
if.end: ; preds = %if.else, %if.then
%2 = load i32, ptr %x, align 4, !tbaa !3
- call void @llvm.lifetime.end.p0(ptr %x) #4
- call void @llvm.lifetime.end.p0(ptr %rando) #4
+ call void @llvm.lifetime.end.p0(ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %rando)
ret i32 %2
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare i32 @buzz(...) #2
+declare i32 @buzz(...)
; Function Attrs: nounwind readnone willreturn
-declare i64 @llvm.expect.i64(i64, i64) #3
+declare i64 @llvm.expect.i64(i64, i64)
-declare i32 @baz(i32) #2
+declare i32 @baz(i32)
-declare i32 @foo(i32) #2
+declare i32 @foo(i32)
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(ptr nocapture) #1
-
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { argmemonly nounwind willreturn }
-attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { nounwind readnone willreturn }
-attributes #4 = { nounwind }
+declare void @llvm.lifetime.end.p0(ptr nocapture)
!llvm.module.flags = !{!0, !1}
!llvm.ident = !{!2}
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll
index 4add781..a43e632 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll
@@ -7,7 +7,6 @@
; CHECK-NOT: warning: {{.*}}
; CHECK-NOT: remark: {{.*}}
-
; ModuleID = 'misexpect-branch-unpredictable.c'
source_filename = "clang/test/Profile/misexpect-branch-unpredictable.c"
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -17,14 +16,14 @@ target triple = "x86_64-unknown-linux-gnu"
@outer_loop = constant i32 2000, align 4
; Function Attrs: nounwind
-define i32 @bar() #0 {
+define i32 @bar() {
entry:
%rando = alloca i32, align 4
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(ptr %rando) #3
+ call void @llvm.lifetime.start.p0(ptr %rando)
%call = call i32 (...) @buzz()
store i32 %call, ptr %rando, align 4, !tbaa !2
- call void @llvm.lifetime.start.p0(ptr %x) #3
+ call void @llvm.lifetime.start.p0(ptr %x)
store i32 0, ptr %x, align 4, !tbaa !2
%0 = load i32, ptr %rando, align 4, !tbaa !2
%rem = srem i32 %0, 200000
@@ -49,27 +48,22 @@ if.else: ; preds = %entry
if.end: ; preds = %if.else, %if.then
%2 = load i32, ptr %x, align 4, !tbaa !2
- call void @llvm.lifetime.end.p0(ptr %x) #3
- call void @llvm.lifetime.end.p0(ptr %rando) #3
+ call void @llvm.lifetime.end.p0(ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %rando)
ret i32 %2
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare i32 @buzz(...) #2
+declare i32 @buzz(...)
-declare i32 @baz(i32) #2
+declare i32 @baz(i32)
-declare i32 @foo(i32) #2
+declare i32 @foo(i32)
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(ptr nocapture) #1
-
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { argmemonly nounwind willreturn }
-attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { nounwind }
+declare void @llvm.lifetime.end.p0(ptr nocapture)
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch.ll
index 5a7731b..7e01f7a 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch.ll
@@ -33,14 +33,14 @@ target triple = "x86_64-unknown-linux-gnu"
@outer_loop = constant i32 2000, align 4
; Function Attrs: nounwind
-define i32 @bar() #0 !dbg !6 {
+define i32 @bar() !dbg !6 {
entry:
%rando = alloca i32, align 4
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(ptr %rando) #4, !dbg !9
+ call void @llvm.lifetime.start.p0(ptr %rando), !dbg !9
%call = call i32 (...) @buzz(), !dbg !9
store i32 %call, ptr %rando, align 4, !dbg !9, !tbaa !10
- call void @llvm.lifetime.start.p0(ptr %x) #4, !dbg !14
+ call void @llvm.lifetime.start.p0(ptr %x), !dbg !14
store i32 0, ptr %x, align 4, !dbg !14, !tbaa !10
%0 = load i32, ptr %rando, align 4, !dbg !15, !tbaa !10
%rem = srem i32 %0, 200000, !dbg !15
@@ -66,31 +66,25 @@ if.else: ; preds = %entry
if.end: ; preds = %if.else, %if.then
%2 = load i32, ptr %x, align 4, !dbg !19, !tbaa !10
- call void @llvm.lifetime.end.p0(ptr %x) #4, !dbg !20
- call void @llvm.lifetime.end.p0(ptr %rando) #4, !dbg !20
+ call void @llvm.lifetime.end.p0(ptr %x), !dbg !20
+ call void @llvm.lifetime.end.p0(ptr %rando), !dbg !20
ret i32 %2, !dbg !19
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare i32 @buzz(...) #2
+declare i32 @buzz(...)
; Function Attrs: nounwind readnone willreturn
-declare i64 @llvm.expect.i64(i64, i64) #3
+declare i64 @llvm.expect.i64(i64, i64)
-declare i32 @baz(i32) #2
+declare i32 @baz(i32)
-declare i32 @foo(i32) #2
+declare i32 @foo(i32)
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(ptr nocapture) #1
-
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { argmemonly nounwind willreturn }
-attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { nounwind readnone willreturn }
-attributes #4 = { nounwind }
+declare void @llvm.lifetime.end.p0(ptr nocapture)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4}
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll b/llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll
index 859ba72..38c27b9 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll
@@ -25,7 +25,6 @@
; CORRECT-NOT: warning: {{.*}}
; CORRECT-NOT: remark: {{.*}}
-
; ModuleID = 'misexpect-switch.c'
source_filename = "misexpect-switch.c"
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -37,10 +36,10 @@ target triple = "x86_64-unknown-linux-gnu"
@arry = dso_local global [25 x i32] zeroinitializer, align 16
; Function Attrs: nounwind uwtable
-define dso_local void @init_arry() #0 {
+define dso_local void @init_arry() {
entry:
%i = alloca i32, align 4
- call void @llvm.lifetime.start.p0(ptr %i) #6
+ call void @llvm.lifetime.start.p0(ptr %i)
store i32 0, ptr %i, align 4, !tbaa !4
br label %for.cond
@@ -50,7 +49,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %call = call i32 @rand() #6
+ %call = call i32 @rand()
%rem = srem i32 %call, 10
%1 = load i32, ptr %i, align 4, !tbaa !4
%idxprom = sext i32 %1 to i64
@@ -65,24 +64,24 @@ for.inc: ; preds = %for.body
br label %for.cond
for.end: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(ptr %i) #6
+ call void @llvm.lifetime.end.p0(ptr %i)
ret void
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture)
; Function Attrs: nounwind readnone speculatable willreturn
-declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
; Function Attrs: nounwind
-declare dso_local i32 @rand() #3
+declare dso_local i32 @rand()
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; Function Attrs: nounwind uwtable
-define dso_local i32 @main() #0 {
+define dso_local i32 @main() {
entry:
%retval = alloca i32, align 4
%val = alloca i32, align 4
@@ -90,9 +89,9 @@ entry:
%condition = alloca i32, align 4
store i32 0, ptr %retval, align 4
call void @init_arry()
- call void @llvm.lifetime.start.p0(ptr %val) #6
+ call void @llvm.lifetime.start.p0(ptr %val)
store i32 0, ptr %val, align 4, !tbaa !4
- call void @llvm.lifetime.start.p0(ptr %j) #6
+ call void @llvm.lifetime.start.p0(ptr %j)
store i32 0, ptr %j, align 4, !tbaa !4
br label %for.cond
@@ -102,8 +101,8 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(ptr %condition) #6
- %call = call i32 @rand() #6
+ call void @llvm.lifetime.start.p0(ptr %condition)
+ %call = call i32 @rand()
%rem = srem i32 %call, 5
store i32 %rem, ptr %condition, align 4, !tbaa !4
%1 = load i32, ptr %condition, align 4, !tbaa !4
@@ -138,7 +137,7 @@ sw.default: ; preds = %for.body
unreachable
sw.epilog: ; preds = %sw.bb3, %sw.bb2, %sw.bb
- call void @llvm.lifetime.end.p0(ptr %condition) #6
+ call void @llvm.lifetime.end.p0(ptr %condition)
br label %for.inc
for.inc: ; preds = %sw.epilog
@@ -148,25 +147,17 @@ for.inc: ; preds = %sw.epilog
br label %for.cond
for.end: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(ptr %j) #6
- call void @llvm.lifetime.end.p0(ptr %val) #6
+ call void @llvm.lifetime.end.p0(ptr %j)
+ call void @llvm.lifetime.end.p0(ptr %val)
ret i32 0
}
; Function Attrs: nounwind readnone willreturn
-declare i64 @llvm.expect.i64(i64, i64) #4
-
-declare dso_local i32 @sum(ptr, i32) #5
+declare i64 @llvm.expect.i64(i64, i64)
-declare dso_local i32 @random_sample(ptr, i32) #5
+declare dso_local i32 @sum(ptr, i32)
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { argmemonly nounwind willreturn }
-attributes #2 = { nounwind readnone speculatable willreturn }
-attributes #3 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #4 = { nounwind readnone willreturn }
-attributes #5 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #6 = { nounwind }
+declare dso_local i32 @random_sample(ptr, i32)
!llvm.module.flags = !{!0, !1, !2}
!llvm.ident = !{!3}
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-switch.ll b/llvm/test/Transforms/PGOProfile/misexpect-switch.ll
index 242d5b8..9665559 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-switch.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-switch.ll
@@ -28,7 +28,6 @@
; CORRECT-NOT: warning: {{.*}}
; CORRECT-NOT: remark: {{.*}}
-
; ModuleID = 'misexpect-switch.c'
source_filename = "misexpect-switch.c"
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -40,10 +39,10 @@ target triple = "x86_64-unknown-linux-gnu"
@arry = dso_local global [25 x i32] zeroinitializer, align 16, !dbg !12
; Function Attrs: nounwind uwtable
-define dso_local void @init_arry() #0 !dbg !21 {
+define dso_local void @init_arry() !dbg !21 {
entry:
%i = alloca i32, align 4
- call void @llvm.lifetime.start.p0(ptr %i) #6, !dbg !26
+ call void @llvm.lifetime.start.p0(ptr %i), !dbg !26
call void @llvm.dbg.declare(metadata ptr %i, metadata !25, metadata !DIExpression()), !dbg !27
store i32 0, ptr %i, align 4, !dbg !28, !tbaa !30
br label %for.cond, !dbg !34
@@ -54,7 +53,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.end, !dbg !38
for.body: ; preds = %for.cond
- %call = call i32 @rand() #6, !dbg !39
+ %call = call i32 @rand(), !dbg !39
%rem = srem i32 %call, 10, !dbg !41
%1 = load i32, ptr %i, align 4, !dbg !42, !tbaa !30
%idxprom = sext i32 %1 to i64, !dbg !43
@@ -69,24 +68,24 @@ for.inc: ; preds = %for.body
br label %for.cond, !dbg !47, !llvm.loop !48
for.end: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(ptr %i) #6, !dbg !50
+ call void @llvm.lifetime.end.p0(ptr %i), !dbg !50
ret void, !dbg !50
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture)
; Function Attrs: nounwind readnone speculatable willreturn
-declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
; Function Attrs: nounwind
-declare dso_local i32 @rand() #3
+declare dso_local i32 @rand()
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; Function Attrs: nounwind uwtable
-define dso_local i32 @main() #0 !dbg !51 {
+define dso_local i32 @main() !dbg !51 {
entry:
%retval = alloca i32, align 4
%val = alloca i32, align 4
@@ -94,10 +93,10 @@ entry:
%condition = alloca i32, align 4
store i32 0, ptr %retval, align 4
call void @init_arry(), !dbg !62
- call void @llvm.lifetime.start.p0(ptr %val) #6, !dbg !63
+ call void @llvm.lifetime.start.p0(ptr %val), !dbg !63
call void @llvm.dbg.declare(metadata ptr %val, metadata !55, metadata !DIExpression()), !dbg !64
store i32 0, ptr %val, align 4, !dbg !64, !tbaa !30
- call void @llvm.lifetime.start.p0(ptr %j) #6, !dbg !65
+ call void @llvm.lifetime.start.p0(ptr %j), !dbg !65
call void @llvm.dbg.declare(metadata ptr %j, metadata !56, metadata !DIExpression()), !dbg !66
store i32 0, ptr %j, align 4, !dbg !67, !tbaa !30
br label %for.cond, !dbg !68
@@ -108,9 +107,9 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.end, !dbg !71
for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(ptr %condition) #6, !dbg !72
+ call void @llvm.lifetime.start.p0(ptr %condition), !dbg !72
call void @llvm.dbg.declare(metadata ptr %condition, metadata !57, metadata !DIExpression()), !dbg !73
- %call = call i32 @rand() #6, !dbg !74
+ %call = call i32 @rand(), !dbg !74
%rem = srem i32 %call, 5, !dbg !75
store i32 %rem, ptr %condition, align 4, !dbg !73, !tbaa !30
%1 = load i32, ptr %condition, align 4, !dbg !76, !tbaa !30
@@ -145,7 +144,7 @@ sw.default: ; preds = %for.body
unreachable, !dbg !87
sw.epilog: ; preds = %sw.bb3, %sw.bb2, %sw.bb
- call void @llvm.lifetime.end.p0(ptr %condition) #6, !dbg !88
+ call void @llvm.lifetime.end.p0(ptr %condition), !dbg !88
br label %for.inc, !dbg !89
for.inc: ; preds = %sw.epilog
@@ -155,25 +154,17 @@ for.inc: ; preds = %sw.epilog
br label %for.cond, !dbg !91, !llvm.loop !92
for.end: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(ptr %j) #6, !dbg !94
- call void @llvm.lifetime.end.p0(ptr %val) #6, !dbg !94
+ call void @llvm.lifetime.end.p0(ptr %j), !dbg !94
+ call void @llvm.lifetime.end.p0(ptr %val), !dbg !94
ret i32 0, !dbg !95
}
; Function Attrs: nounwind readnone willreturn
-declare i64 @llvm.expect.i64(i64, i64) #4
-
-declare dso_local i32 @sum(ptr, i32) #5
+declare i64 @llvm.expect.i64(i64, i64)
-declare dso_local i32 @random_sample(ptr, i32) #5
+declare dso_local i32 @sum(ptr, i32)
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { argmemonly nounwind willreturn }
-attributes #2 = { nounwind readnone speculatable willreturn }
-attributes #3 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #4 = { nounwind readnone willreturn }
-attributes #5 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #6 = { nounwind }
+declare dso_local i32 @random_sample(ptr, i32)
!llvm.dbg.cu = !{!2}
!llvm.module.flags = !{!17, !18, !19}
diff --git a/llvm/test/Transforms/PhaseOrdering/always-inline-alloca-promotion.ll b/llvm/test/Transforms/PhaseOrdering/always-inline-alloca-promotion.ll
index 63279b0..92ea2c6 100644
--- a/llvm/test/Transforms/PhaseOrdering/always-inline-alloca-promotion.ll
+++ b/llvm/test/Transforms/PhaseOrdering/always-inline-alloca-promotion.ll
@@ -35,9 +35,8 @@ define void @ham() #1 {
; CHECK-NEXT: [[SNORK_EXIT:.*:]]
; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr inttoptr (i64 48 to ptr), align 16
; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i64 [[TMP0]], 0
-; CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x float> @llvm.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> zeroinitializer, <vscale x 4 x float> zeroinitializer, i64 0)
-; CHECK-NEXT: [[SPEC_SELECT:%.*]] = select i1 [[TMP1]], <vscale x 16 x float> [[TMP2]], <vscale x 16 x float> undef
-; CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[SPEC_SELECT]], i64 0)
+; CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> undef, i64 0)
+; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[TMP1]], <vscale x 4 x float> zeroinitializer, <vscale x 4 x float> [[TMP2]]
; CHECK-NEXT: tail call void @llvm.aarch64.sme.mopa.nxv4f32(i32 0, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x i1> zeroinitializer, <vscale x 4 x float> zeroinitializer, <vscale x 4 x float> [[TMP3]])
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/Util/dbg-user-of-aext.ll b/llvm/test/Transforms/Util/dbg-user-of-aext.ll
index 9e7935e..b3d1b90 100644
--- a/llvm/test/Transforms/Util/dbg-user-of-aext.ll
+++ b/llvm/test/Transforms/Util/dbg-user-of-aext.ll
@@ -27,7 +27,7 @@
%struct.foo = type { i8, i64 }
; Function Attrs: noinline nounwind uwtable
-define void @_Z1fbb3foo(i1 zeroext %b, i1 zeroext %frag, i8 %g.coerce0, i64 %g.coerce1) #0 !dbg !6 {
+define void @_Z1fbb3foo(i1 zeroext %b, i1 zeroext %frag, i8 %g.coerce0, i64 %g.coerce1) !dbg !6 {
entry:
%g = alloca %struct.foo, align 8
%b.addr = alloca i8, align 1
@@ -51,10 +51,7 @@ entry:
; CHECK: ![[VAR_FRAG]] = !DILocalVariable(name: "frag"
; Function Attrs: nounwind readnone speculatable
-declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
-
-attributes #0 = { noinline nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone speculatable }
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4}
diff --git a/llvm/test/Transforms/Util/libcalls-fast-math-inf-loop.ll b/llvm/test/Transforms/Util/libcalls-fast-math-inf-loop.ll
index ad23bf7..e9f0c8c 100644
--- a/llvm/test/Transforms/Util/libcalls-fast-math-inf-loop.ll
+++ b/llvm/test/Transforms/Util/libcalls-fast-math-inf-loop.ll
@@ -19,18 +19,18 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"
; Function Attrs: nounwind
-define float @fn(float %f) #0 {
+define float @fn(float %f) {
; CHECK: define float @fn(
; CHECK: call fast float @expf(
%f.addr = alloca float, align 4
store float %f, ptr %f.addr, align 4, !tbaa !1
%1 = load float, ptr %f.addr, align 4, !tbaa !1
- %call = call fast float @expf(float %1) #3
+ %call = call fast float @expf(float %1)
ret float %call
}
; Function Attrs: inlinehint nounwind readnone
-define available_externally float @expf(float %x) #1 {
+define available_externally float @expf(float %x) {
; CHECK: define available_externally float @expf(
; CHECK: fpext float
; CHECK: call fast double @exp(
@@ -39,17 +39,13 @@ define available_externally float @expf(float %x) #1 {
store float %x, ptr %x.addr, align 4, !tbaa !1
%1 = load float, ptr %x.addr, align 4, !tbaa !1
%conv = fpext float %1 to double
- %call = call fast double @exp(double %conv) #3
+ %call = call fast double @exp(double %conv)
%conv1 = fptrunc double %call to float
ret float %conv1
}
; Function Attrs: nounwind readnone
-declare double @exp(double) #2
-
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { inlinehint nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind readnone }
+declare double @exp(double)
!llvm.ident = !{!0}
diff --git a/llvm/test/Verifier/atomics.ll b/llvm/test/Verifier/atomics.ll
index f835b98..17bf5a0 100644
--- a/llvm/test/Verifier/atomics.ll
+++ b/llvm/test/Verifier/atomics.ll
@@ -1,14 +1,15 @@
; RUN: not opt -passes=verify < %s 2>&1 | FileCheck %s
+; CHECK: atomic store operand must have integer, pointer, floating point, or vector type!
+; CHECK: atomic load operand must have integer, pointer, floating point, or vector type!
-; CHECK: atomic store operand must have integer, pointer, or floating point type!
-; CHECK: atomic load operand must have integer, pointer, or floating point type!
+%ty = type { i32 };
-define void @foo(ptr %P, <1 x i64> %v) {
- store atomic <1 x i64> %v, ptr %P unordered, align 8
+define void @foo(ptr %P, %ty %v) {
+ store atomic %ty %v, ptr %P unordered, align 8
ret void
}
-define <1 x i64> @bar(ptr %P) {
- %v = load atomic <1 x i64>, ptr %P unordered, align 8
- ret <1 x i64> %v
+define %ty @bar(ptr %P) {
+ %v = load atomic %ty, ptr %P unordered, align 8
+ ret %ty %v
}
diff --git a/llvm/test/tools/llvm-ir2vec/embeddings-flowaware.ll b/llvm/test/tools/llvm-ir2vec/embeddings-flowaware.ll
index b2362f8..ade228d 100644
--- a/llvm/test/tools/llvm-ir2vec/embeddings-flowaware.ll
+++ b/llvm/test/tools/llvm-ir2vec/embeddings-flowaware.ll
@@ -49,7 +49,7 @@ entry:
; CHECK-FUNC-LEVEL-ABC: Function: abc
; CHECK-FUNC-LEVEL-NEXT-ABC: [ 3630.00 3672.00 3714.00 ]
-; CHECK-FUNC-DEF: Error: Function 'def' not found
+; CHECK-FUNC-DEF: error: Function 'def' not found
; CHECK-BB-LEVEL: Function: abc
; CHECK-BB-LEVEL-NEXT: entry: [ 3630.00 3672.00 3714.00 ]
diff --git a/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.ll b/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.ll
index f9aa108..9d60e12 100644
--- a/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.ll
+++ b/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.ll
@@ -49,7 +49,7 @@ entry:
; CHECK-FUNC-LEVEL-ABC: Function: abc
; CHECK-FUNC-LEVEL-NEXT-ABC: [ 878.00 889.00 900.00 ]
-; CHECK-FUNC-DEF: Error: Function 'def' not found
+; CHECK-FUNC-DEF: error: Function 'def' not found
; CHECK-BB-LEVEL: Function: abc
; CHECK-BB-LEVEL-NEXT: entry: [ 878.00 889.00 900.00 ]
diff --git a/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.mir b/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.mir
new file mode 100644
index 0000000..ef835fe
--- /dev/null
+++ b/llvm/test/tools/llvm-ir2vec/embeddings-symbolic.mir
@@ -0,0 +1,92 @@
+# REQUIRES: x86_64-linux
+# RUN: llvm-ir2vec embeddings --mode=mir --mir2vec-vocab-path=%S/../../CodeGen/MIR2Vec/Inputs/mir2vec_dummy_3D_vocab.json %s | FileCheck %s -check-prefix=CHECK-DEFAULT
+# RUN: llvm-ir2vec embeddings --mode=mir --level=func --mir2vec-vocab-path=%S/../../CodeGen/MIR2Vec/Inputs/mir2vec_dummy_3D_vocab.json %s | FileCheck %s -check-prefix=CHECK-FUNC-LEVEL
+# RUN: llvm-ir2vec embeddings --mode=mir --level=func --function=add_function --mir2vec-vocab-path=%S/../../CodeGen/MIR2Vec/Inputs/mir2vec_dummy_3D_vocab.json %s | FileCheck %s -check-prefix=CHECK-FUNC-LEVEL-ADD
+# RUN: not llvm-ir2vec embeddings --mode=mir --level=func --function=missing_function --mir2vec-vocab-path=%S/../../CodeGen/MIR2Vec/Inputs/mir2vec_dummy_3D_vocab.json %s 2>&1 | FileCheck %s -check-prefix=CHECK-FUNC-MISSING
+# RUN: llvm-ir2vec embeddings --mode=mir --level=bb --mir2vec-vocab-path=%S/../../CodeGen/MIR2Vec/Inputs/mir2vec_dummy_3D_vocab.json %s | FileCheck %s -check-prefix=CHECK-BB-LEVEL
+# RUN: llvm-ir2vec embeddings --mode=mir --level=inst --function=add_function --mir2vec-vocab-path=%S/../../CodeGen/MIR2Vec/Inputs/mir2vec_dummy_3D_vocab.json %s | FileCheck %s -check-prefix=CHECK-INST-LEVEL
+
+--- |
+ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+ target triple = "x86_64-unknown-linux-gnu"
+
+ define dso_local noundef i32 @add_function(i32 noundef %a, i32 noundef %b) {
+ entry:
+ %sum = add nsw i32 %a, %b
+ %result = mul nsw i32 %sum, 2
+ ret i32 %result
+ }
+
+ define dso_local void @simple_function() {
+ entry:
+ ret void
+ }
+...
+---
+name: add_function
+alignment: 16
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr32 }
+ - { id: 1, class: gr32 }
+ - { id: 2, class: gr32 }
+ - { id: 3, class: gr32 }
+liveins:
+ - { reg: '$edi', virtual-reg: '%0' }
+ - { reg: '$esi', virtual-reg: '%1' }
+body: |
+ bb.0.entry:
+ liveins: $edi, $esi
+
+ %1:gr32 = COPY $esi
+ %0:gr32 = COPY $edi
+ %2:gr32 = nsw ADD32rr %0, %1, implicit-def dead $eflags
+ %3:gr32 = ADD32rr %2, %2, implicit-def dead $eflags
+ $eax = COPY %3
+ RET 0, $eax
+
+---
+name: simple_function
+alignment: 16
+tracksRegLiveness: true
+body: |
+ bb.0.entry:
+ RET 0
+
+# CHECK-DEFAULT: MIR2Vec embeddings for machine function add_function:
+# CHECK-DEFAULT-NEXT: Function vector: [ 26.50 27.10 27.70 ]
+# CHECK-DEFAULT: MIR2Vec embeddings for machine function simple_function:
+# CHECK-DEFAULT-NEXT: Function vector: [ 1.10 1.20 1.30 ]
+
+# CHECK-FUNC-LEVEL: MIR2Vec embeddings for machine function add_function:
+# CHECK-FUNC-LEVEL-NEXT: Function vector: [ 26.50 27.10 27.70 ]
+# CHECK-FUNC-LEVEL: MIR2Vec embeddings for machine function simple_function:
+# CHECK-FUNC-LEVEL-NEXT: Function vector: [ 1.10 1.20 1.30 ]
+
+# CHECK-FUNC-LEVEL-ADD: MIR2Vec embeddings for machine function add_function:
+# CHECK-FUNC-LEVEL-ADD-NEXT: Function vector: [ 26.50 27.10 27.70 ]
+# CHECK-FUNC-LEVEL-ADD-NOT: simple_function
+
+# CHECK-FUNC-MISSING: error: Function 'missing_function' not found
+
+# CHECK-BB-LEVEL: MIR2Vec embeddings for machine function add_function:
+# CHECK-BB-LEVEL-NEXT: Basic block vectors:
+# CHECK-BB-LEVEL-NEXT: MBB entry: [ 26.50 27.10 27.70 ]
+# CHECK-BB-LEVEL: MIR2Vec embeddings for machine function simple_function:
+# CHECK-BB-LEVEL-NEXT: Basic block vectors:
+# CHECK-BB-LEVEL-NEXT: MBB entry: [ 1.10 1.20 1.30 ]
+
+# CHECK-INST-LEVEL: MIR2Vec embeddings for machine function add_function:
+# CHECK-INST-LEVEL-NEXT: Instruction vectors:
+# CHECK-INST-LEVEL: %1:gr32 = COPY $esi
+# CHECK-INST-LEVEL-NEXT: -> [ 6.00 6.10 6.20 ]
+# CHECK-INST-LEVEL-NEXT: %0:gr32 = COPY $edi
+# CHECK-INST-LEVEL-NEXT: -> [ 6.00 6.10 6.20 ]
+# CHECK-INST-LEVEL: %2:gr32 = nsw ADD32rr
+# CHECK-INST-LEVEL: -> [ 3.70 3.80 3.90 ]
+# CHECK-INST-LEVEL: %3:gr32 = ADD32rr
+# CHECK-INST-LEVEL: -> [ 3.70 3.80 3.90 ]
+# CHECK-INST-LEVEL: $eax = COPY %3:gr32
+# CHECK-INST-LEVEL-NEXT: -> [ 6.00 6.10 6.20 ]
+# CHECK-INST-LEVEL: RET 0, $eax
+# CHECK-INST-LEVEL-NEXT: -> [ 1.10 1.20 1.30 ]
diff --git a/llvm/test/tools/llvm-ir2vec/entities.mir b/llvm/test/tools/llvm-ir2vec/entities.mir
new file mode 100644
index 0000000..60d9c7a
--- /dev/null
+++ b/llvm/test/tools/llvm-ir2vec/entities.mir
@@ -0,0 +1,28 @@
+# REQUIRES: x86_64-linux
+# RUN: llvm-ir2vec entities --mode=mir %s -o %t1.log 2>&1
+# RUN: diff %S/output/reference_x86_entities.txt %t1.log
+
+--- |
+ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+ target triple = "x86_64-unknown-linux-gnu"
+
+ define dso_local noundef i32 @test_function(i32 noundef %a) {
+ entry:
+ ret i32 %a
+ }
+...
+---
+name: test_function
+alignment: 16
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr32 }
+liveins:
+ - { reg: '$edi', virtual-reg: '%0' }
+body: |
+ bb.0.entry:
+ liveins: $edi
+
+ %0:gr32 = COPY $edi
+ $eax = COPY %0
+ RET 0, $eax
diff --git a/llvm/test/tools/llvm-ir2vec/error-handling.ll b/llvm/test/tools/llvm-ir2vec/error-handling.ll
index b944ea0..8e9e455 100644
--- a/llvm/test/tools/llvm-ir2vec/error-handling.ll
+++ b/llvm/test/tools/llvm-ir2vec/error-handling.ll
@@ -10,4 +10,4 @@ entry:
}
; CHECK-NO-VOCAB: error: IR2Vec vocabulary file path not specified; You may need to set it using --ir2vec-vocab-path
-; CHECK-FUNC-NOT-FOUND: Error: Function 'nonexistent' not found
+; CHECK-FUNC-NOT-FOUND: error: Function 'nonexistent' not found
diff --git a/llvm/test/tools/llvm-ir2vec/error-handling.mir b/llvm/test/tools/llvm-ir2vec/error-handling.mir
new file mode 100644
index 0000000..caec454c
--- /dev/null
+++ b/llvm/test/tools/llvm-ir2vec/error-handling.mir
@@ -0,0 +1,41 @@
+# REQUIRES: x86_64-linux
+# Test error handling and input validation for llvm-ir2vec tool in MIR mode
+
+# RUN: not llvm-ir2vec embeddings --mode=mir %s 2>&1 | FileCheck %s -check-prefix=CHECK-NO-VOCAB
+# RUN: not llvm-ir2vec embeddings --mode=mir --mir2vec-vocab-path=%S/nonexistent-vocab.json %s 2>&1 | FileCheck %s -check-prefix=CHECK-VOCAB-NOT-FOUND
+# RUN: not llvm-ir2vec embeddings --mode=mir --mir2vec-vocab-path=%S/../../CodeGen/MIR2Vec/Inputs/mir2vec_invalid_vocab.json %s 2>&1 | FileCheck %s -check-prefix=CHECK-INVALID-VOCAB
+# RUN: not llvm-ir2vec embeddings --mode=mir --function=nonexistent_function --mir2vec-vocab-path=%S/../../CodeGen/MIR2Vec/Inputs/mir2vec_dummy_3D_vocab.json %s 2>&1 | FileCheck %s -check-prefix=CHECK-FUNC-NOT-FOUND
+
+--- |
+ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+ target triple = "x86_64-unknown-linux-gnu"
+
+ define dso_local noundef i32 @test_function(i32 noundef %a) {
+ entry:
+ ret i32 %a
+ }
+...
+---
+name: test_function
+alignment: 16
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr32 }
+liveins:
+ - { reg: '$edi', virtual-reg: '%0' }
+body: |
+ bb.0.entry:
+ liveins: $edi
+
+ %0:gr32 = COPY $edi
+ $eax = COPY %0
+ RET 0, $eax
+
+# CHECK-NO-VOCAB: error: Failed to load MIR2Vec vocabulary - MIR2Vec vocabulary file path not specified; set it using --mir2vec-vocab-path
+
+# CHECK-VOCAB-NOT-FOUND: error: Failed to load MIR2Vec vocabulary
+# CHECK-VOCAB-NOT-FOUND: No such file or directory
+
+# CHECK-INVALID-VOCAB: error: Failed to load MIR2Vec vocabulary - Missing 'Opcodes' section in vocabulary file
+
+# CHECK-FUNC-NOT-FOUND: error: Function 'nonexistent_function' not found
diff --git a/llvm/test/tools/llvm-ir2vec/output/lit.local.cfg b/llvm/test/tools/llvm-ir2vec/output/lit.local.cfg
new file mode 100644
index 0000000..2406f19
--- /dev/null
+++ b/llvm/test/tools/llvm-ir2vec/output/lit.local.cfg
@@ -0,0 +1,3 @@
+# Don't treat files in this directory as tests
+# These are reference data files, not test scripts
+config.suffixes = []
diff --git a/llvm/test/tools/llvm-ir2vec/output/reference_triplets.txt b/llvm/test/tools/llvm-ir2vec/output/reference_triplets.txt
new file mode 100644
index 0000000..dfbac4c
--- /dev/null
+++ b/llvm/test/tools/llvm-ir2vec/output/reference_triplets.txt
@@ -0,0 +1,33 @@
+MAX_RELATION=4
+187 7072 1
+187 6968 2
+187 187 0
+187 7072 1
+187 6969 2
+187 10 0
+10 7072 1
+10 7072 2
+10 7072 3
+10 6961 4
+10 187 0
+187 6952 1
+187 7072 2
+187 1555 0
+1555 6882 1
+1555 6952 2
+187 7072 1
+187 6968 2
+187 187 0
+187 7072 1
+187 6969 2
+187 601 0
+601 7072 1
+601 7072 2
+601 7072 3
+601 6961 4
+601 187 0
+187 6952 1
+187 7072 2
+187 1555 0
+1555 6882 1
+1555 6952 2
diff --git a/llvm/test/tools/llvm-ir2vec/output/reference_x86_entities.txt b/llvm/test/tools/llvm-ir2vec/output/reference_x86_entities.txt
new file mode 100644
index 0000000..dc436d1
--- /dev/null
+++ b/llvm/test/tools/llvm-ir2vec/output/reference_x86_entities.txt
@@ -0,0 +1,7174 @@
+7173
+AAA 0
+AAD 1
+AADD 2
+AAM 3
+AAND 4
+AAS 5
+ABS_F 6
+ABS_Fp 7
+ADC 8
+ADCX 9
+ADD 10
+ADDPDrm 11
+ADDPDrr 12
+ADDPSrm 13
+ADDPSrr 14
+ADDR 15
+ADDSDrm 16
+ADDSDrm_Int 17
+ADDSDrr 18
+ADDSDrr_Int 19
+ADDSSrm 20
+ADDSSrm_Int 21
+ADDSSrr 22
+ADDSSrr_Int 23
+ADDSUBPDrm 24
+ADDSUBPDrr 25
+ADDSUBPSrm 26
+ADDSUBPSrr 27
+ADD_F 28
+ADD_FI 29
+ADD_FPrST 30
+ADD_FST 31
+ADD_Fp 32
+ADD_FpI 33
+ADD_FrST 34
+ADJCALLSTACKDOWN 35
+ADJCALLSTACKUP 36
+ADOX 37
+AESDEC 38
+AESDECLASTrm 39
+AESDECLASTrr 40
+AESDECWIDE 41
+AESDECrm 42
+AESDECrr 43
+AESENC 44
+AESENCLASTrm 45
+AESENCLASTrr 46
+AESENCWIDE 47
+AESENCrm 48
+AESENCrr 49
+AESIMCrm 50
+AESIMCrr 51
+AESKEYGENASSISTrmi 52
+AESKEYGENASSISTrri 53
+AND 54
+ANDN 55
+ANDNPDrm 56
+ANDNPDrr 57
+ANDNPSrm 58
+ANDNPSrr 59
+ANDPDrm 60
+ANDPDrr 61
+ANDPSrm 62
+ANDPSrr 63
+ANNOTATION_LABEL 64
+AOR 65
+ARITH_FENCE 66
+ARPL 67
+ASAN_CHECK_MEMACCESS 68
+AVX 69
+AVX_SET 70
+AXOR 71
+BEXTR 72
+BEXTRI 73
+BLCFILL 74
+BLCI 75
+BLCIC 76
+BLCMSK 77
+BLCS 78
+BLENDPDrmi 79
+BLENDPDrri 80
+BLENDPSrmi 81
+BLENDPSrri 82
+BLENDVPDrm 83
+BLENDVPDrr 84
+BLENDVPSrm 85
+BLENDVPSrr 86
+BLSFILL 87
+BLSI 88
+BLSIC 89
+BLSMSK 90
+BLSR 91
+BOUNDS 92
+BSF 93
+BSR 94
+BSWAP 95
+BT 96
+BTC 97
+BTR 98
+BTS 99
+BUNDLE 100
+BZHI 101
+CALL 102
+CALLpcrel 103
+CATCHRET 104
+CBW 105
+CCMP 106
+CDQ 107
+CDQE 108
+CFCMOV 109
+CFI_INSTRUCTION 110
+CHS_F 111
+CHS_Fp 112
+CLAC 113
+CLC 114
+CLD 115
+CLDEMOTE 116
+CLEANUPRET 117
+CLFLUSH 118
+CLFLUSHOPT 119
+CLGI 120
+CLI 121
+CLRSSBSY 122
+CLTS 123
+CLUI 124
+CLWB 125
+CLZERO 126
+CMC 127
+CMOV 128
+CMOVBE_F 129
+CMOVBE_Fp 130
+CMOVB_F 131
+CMOVB_Fp 132
+CMOVE_F 133
+CMOVE_Fp 134
+CMOVNBE_F 135
+CMOVNBE_Fp 136
+CMOVNB_F 137
+CMOVNB_Fp 138
+CMOVNE_F 139
+CMOVNE_Fp 140
+CMOVNP_F 141
+CMOVNP_Fp 142
+CMOVP_F 143
+CMOVP_Fp 144
+CMOV_FR 145
+CMOV_GR 146
+CMOV_RFP 147
+CMOV_VK 148
+CMOV_VR 149
+CMP 150
+CMPCCXADDmr 151
+CMPPDrmi 152
+CMPPDrri 153
+CMPPSrmi 154
+CMPPSrri 155
+CMPSB 156
+CMPSDrmi 157
+CMPSDrmi_Int 158
+CMPSDrri 159
+CMPSDrri_Int 160
+CMPSL 161
+CMPSQ 162
+CMPSSrmi 163
+CMPSSrmi_Int 164
+CMPSSrri 165
+CMPSSrri_Int 166
+CMPSW 167
+CMPXCHG 168
+COMISDrm 169
+COMISDrm_Int 170
+COMISDrr 171
+COMISDrr_Int 172
+COMISSrm 173
+COMISSrm_Int 174
+COMISSrr 175
+COMISSrr_Int 176
+COMP_FST 177
+COM_FIPr 178
+COM_FIr 179
+COM_FST 180
+COM_FpIr 181
+COM_Fpr 182
+CONVERGENCECTRL_ANCHOR 183
+CONVERGENCECTRL_ENTRY 184
+CONVERGENCECTRL_GLUE 185
+CONVERGENCECTRL_LOOP 186
+COPY 187
+COPY_TO_REGCLASS 188
+CPUID 189
+CQO 190
+CRC 191
+CS_PREFIX 192
+CTEST 193
+CVTDQ 194
+CVTPD 195
+CVTPS 196
+CVTSD 197
+CVTSI 198
+CVTSS 199
+CVTTPD 200
+CVTTPS 201
+CVTTSD 202
+CVTTSS 203
+CWD 204
+CWDE 205
+DAA 206
+DAS 207
+DATA 208
+DBG_INSTR_REF 209
+DBG_LABEL 210
+DBG_PHI 211
+DBG_VALUE 212
+DBG_VALUE_LIST 213
+DEC 214
+DIV 215
+DIVPDrm 216
+DIVPDrr 217
+DIVPSrm 218
+DIVPSrr 219
+DIVR_F 220
+DIVR_FI 221
+DIVR_FPrST 222
+DIVR_FST 223
+DIVR_Fp 224
+DIVR_FpI 225
+DIVR_FrST 226
+DIVSDrm 227
+DIVSDrm_Int 228
+DIVSDrr 229
+DIVSDrr_Int 230
+DIVSSrm 231
+DIVSSrm_Int 232
+DIVSSrr 233
+DIVSSrr_Int 234
+DIV_F 235
+DIV_FI 236
+DIV_FPrST 237
+DIV_FST 238
+DIV_Fp 239
+DIV_FpI 240
+DIV_FrST 241
+DPPDrmi 242
+DPPDrri 243
+DPPSrmi 244
+DPPSrri 245
+DS_PREFIX 246
+DYN_ALLOCA 247
+EH_LABEL 248
+EH_RETURN 249
+EH_SjLj_LongJmp 250
+EH_SjLj_SetJmp 251
+EH_SjLj_Setup 252
+ENCLS 253
+ENCLU 254
+ENCLV 255
+ENCODEKEY 256
+ENDBR 257
+ENQCMD 258
+ENQCMDS 259
+ENTER 260
+ERETS 261
+ERETU 262
+ES_PREFIX 263
+EXTRACTPSmri 264
+EXTRACTPSrri 265
+EXTRACT_SUBREG 266
+EXTRQ 267
+EXTRQI 268
+F 269
+FAKE_USE 270
+FARCALL 271
+FARJMP 272
+FAULTING_OP 273
+FBLDm 274
+FBSTPm 275
+FCOM 276
+FCOMP 277
+FCOMPP 278
+FCOS 279
+FDECSTP 280
+FEMMS 281
+FENTRY_CALL 282
+FFREE 283
+FFREEP 284
+FICOM 285
+FICOMP 286
+FINCSTP 287
+FLDCW 288
+FLDENVm 289
+FLDL 290
+FLDLG 291
+FLDLN 292
+FLDPI 293
+FNCLEX 294
+FNINIT 295
+FNOP 296
+FNSTCW 297
+FNSTSW 298
+FNSTSWm 299
+FP 300
+FPATAN 301
+FPREM 302
+FPTAN 303
+FRNDINT 304
+FRSTORm 305
+FSAVEm 306
+FSCALE 307
+FSIN 308
+FSINCOS 309
+FSTENVm 310
+FS_PREFIX 311
+FXRSTOR 312
+FXSAVE 313
+FXTRACT 314
+FYL 315
+FsFLD 316
+GC_LABEL 317
+GETSEC 318
+GF 319
+GS_PREFIX 320
+G_ABDS 321
+G_ABDU 322
+G_ABS 323
+G_ADD 324
+G_ADDRSPACE_CAST 325
+G_AND 326
+G_ANYEXT 327
+G_ASHR 328
+G_ASSERT_ALIGN 329
+G_ASSERT_SEXT 330
+G_ASSERT_ZEXT 331
+G_ATOMICRMW_ADD 332
+G_ATOMICRMW_AND 333
+G_ATOMICRMW_FADD 334
+G_ATOMICRMW_FMAX 335
+G_ATOMICRMW_FMAXIMUM 336
+G_ATOMICRMW_FMIN 337
+G_ATOMICRMW_FMINIMUM 338
+G_ATOMICRMW_FSUB 339
+G_ATOMICRMW_MAX 340
+G_ATOMICRMW_MIN 341
+G_ATOMICRMW_NAND 342
+G_ATOMICRMW_OR 343
+G_ATOMICRMW_SUB 344
+G_ATOMICRMW_UDEC_WRAP 345
+G_ATOMICRMW_UINC_WRAP 346
+G_ATOMICRMW_UMAX 347
+G_ATOMICRMW_UMIN 348
+G_ATOMICRMW_USUB_COND 349
+G_ATOMICRMW_USUB_SAT 350
+G_ATOMICRMW_XCHG 351
+G_ATOMICRMW_XOR 352
+G_ATOMIC_CMPXCHG 353
+G_ATOMIC_CMPXCHG_WITH_SUCCESS 354
+G_BITCAST 355
+G_BITREVERSE 356
+G_BLOCK_ADDR 357
+G_BR 358
+G_BRCOND 359
+G_BRINDIRECT 360
+G_BRJT 361
+G_BSWAP 362
+G_BUILD_VECTOR 363
+G_BUILD_VECTOR_TRUNC 364
+G_BZERO 365
+G_CONCAT_VECTORS 366
+G_CONSTANT 367
+G_CONSTANT_FOLD_BARRIER 368
+G_CONSTANT_POOL 369
+G_CTLZ 370
+G_CTLZ_ZERO_UNDEF 371
+G_CTPOP 372
+G_CTTZ 373
+G_CTTZ_ZERO_UNDEF 374
+G_DEBUGTRAP 375
+G_DYN_STACKALLOC 376
+G_EXTRACT 377
+G_EXTRACT_SUBVECTOR 378
+G_EXTRACT_VECTOR_ELT 379
+G_FABS 380
+G_FACOS 381
+G_FADD 382
+G_FASIN 383
+G_FATAN 384
+G_FCANONICALIZE 385
+G_FCEIL 386
+G_FCMP 387
+G_FCONSTANT 388
+G_FCOPYSIGN 389
+G_FCOS 390
+G_FCOSH 391
+G_FDIV 392
+G_FENCE 393
+G_FEXP 394
+G_FFLOOR 395
+G_FFREXP 396
+G_FILD 397
+G_FIST 398
+G_FLDCW 399
+G_FLDEXP 400
+G_FLOG 401
+G_FMA 402
+G_FMAD 403
+G_FMAXIMUM 404
+G_FMAXIMUMNUM 405
+G_FMAXNUM 406
+G_FMAXNUM_IEEE 407
+G_FMINIMUM 408
+G_FMINIMUMNUM 409
+G_FMINNUM 410
+G_FMINNUM_IEEE 411
+G_FMODF 412
+G_FMUL 413
+G_FNEARBYINT 414
+G_FNEG 415
+G_FNSTCW 416
+G_FPEXT 417
+G_FPOW 418
+G_FPOWI 419
+G_FPTOSI 420
+G_FPTOSI_SAT 421
+G_FPTOUI 422
+G_FPTOUI_SAT 423
+G_FPTRUNC 424
+G_FRAME_INDEX 425
+G_FREEZE 426
+G_FREM 427
+G_FRINT 428
+G_FSHL 429
+G_FSHR 430
+G_FSIN 431
+G_FSINCOS 432
+G_FSINH 433
+G_FSQRT 434
+G_FSUB 435
+G_FTAN 436
+G_FTANH 437
+G_GET_FPENV 438
+G_GET_FPMODE 439
+G_GET_ROUNDING 440
+G_GLOBAL_VALUE 441
+G_ICMP 442
+G_IMPLICIT_DEF 443
+G_INDEXED_LOAD 444
+G_INDEXED_SEXTLOAD 445
+G_INDEXED_STORE 446
+G_INDEXED_ZEXTLOAD 447
+G_INSERT 448
+G_INSERT_SUBVECTOR 449
+G_INSERT_VECTOR_ELT 450
+G_INTRINSIC 451
+G_INTRINSIC_CONVERGENT 452
+G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS 453
+G_INTRINSIC_FPTRUNC_ROUND 454
+G_INTRINSIC_LLRINT 455
+G_INTRINSIC_LRINT 456
+G_INTRINSIC_ROUND 457
+G_INTRINSIC_ROUNDEVEN 458
+G_INTRINSIC_TRUNC 459
+G_INTRINSIC_W_SIDE_EFFECTS 460
+G_INTTOPTR 461
+G_INVOKE_REGION_START 462
+G_IS_FPCLASS 463
+G_JUMP_TABLE 464
+G_LLROUND 465
+G_LOAD 466
+G_LROUND 467
+G_LSHR 468
+G_MEMCPY 469
+G_MEMCPY_INLINE 470
+G_MEMMOVE 471
+G_MEMSET 472
+G_MERGE_VALUES 473
+G_MUL 474
+G_OR 475
+G_PHI 476
+G_PREFETCH 477
+G_PTRAUTH_GLOBAL_VALUE 478
+G_PTRMASK 479
+G_PTRTOINT 480
+G_PTR_ADD 481
+G_READCYCLECOUNTER 482
+G_READSTEADYCOUNTER 483
+G_READ_REGISTER 484
+G_RESET_FPENV 485
+G_RESET_FPMODE 486
+G_ROTL 487
+G_ROTR 488
+G_SADDE 489
+G_SADDO 490
+G_SADDSAT 491
+G_SBFX 492
+G_SCMP 493
+G_SDIV 494
+G_SDIVFIX 495
+G_SDIVFIXSAT 496
+G_SDIVREM 497
+G_SELECT 498
+G_SET_FPENV 499
+G_SET_FPMODE 500
+G_SET_ROUNDING 501
+G_SEXT 502
+G_SEXTLOAD 503
+G_SEXT_INREG 504
+G_SHL 505
+G_SHUFFLE_VECTOR 506
+G_SITOFP 507
+G_SMAX 508
+G_SMIN 509
+G_SMULFIX 510
+G_SMULFIXSAT 511
+G_SMULH 512
+G_SMULO 513
+G_SPLAT_VECTOR 514
+G_SREM 515
+G_SSHLSAT 516
+G_SSUBE 517
+G_SSUBO 518
+G_SSUBSAT 519
+G_STACKRESTORE 520
+G_STACKSAVE 521
+G_STEP_VECTOR 522
+G_STORE 523
+G_STRICT_FADD 524
+G_STRICT_FDIV 525
+G_STRICT_FLDEXP 526
+G_STRICT_FMA 527
+G_STRICT_FMUL 528
+G_STRICT_FREM 529
+G_STRICT_FSQRT 530
+G_STRICT_FSUB 531
+G_SUB 532
+G_TRAP 533
+G_TRUNC 534
+G_TRUNC_SSAT_S 535
+G_TRUNC_SSAT_U 536
+G_TRUNC_USAT_U 537
+G_UADDE 538
+G_UADDO 539
+G_UADDSAT 540
+G_UBFX 541
+G_UBSANTRAP 542
+G_UCMP 543
+G_UDIV 544
+G_UDIVFIX 545
+G_UDIVFIXSAT 546
+G_UDIVREM 547
+G_UITOFP 548
+G_UMAX 549
+G_UMIN 550
+G_UMULFIX 551
+G_UMULFIXSAT 552
+G_UMULH 553
+G_UMULO 554
+G_UNMERGE_VALUES 555
+G_UREM 556
+G_USHLSAT 557
+G_USUBE 558
+G_USUBO 559
+G_USUBSAT 560
+G_VAARG 561
+G_VASTART 562
+G_VECREDUCE_ADD 563
+G_VECREDUCE_AND 564
+G_VECREDUCE_FADD 565
+G_VECREDUCE_FMAX 566
+G_VECREDUCE_FMAXIMUM 567
+G_VECREDUCE_FMIN 568
+G_VECREDUCE_FMINIMUM 569
+G_VECREDUCE_FMUL 570
+G_VECREDUCE_MUL 571
+G_VECREDUCE_OR 572
+G_VECREDUCE_SEQ_FADD 573
+G_VECREDUCE_SEQ_FMUL 574
+G_VECREDUCE_SMAX 575
+G_VECREDUCE_SMIN 576
+G_VECREDUCE_UMAX 577
+G_VECREDUCE_UMIN 578
+G_VECREDUCE_XOR 579
+G_VECTOR_COMPRESS 580
+G_VSCALE 581
+G_WRITE_REGISTER 582
+G_XOR 583
+G_ZEXT 584
+G_ZEXTLOAD 585
+HADDPDrm 586
+HADDPDrr 587
+HADDPSrm 588
+HADDPSrr 589
+HLT 590
+HRESET 591
+HSUBPDrm 592
+HSUBPDrr 593
+HSUBPSrm 594
+HSUBPSrr 595
+ICALL_BRANCH_FUNNEL 596
+IDIV 597
+ILD_F 598
+ILD_Fp 599
+IMPLICIT_DEF 600
+IMUL 601
+IMULZU 602
+IN 603
+INC 604
+INCSSPD 605
+INCSSPQ 606
+INDIRECT_THUNK_CALL 607
+INDIRECT_THUNK_TCRETURN 608
+INIT_UNDEF 609
+INLINEASM 610
+INLINEASM_BR 611
+INSB 612
+INSERTPSrmi 613
+INSERTPSrri 614
+INSERTQ 615
+INSERTQI 616
+INSERT_SUBREG 617
+INSL 618
+INSW 619
+INT 620
+INTO 621
+INVD 622
+INVEPT 623
+INVLPG 624
+INVLPGA 625
+INVLPGB 626
+INVPCID 627
+INVVPID 628
+IRET 629
+ISTT_FP 630
+ISTT_Fp 631
+IST_F 632
+IST_FP 633
+IST_Fp 634
+Int_eh_sjlj_setup_dispatch 635
+JCC 636
+JCXZ 637
+JECXZ 638
+JMP 639
+JMPABS 640
+JRCXZ 641
+JUMP_TABLE_DEBUG_INFO 642
+KADDBkk 643
+KADDDkk 644
+KADDQkk 645
+KADDWkk 646
+KANDBkk 647
+KANDDkk 648
+KANDNBkk 649
+KANDNDkk 650
+KANDNQkk 651
+KANDNWkk 652
+KANDQkk 653
+KANDWkk 654
+KCFI_CHECK 655
+KILL 656
+KMOVBkk 657
+KMOVBkk_EVEX 658
+KMOVBkm 659
+KMOVBkm_EVEX 660
+KMOVBkr 661
+KMOVBkr_EVEX 662
+KMOVBmk 663
+KMOVBmk_EVEX 664
+KMOVBrk 665
+KMOVBrk_EVEX 666
+KMOVDkk 667
+KMOVDkk_EVEX 668
+KMOVDkm 669
+KMOVDkm_EVEX 670
+KMOVDkr 671
+KMOVDkr_EVEX 672
+KMOVDmk 673
+KMOVDmk_EVEX 674
+KMOVDrk 675
+KMOVDrk_EVEX 676
+KMOVQkk 677
+KMOVQkk_EVEX 678
+KMOVQkm 679
+KMOVQkm_EVEX 680
+KMOVQkr 681
+KMOVQkr_EVEX 682
+KMOVQmk 683
+KMOVQmk_EVEX 684
+KMOVQrk 685
+KMOVQrk_EVEX 686
+KMOVWkk 687
+KMOVWkk_EVEX 688
+KMOVWkm 689
+KMOVWkm_EVEX 690
+KMOVWkr 691
+KMOVWkr_EVEX 692
+KMOVWmk 693
+KMOVWmk_EVEX 694
+KMOVWrk 695
+KMOVWrk_EVEX 696
+KNOTBkk 697
+KNOTDkk 698
+KNOTQkk 699
+KNOTWkk 700
+KORBkk 701
+KORDkk 702
+KORQkk 703
+KORTESTBkk 704
+KORTESTDkk 705
+KORTESTQkk 706
+KORTESTWkk 707
+KORWkk 708
+KSET 709
+KSHIFTLBki 710
+KSHIFTLDki 711
+KSHIFTLQki 712
+KSHIFTLWki 713
+KSHIFTRBki 714
+KSHIFTRDki 715
+KSHIFTRQki 716
+KSHIFTRWki 717
+KTESTBkk 718
+KTESTDkk 719
+KTESTQkk 720
+KTESTWkk 721
+KUNPCKBWkk 722
+KUNPCKDQkk 723
+KUNPCKWDkk 724
+KXNORBkk 725
+KXNORDkk 726
+KXNORQkk 727
+KXNORWkk 728
+KXORBkk 729
+KXORDkk 730
+KXORQkk 731
+KXORWkk 732
+LAHF 733
+LAR 734
+LCMPXCHG 735
+LDDQUrm 736
+LDMXCSR 737
+LDS 738
+LDTILECFG 739
+LDTILECFG_EVEX 740
+LD_F 741
+LD_Fp 742
+LD_Frr 743
+LEA 744
+LEAVE 745
+LES 746
+LFENCE 747
+LFS 748
+LGDT 749
+LGS 750
+LIDT 751
+LIFETIME_END 752
+LIFETIME_START 753
+LKGS 754
+LLDT 755
+LLWPCB 756
+LMSW 757
+LOADIWKEY 758
+LOAD_STACK_GUARD 759
+LOCAL_ESCAPE 760
+LOCK_ADD 761
+LOCK_AND 762
+LOCK_BTC 763
+LOCK_BTC_RM 764
+LOCK_BTR 765
+LOCK_BTR_RM 766
+LOCK_BTS 767
+LOCK_BTS_RM 768
+LOCK_DEC 769
+LOCK_INC 770
+LOCK_OR 771
+LOCK_PREFIX 772
+LOCK_SUB 773
+LOCK_XOR 774
+LODSB 775
+LODSL 776
+LODSQ 777
+LODSW 778
+LOOP 779
+LOOPE 780
+LOOPNE 781
+LRET 782
+LRETI 783
+LSL 784
+LSS 785
+LTRm 786
+LTRr 787
+LWPINS 788
+LWPVAL 789
+LXADD 790
+LZCNT 791
+MASKMOVDQU 792
+MASKPAIR 793
+MAXCPDrm 794
+MAXCPDrr 795
+MAXCPSrm 796
+MAXCPSrr 797
+MAXCSDrm 798
+MAXCSDrr 799
+MAXCSSrm 800
+MAXCSSrr 801
+MAXPDrm 802
+MAXPDrr 803
+MAXPSrm 804
+MAXPSrr 805
+MAXSDrm 806
+MAXSDrm_Int 807
+MAXSDrr 808
+MAXSDrr_Int 809
+MAXSSrm 810
+MAXSSrm_Int 811
+MAXSSrr 812
+MAXSSrr_Int 813
+MEMBARRIER 814
+MFENCE 815
+MINCPDrm 816
+MINCPDrr 817
+MINCPSrm 818
+MINCPSrr 819
+MINCSDrm 820
+MINCSDrr 821
+MINCSSrm 822
+MINCSSrr 823
+MINPDrm 824
+MINPDrr 825
+MINPSrm 826
+MINPSrr 827
+MINSDrm 828
+MINSDrm_Int 829
+MINSDrr 830
+MINSDrr_Int 831
+MINSSrm 832
+MINSSrm_Int 833
+MINSSrr 834
+MINSSrr_Int 835
+MMX_CVTPD 836
+MMX_CVTPI 837
+MMX_CVTPS 838
+MMX_CVTTPD 839
+MMX_CVTTPS 840
+MMX_EMMS 841
+MMX_MASKMOVQ 842
+MMX_MOVD 843
+MMX_MOVDQ 844
+MMX_MOVFR 845
+MMX_MOVNTQmr 846
+MMX_MOVQ 847
+MMX_PABSBrm 848
+MMX_PABSBrr 849
+MMX_PABSDrm 850
+MMX_PABSDrr 851
+MMX_PABSWrm 852
+MMX_PABSWrr 853
+MMX_PACKSSDWrm 854
+MMX_PACKSSDWrr 855
+MMX_PACKSSWBrm 856
+MMX_PACKSSWBrr 857
+MMX_PACKUSWBrm 858
+MMX_PACKUSWBrr 859
+MMX_PADDBrm 860
+MMX_PADDBrr 861
+MMX_PADDDrm 862
+MMX_PADDDrr 863
+MMX_PADDQrm 864
+MMX_PADDQrr 865
+MMX_PADDSBrm 866
+MMX_PADDSBrr 867
+MMX_PADDSWrm 868
+MMX_PADDSWrr 869
+MMX_PADDUSBrm 870
+MMX_PADDUSBrr 871
+MMX_PADDUSWrm 872
+MMX_PADDUSWrr 873
+MMX_PADDWrm 874
+MMX_PADDWrr 875
+MMX_PALIGNRrmi 876
+MMX_PALIGNRrri 877
+MMX_PANDNrm 878
+MMX_PANDNrr 879
+MMX_PANDrm 880
+MMX_PANDrr 881
+MMX_PAVGBrm 882
+MMX_PAVGBrr 883
+MMX_PAVGWrm 884
+MMX_PAVGWrr 885
+MMX_PCMPEQBrm 886
+MMX_PCMPEQBrr 887
+MMX_PCMPEQDrm 888
+MMX_PCMPEQDrr 889
+MMX_PCMPEQWrm 890
+MMX_PCMPEQWrr 891
+MMX_PCMPGTBrm 892
+MMX_PCMPGTBrr 893
+MMX_PCMPGTDrm 894
+MMX_PCMPGTDrr 895
+MMX_PCMPGTWrm 896
+MMX_PCMPGTWrr 897
+MMX_PEXTRWrri 898
+MMX_PHADDDrm 899
+MMX_PHADDDrr 900
+MMX_PHADDSWrm 901
+MMX_PHADDSWrr 902
+MMX_PHADDWrm 903
+MMX_PHADDWrr 904
+MMX_PHSUBDrm 905
+MMX_PHSUBDrr 906
+MMX_PHSUBSWrm 907
+MMX_PHSUBSWrr 908
+MMX_PHSUBWrm 909
+MMX_PHSUBWrr 910
+MMX_PINSRWrmi 911
+MMX_PINSRWrri 912
+MMX_PMADDUBSWrm 913
+MMX_PMADDUBSWrr 914
+MMX_PMADDWDrm 915
+MMX_PMADDWDrr 916
+MMX_PMAXSWrm 917
+MMX_PMAXSWrr 918
+MMX_PMAXUBrm 919
+MMX_PMAXUBrr 920
+MMX_PMINSWrm 921
+MMX_PMINSWrr 922
+MMX_PMINUBrm 923
+MMX_PMINUBrr 924
+MMX_PMOVMSKBrr 925
+MMX_PMULHRSWrm 926
+MMX_PMULHRSWrr 927
+MMX_PMULHUWrm 928
+MMX_PMULHUWrr 929
+MMX_PMULHWrm 930
+MMX_PMULHWrr 931
+MMX_PMULLWrm 932
+MMX_PMULLWrr 933
+MMX_PMULUDQrm 934
+MMX_PMULUDQrr 935
+MMX_PORrm 936
+MMX_PORrr 937
+MMX_PSADBWrm 938
+MMX_PSADBWrr 939
+MMX_PSHUFBrm 940
+MMX_PSHUFBrr 941
+MMX_PSHUFWmi 942
+MMX_PSHUFWri 943
+MMX_PSIGNBrm 944
+MMX_PSIGNBrr 945
+MMX_PSIGNDrm 946
+MMX_PSIGNDrr 947
+MMX_PSIGNWrm 948
+MMX_PSIGNWrr 949
+MMX_PSLLDri 950
+MMX_PSLLDrm 951
+MMX_PSLLDrr 952
+MMX_PSLLQri 953
+MMX_PSLLQrm 954
+MMX_PSLLQrr 955
+MMX_PSLLWri 956
+MMX_PSLLWrm 957
+MMX_PSLLWrr 958
+MMX_PSRADri 959
+MMX_PSRADrm 960
+MMX_PSRADrr 961
+MMX_PSRAWri 962
+MMX_PSRAWrm 963
+MMX_PSRAWrr 964
+MMX_PSRLDri 965
+MMX_PSRLDrm 966
+MMX_PSRLDrr 967
+MMX_PSRLQri 968
+MMX_PSRLQrm 969
+MMX_PSRLQrr 970
+MMX_PSRLWri 971
+MMX_PSRLWrm 972
+MMX_PSRLWrr 973
+MMX_PSUBBrm 974
+MMX_PSUBBrr 975
+MMX_PSUBDrm 976
+MMX_PSUBDrr 977
+MMX_PSUBQrm 978
+MMX_PSUBQrr 979
+MMX_PSUBSBrm 980
+MMX_PSUBSBrr 981
+MMX_PSUBSWrm 982
+MMX_PSUBSWrr 983
+MMX_PSUBUSBrm 984
+MMX_PSUBUSBrr 985
+MMX_PSUBUSWrm 986
+MMX_PSUBUSWrr 987
+MMX_PSUBWrm 988
+MMX_PSUBWrr 989
+MMX_PUNPCKHBWrm 990
+MMX_PUNPCKHBWrr 991
+MMX_PUNPCKHDQrm 992
+MMX_PUNPCKHDQrr 993
+MMX_PUNPCKHWDrm 994
+MMX_PUNPCKHWDrr 995
+MMX_PUNPCKLBWrm 996
+MMX_PUNPCKLBWrr 997
+MMX_PUNPCKLDQrm 998
+MMX_PUNPCKLDQrr 999
+MMX_PUNPCKLWDrm 1000
+MMX_PUNPCKLWDrr 1001
+MMX_PXORrm 1002
+MMX_PXORrr 1003
+MMX_SET 1004
+MONITOR 1005
+MONITORX 1006
+MONTMUL 1007
+MORESTACK_RET 1008
+MORESTACK_RET_RESTORE_R 1009
+MOV 1010
+MOVAPDmr 1011
+MOVAPDrm 1012
+MOVAPDrr 1013
+MOVAPDrr_REV 1014
+MOVAPSmr 1015
+MOVAPSrm 1016
+MOVAPSrr 1017
+MOVAPSrr_REV 1018
+MOVBE 1019
+MOVDDUPrm 1020
+MOVDDUPrr 1021
+MOVDI 1022
+MOVDIR 1023
+MOVDIRI 1024
+MOVDQAmr 1025
+MOVDQArm 1026
+MOVDQArr 1027
+MOVDQArr_REV 1028
+MOVDQUmr 1029
+MOVDQUrm 1030
+MOVDQUrr 1031
+MOVDQUrr_REV 1032
+MOVHLPSrr 1033
+MOVHPDmr 1034
+MOVHPDrm 1035
+MOVHPSmr 1036
+MOVHPSrm 1037
+MOVLHPSrr 1038
+MOVLPDmr 1039
+MOVLPDrm 1040
+MOVLPSmr 1041
+MOVLPSrm 1042
+MOVMSKPDrr 1043
+MOVMSKPSrr 1044
+MOVNTDQArm 1045
+MOVNTDQmr 1046
+MOVNTI 1047
+MOVNTImr 1048
+MOVNTPDmr 1049
+MOVNTPSmr 1050
+MOVNTSD 1051
+MOVNTSS 1052
+MOVPC 1053
+MOVPDI 1054
+MOVPQI 1055
+MOVPQIto 1056
+MOVQI 1057
+MOVRS 1058
+MOVSB 1059
+MOVSDmr 1060
+MOVSDrm 1061
+MOVSDrm_alt 1062
+MOVSDrr 1063
+MOVSDrr_REV 1064
+MOVSDto 1065
+MOVSHDUPrm 1066
+MOVSHDUPrr 1067
+MOVSHPmr 1068
+MOVSHPrm 1069
+MOVSL 1070
+MOVSLDUPrm 1071
+MOVSLDUPrr 1072
+MOVSQ 1073
+MOVSS 1074
+MOVSSmr 1075
+MOVSSrm 1076
+MOVSSrm_alt 1077
+MOVSSrr 1078
+MOVSSrr_REV 1079
+MOVSW 1080
+MOVSX 1081
+MOVUPDmr 1082
+MOVUPDrm 1083
+MOVUPDrr 1084
+MOVUPDrr_REV 1085
+MOVUPSmr 1086
+MOVUPSrm 1087
+MOVUPSrr 1088
+MOVUPSrr_REV 1089
+MOVZPQILo 1090
+MOVZX 1091
+MPSADBWrmi 1092
+MPSADBWrri 1093
+MUL 1094
+MULPDrm 1095
+MULPDrr 1096
+MULPSrm 1097
+MULPSrr 1098
+MULSDrm 1099
+MULSDrm_Int 1100
+MULSDrr 1101
+MULSDrr_Int 1102
+MULSSrm 1103
+MULSSrm_Int 1104
+MULSSrr 1105
+MULSSrr_Int 1106
+MULX 1107
+MUL_F 1108
+MUL_FI 1109
+MUL_FPrST 1110
+MUL_FST 1111
+MUL_Fp 1112
+MUL_FpI 1113
+MUL_FrST 1114
+MWAITX 1115
+MWAITX_SAVE_RBX 1116
+MWAITXrrr 1117
+MWAITrr 1118
+NEG 1119
+NOOP 1120
+NOOPL 1121
+NOOPLr 1122
+NOOPQ 1123
+NOOPQr 1124
+NOOPW 1125
+NOOPWr 1126
+NOT 1127
+OR 1128
+ORPDrm 1129
+ORPDrr 1130
+ORPSrm 1131
+ORPSrr 1132
+OUT 1133
+OUTSB 1134
+OUTSL 1135
+OUTSW 1136
+PABSBrm 1137
+PABSBrr 1138
+PABSDrm 1139
+PABSDrr 1140
+PABSWrm 1141
+PABSWrr 1142
+PACKSSDWrm 1143
+PACKSSDWrr 1144
+PACKSSWBrm 1145
+PACKSSWBrr 1146
+PACKUSDWrm 1147
+PACKUSDWrr 1148
+PACKUSWBrm 1149
+PACKUSWBrr 1150
+PADDBrm 1151
+PADDBrr 1152
+PADDDrm 1153
+PADDDrr 1154
+PADDQrm 1155
+PADDQrr 1156
+PADDSBrm 1157
+PADDSBrr 1158
+PADDSWrm 1159
+PADDSWrr 1160
+PADDUSBrm 1161
+PADDUSBrr 1162
+PADDUSWrm 1163
+PADDUSWrr 1164
+PADDWrm 1165
+PADDWrr 1166
+PALIGNRrmi 1167
+PALIGNRrri 1168
+PANDNrm 1169
+PANDNrr 1170
+PANDrm 1171
+PANDrr 1172
+PATCHABLE_EVENT_CALL 1173
+PATCHABLE_FUNCTION_ENTER 1174
+PATCHABLE_FUNCTION_EXIT 1175
+PATCHABLE_OP 1176
+PATCHABLE_RET 1177
+PATCHABLE_TAIL_CALL 1178
+PATCHABLE_TYPED_EVENT_CALL 1179
+PATCHPOINT 1180
+PAUSE 1181
+PAVGBrm 1182
+PAVGBrr 1183
+PAVGUSBrm 1184
+PAVGUSBrr 1185
+PAVGWrm 1186
+PAVGWrr 1187
+PBLENDVBrm 1188
+PBLENDVBrr 1189
+PBLENDWrmi 1190
+PBLENDWrri 1191
+PBNDKB 1192
+PCLMULQDQrmi 1193
+PCLMULQDQrri 1194
+PCMPEQBrm 1195
+PCMPEQBrr 1196
+PCMPEQDrm 1197
+PCMPEQDrr 1198
+PCMPEQQrm 1199
+PCMPEQQrr 1200
+PCMPEQWrm 1201
+PCMPEQWrr 1202
+PCMPESTRIrmi 1203
+PCMPESTRIrri 1204
+PCMPESTRMrmi 1205
+PCMPESTRMrri 1206
+PCMPGTBrm 1207
+PCMPGTBrr 1208
+PCMPGTDrm 1209
+PCMPGTDrr 1210
+PCMPGTQrm 1211
+PCMPGTQrr 1212
+PCMPGTWrm 1213
+PCMPGTWrr 1214
+PCMPISTRIrmi 1215
+PCMPISTRIrri 1216
+PCMPISTRMrmi 1217
+PCMPISTRMrri 1218
+PCONFIG 1219
+PDEP 1220
+PEXT 1221
+PEXTRBmri 1222
+PEXTRBrri 1223
+PEXTRDmri 1224
+PEXTRDrri 1225
+PEXTRQmri 1226
+PEXTRQrri 1227
+PEXTRWmri 1228
+PEXTRWrri 1229
+PEXTRWrri_REV 1230
+PF 1231
+PFACCrm 1232
+PFACCrr 1233
+PFADDrm 1234
+PFADDrr 1235
+PFCMPEQrm 1236
+PFCMPEQrr 1237
+PFCMPGErm 1238
+PFCMPGErr 1239
+PFCMPGTrm 1240
+PFCMPGTrr 1241
+PFMAXrm 1242
+PFMAXrr 1243
+PFMINrm 1244
+PFMINrr 1245
+PFMULrm 1246
+PFMULrr 1247
+PFNACCrm 1248
+PFNACCrr 1249
+PFPNACCrm 1250
+PFPNACCrr 1251
+PFRCPIT 1252
+PFRCPrm 1253
+PFRCPrr 1254
+PFRSQIT 1255
+PFRSQRTrm 1256
+PFRSQRTrr 1257
+PFSUBRrm 1258
+PFSUBRrr 1259
+PFSUBrm 1260
+PFSUBrr 1261
+PHADDDrm 1262
+PHADDDrr 1263
+PHADDSWrm 1264
+PHADDSWrr 1265
+PHADDWrm 1266
+PHADDWrr 1267
+PHI 1268
+PHMINPOSUWrm 1269
+PHMINPOSUWrr 1270
+PHSUBDrm 1271
+PHSUBDrr 1272
+PHSUBSWrm 1273
+PHSUBSWrr 1274
+PHSUBWrm 1275
+PHSUBWrr 1276
+PI 1277
+PINSRBrmi 1278
+PINSRBrri 1279
+PINSRDrmi 1280
+PINSRDrri 1281
+PINSRQrmi 1282
+PINSRQrri 1283
+PINSRWrmi 1284
+PINSRWrri 1285
+PLDTILECFGV 1286
+PLEA 1287
+PMADDUBSWrm 1288
+PMADDUBSWrr 1289
+PMADDWDrm 1290
+PMADDWDrr 1291
+PMAXSBrm 1292
+PMAXSBrr 1293
+PMAXSDrm 1294
+PMAXSDrr 1295
+PMAXSWrm 1296
+PMAXSWrr 1297
+PMAXUBrm 1298
+PMAXUBrr 1299
+PMAXUDrm 1300
+PMAXUDrr 1301
+PMAXUWrm 1302
+PMAXUWrr 1303
+PMINSBrm 1304
+PMINSBrr 1305
+PMINSDrm 1306
+PMINSDrr 1307
+PMINSWrm 1308
+PMINSWrr 1309
+PMINUBrm 1310
+PMINUBrr 1311
+PMINUDrm 1312
+PMINUDrr 1313
+PMINUWrm 1314
+PMINUWrr 1315
+PMOVMSKBrr 1316
+PMOVSXBDrm 1317
+PMOVSXBDrr 1318
+PMOVSXBQrm 1319
+PMOVSXBQrr 1320
+PMOVSXBWrm 1321
+PMOVSXBWrr 1322
+PMOVSXDQrm 1323
+PMOVSXDQrr 1324
+PMOVSXWDrm 1325
+PMOVSXWDrr 1326
+PMOVSXWQrm 1327
+PMOVSXWQrr 1328
+PMOVZXBDrm 1329
+PMOVZXBDrr 1330
+PMOVZXBQrm 1331
+PMOVZXBQrr 1332
+PMOVZXBWrm 1333
+PMOVZXBWrr 1334
+PMOVZXDQrm 1335
+PMOVZXDQrr 1336
+PMOVZXWDrm 1337
+PMOVZXWDrr 1338
+PMOVZXWQrm 1339
+PMOVZXWQrr 1340
+PMULDQrm 1341
+PMULDQrr 1342
+PMULHRSWrm 1343
+PMULHRSWrr 1344
+PMULHRWrm 1345
+PMULHRWrr 1346
+PMULHUWrm 1347
+PMULHUWrr 1348
+PMULHWrm 1349
+PMULHWrr 1350
+PMULLDrm 1351
+PMULLDrr 1352
+PMULLWrm 1353
+PMULLWrr 1354
+PMULUDQrm 1355
+PMULUDQrr 1356
+POP 1357
+POPA 1358
+POPCNT 1359
+POPDS 1360
+POPES 1361
+POPF 1362
+POPFS 1363
+POPGS 1364
+POPP 1365
+POPSS 1366
+PORrm 1367
+PORrr 1368
+PREALLOCATED_ARG 1369
+PREALLOCATED_SETUP 1370
+PREFETCH 1371
+PREFETCHIT 1372
+PREFETCHNTA 1373
+PREFETCHRST 1374
+PREFETCHT 1375
+PREFETCHW 1376
+PREFETCHWT 1377
+PROBED_ALLOCA 1378
+PSADBWrm 1379
+PSADBWrr 1380
+PSEUDO_PROBE 1381
+PSHUFBrm 1382
+PSHUFBrr 1383
+PSHUFDmi 1384
+PSHUFDri 1385
+PSHUFHWmi 1386
+PSHUFHWri 1387
+PSHUFLWmi 1388
+PSHUFLWri 1389
+PSIGNBrm 1390
+PSIGNBrr 1391
+PSIGNDrm 1392
+PSIGNDrr 1393
+PSIGNWrm 1394
+PSIGNWrr 1395
+PSLLDQri 1396
+PSLLDri 1397
+PSLLDrm 1398
+PSLLDrr 1399
+PSLLQri 1400
+PSLLQrm 1401
+PSLLQrr 1402
+PSLLWri 1403
+PSLLWrm 1404
+PSLLWrr 1405
+PSMASH 1406
+PSRADri 1407
+PSRADrm 1408
+PSRADrr 1409
+PSRAWri 1410
+PSRAWrm 1411
+PSRAWrr 1412
+PSRLDQri 1413
+PSRLDri 1414
+PSRLDrm 1415
+PSRLDrr 1416
+PSRLQri 1417
+PSRLQrm 1418
+PSRLQrr 1419
+PSRLWri 1420
+PSRLWrm 1421
+PSRLWrr 1422
+PSUBBrm 1423
+PSUBBrr 1424
+PSUBDrm 1425
+PSUBDrr 1426
+PSUBQrm 1427
+PSUBQrr 1428
+PSUBSBrm 1429
+PSUBSBrr 1430
+PSUBSWrm 1431
+PSUBSWrr 1432
+PSUBUSBrm 1433
+PSUBUSBrr 1434
+PSUBUSWrm 1435
+PSUBUSWrr 1436
+PSUBWrm 1437
+PSUBWrr 1438
+PSWAPDrm 1439
+PSWAPDrr 1440
+PT 1441
+PTCMMIMFP 1442
+PTCMMRLFP 1443
+PTCONJTCMMIMFP 1444
+PTCONJTFP 1445
+PTCVTROWD 1446
+PTCVTROWPS 1447
+PTDPBF 1448
+PTDPBHF 1449
+PTDPBSSD 1450
+PTDPBSSDV 1451
+PTDPBSUD 1452
+PTDPBSUDV 1453
+PTDPBUSD 1454
+PTDPBUSDV 1455
+PTDPBUUD 1456
+PTDPBUUDV 1457
+PTDPFP 1458
+PTDPHBF 1459
+PTDPHF 1460
+PTESTrm 1461
+PTESTrr 1462
+PTILELOADD 1463
+PTILELOADDRS 1464
+PTILELOADDRST 1465
+PTILELOADDRSV 1466
+PTILELOADDT 1467
+PTILELOADDV 1468
+PTILEMOVROWrre 1469
+PTILEMOVROWrreV 1470
+PTILEMOVROWrri 1471
+PTILEMOVROWrriV 1472
+PTILEPAIRLOAD 1473
+PTILEPAIRSTORE 1474
+PTILESTORED 1475
+PTILESTOREDV 1476
+PTILEZERO 1477
+PTILEZEROV 1478
+PTMMULTF 1479
+PTTCMMIMFP 1480
+PTTCMMRLFP 1481
+PTTDPBF 1482
+PTTDPFP 1483
+PTTMMULTF 1484
+PTTRANSPOSED 1485
+PTTRANSPOSEDV 1486
+PTWRITE 1487
+PTWRITEm 1488
+PTWRITEr 1489
+PUNPCKHBWrm 1490
+PUNPCKHBWrr 1491
+PUNPCKHDQrm 1492
+PUNPCKHDQrr 1493
+PUNPCKHQDQrm 1494
+PUNPCKHQDQrr 1495
+PUNPCKHWDrm 1496
+PUNPCKHWDrr 1497
+PUNPCKLBWrm 1498
+PUNPCKLBWrr 1499
+PUNPCKLDQrm 1500
+PUNPCKLDQrr 1501
+PUNPCKLQDQrm 1502
+PUNPCKLQDQrr 1503
+PUNPCKLWDrm 1504
+PUNPCKLWDrr 1505
+PUSH 1506
+PUSHA 1507
+PUSHCS 1508
+PUSHDS 1509
+PUSHES 1510
+PUSHF 1511
+PUSHFS 1512
+PUSHGS 1513
+PUSHP 1514
+PUSHSS 1515
+PVALIDATE 1516
+PXORrm 1517
+PXORrr 1518
+RCL 1519
+RCPPSm 1520
+RCPPSr 1521
+RCPSSm 1522
+RCPSSm_Int 1523
+RCPSSr 1524
+RCPSSr_Int 1525
+RCR 1526
+RDFLAGS 1527
+RDFSBASE 1528
+RDGSBASE 1529
+RDMSR 1530
+RDMSRLIST 1531
+RDMSRri 1532
+RDMSRri_EVEX 1533
+RDPID 1534
+RDPKRUr 1535
+RDPMC 1536
+RDPRU 1537
+RDRAND 1538
+RDSEED 1539
+RDSSPD 1540
+RDSSPQ 1541
+RDTSC 1542
+RDTSCP 1543
+REG_SEQUENCE 1544
+REPNE_PREFIX 1545
+REP_MOVSB 1546
+REP_MOVSD 1547
+REP_MOVSQ 1548
+REP_MOVSW 1549
+REP_PREFIX 1550
+REP_STOSB 1551
+REP_STOSD 1552
+REP_STOSQ 1553
+REP_STOSW 1554
+RET 1555
+RETI 1556
+REX 1557
+RMPADJUST 1558
+RMPQUERY 1559
+RMPUPDATE 1560
+ROL 1561
+ROR 1562
+RORX 1563
+ROUNDPDmi 1564
+ROUNDPDri 1565
+ROUNDPSmi 1566
+ROUNDPSri 1567
+ROUNDSDmi 1568
+ROUNDSDmi_Int 1569
+ROUNDSDri 1570
+ROUNDSDri_Int 1571
+ROUNDSSmi 1572
+ROUNDSSmi_Int 1573
+ROUNDSSri 1574
+ROUNDSSri_Int 1575
+RSM 1576
+RSQRTPSm 1577
+RSQRTPSr 1578
+RSQRTSSm 1579
+RSQRTSSm_Int 1580
+RSQRTSSr 1581
+RSQRTSSr_Int 1582
+RSTORSSP 1583
+SAHF 1584
+SALC 1585
+SAR 1586
+SARX 1587
+SAVEPREVSSP 1588
+SBB 1589
+SCASB 1590
+SCASL 1591
+SCASQ 1592
+SCASW 1593
+SEAMCALL 1594
+SEAMOPS 1595
+SEAMRET 1596
+SEG_ALLOCA 1597
+SEH_BeginEpilogue 1598
+SEH_EndEpilogue 1599
+SEH_EndPrologue 1600
+SEH_PushFrame 1601
+SEH_PushReg 1602
+SEH_SaveReg 1603
+SEH_SaveXMM 1604
+SEH_SetFrame 1605
+SEH_StackAlign 1606
+SEH_StackAlloc 1607
+SEH_UnwindV 1608
+SEH_UnwindVersion 1609
+SENDUIPI 1610
+SERIALIZE 1611
+SETB_C 1612
+SETCCm 1613
+SETCCm_EVEX 1614
+SETCCr 1615
+SETCCr_EVEX 1616
+SETSSBSY 1617
+SETZUCCm 1618
+SETZUCCr 1619
+SFENCE 1620
+SGDT 1621
+SHA 1622
+SHL 1623
+SHLD 1624
+SHLDROT 1625
+SHLX 1626
+SHR 1627
+SHRD 1628
+SHRDROT 1629
+SHRX 1630
+SHUFPDrmi 1631
+SHUFPDrri 1632
+SHUFPSrmi 1633
+SHUFPSrri 1634
+SIDT 1635
+SKINIT 1636
+SLDT 1637
+SLWPCB 1638
+SMSW 1639
+SQRTPDm 1640
+SQRTPDr 1641
+SQRTPSm 1642
+SQRTPSr 1643
+SQRTSDm 1644
+SQRTSDm_Int 1645
+SQRTSDr 1646
+SQRTSDr_Int 1647
+SQRTSSm 1648
+SQRTSSm_Int 1649
+SQRTSSr 1650
+SQRTSSr_Int 1651
+SQRT_F 1652
+SQRT_Fp 1653
+SS_PREFIX 1654
+STAC 1655
+STACKALLOC_W_PROBING 1656
+STACKMAP 1657
+STATEPOINT 1658
+STC 1659
+STD 1660
+STGI 1661
+STI 1662
+STMXCSR 1663
+STOSB 1664
+STOSL 1665
+STOSQ 1666
+STOSW 1667
+STR 1668
+STRm 1669
+STTILECFG 1670
+STTILECFG_EVEX 1671
+STUI 1672
+ST_F 1673
+ST_FP 1674
+ST_FPrr 1675
+ST_Fp 1676
+ST_FpP 1677
+ST_Frr 1678
+SUB 1679
+SUBPDrm 1680
+SUBPDrr 1681
+SUBPSrm 1682
+SUBPSrr 1683
+SUBREG_TO_REG 1684
+SUBR_F 1685
+SUBR_FI 1686
+SUBR_FPrST 1687
+SUBR_FST 1688
+SUBR_Fp 1689
+SUBR_FpI 1690
+SUBR_FrST 1691
+SUBSDrm 1692
+SUBSDrm_Int 1693
+SUBSDrr 1694
+SUBSDrr_Int 1695
+SUBSSrm 1696
+SUBSSrm_Int 1697
+SUBSSrr 1698
+SUBSSrr_Int 1699
+SUB_F 1700
+SUB_FI 1701
+SUB_FPrST 1702
+SUB_FST 1703
+SUB_Fp 1704
+SUB_FpI 1705
+SUB_FrST 1706
+SWAPGS 1707
+SYSCALL 1708
+SYSENTER 1709
+SYSEXIT 1710
+SYSRET 1711
+T 1712
+TAILJMPd 1713
+TAILJMPd_CC 1714
+TAILJMPm 1715
+TAILJMPr 1716
+TCMMIMFP 1717
+TCMMRLFP 1718
+TCONJTCMMIMFP 1719
+TCONJTFP 1720
+TCRETURN_HIPE 1721
+TCRETURN_WIN 1722
+TCRETURN_WINmi 1723
+TCRETURNdi 1724
+TCRETURNdicc 1725
+TCRETURNmi 1726
+TCRETURNri 1727
+TCVTROWD 1728
+TCVTROWPS 1729
+TDCALL 1730
+TDPBF 1731
+TDPBHF 1732
+TDPBSSD 1733
+TDPBSUD 1734
+TDPBUSD 1735
+TDPBUUD 1736
+TDPFP 1737
+TDPHBF 1738
+TDPHF 1739
+TEST 1740
+TESTUI 1741
+TILELOADD 1742
+TILELOADDRS 1743
+TILELOADDRST 1744
+TILELOADDRS_EVEX 1745
+TILELOADDT 1746
+TILELOADD_EVEX 1747
+TILEMOVROWrre 1748
+TILEMOVROWrri 1749
+TILERELEASE 1750
+TILESTORED 1751
+TILESTORED_EVEX 1752
+TILEZERO 1753
+TLBSYNC 1754
+TLSCall 1755
+TLS_addr 1756
+TLS_addrX 1757
+TLS_base_addr 1758
+TLS_base_addrX 1759
+TLS_desc 1760
+TMMULTF 1761
+TPAUSE 1762
+TRAP 1763
+TST_F 1764
+TST_Fp 1765
+TTCMMIMFP 1766
+TTCMMRLFP 1767
+TTDPBF 1768
+TTDPFP 1769
+TTMMULTF 1770
+TTRANSPOSED 1771
+TZCNT 1772
+TZMSK 1773
+UBSAN_UD 1774
+UCOMISDrm 1775
+UCOMISDrm_Int 1776
+UCOMISDrr 1777
+UCOMISDrr_Int 1778
+UCOMISSrm 1779
+UCOMISSrm_Int 1780
+UCOMISSrr 1781
+UCOMISSrr_Int 1782
+UCOM_FIPr 1783
+UCOM_FIr 1784
+UCOM_FPPr 1785
+UCOM_FPr 1786
+UCOM_FpIr 1787
+UCOM_Fpr 1788
+UCOM_Fr 1789
+UD 1790
+UIRET 1791
+UMONITOR 1792
+UMWAIT 1793
+UNPCKHPDrm 1794
+UNPCKHPDrr 1795
+UNPCKHPSrm 1796
+UNPCKHPSrr 1797
+UNPCKLPDrm 1798
+UNPCKLPDrr 1799
+UNPCKLPSrm 1800
+UNPCKLPSrr 1801
+URDMSRri 1802
+URDMSRri_EVEX 1803
+URDMSRrr 1804
+URDMSRrr_EVEX 1805
+UWRMSRir 1806
+UWRMSRir_EVEX 1807
+UWRMSRrr 1808
+UWRMSRrr_EVEX 1809
+V 1810
+VAARG 1811
+VAARG_X 1812
+VADDBF 1813
+VADDPDYrm 1814
+VADDPDYrr 1815
+VADDPDZ 1816
+VADDPDZrm 1817
+VADDPDZrmb 1818
+VADDPDZrmbk 1819
+VADDPDZrmbkz 1820
+VADDPDZrmk 1821
+VADDPDZrmkz 1822
+VADDPDZrr 1823
+VADDPDZrrb 1824
+VADDPDZrrbk 1825
+VADDPDZrrbkz 1826
+VADDPDZrrk 1827
+VADDPDZrrkz 1828
+VADDPDrm 1829
+VADDPDrr 1830
+VADDPHZ 1831
+VADDPHZrm 1832
+VADDPHZrmb 1833
+VADDPHZrmbk 1834
+VADDPHZrmbkz 1835
+VADDPHZrmk 1836
+VADDPHZrmkz 1837
+VADDPHZrr 1838
+VADDPHZrrb 1839
+VADDPHZrrbk 1840
+VADDPHZrrbkz 1841
+VADDPHZrrk 1842
+VADDPHZrrkz 1843
+VADDPSYrm 1844
+VADDPSYrr 1845
+VADDPSZ 1846
+VADDPSZrm 1847
+VADDPSZrmb 1848
+VADDPSZrmbk 1849
+VADDPSZrmbkz 1850
+VADDPSZrmk 1851
+VADDPSZrmkz 1852
+VADDPSZrr 1853
+VADDPSZrrb 1854
+VADDPSZrrbk 1855
+VADDPSZrrbkz 1856
+VADDPSZrrk 1857
+VADDPSZrrkz 1858
+VADDPSrm 1859
+VADDPSrr 1860
+VADDSDZrm 1861
+VADDSDZrm_Int 1862
+VADDSDZrmk_Int 1863
+VADDSDZrmkz_Int 1864
+VADDSDZrr 1865
+VADDSDZrr_Int 1866
+VADDSDZrrb_Int 1867
+VADDSDZrrbk_Int 1868
+VADDSDZrrbkz_Int 1869
+VADDSDZrrk_Int 1870
+VADDSDZrrkz_Int 1871
+VADDSDrm 1872
+VADDSDrm_Int 1873
+VADDSDrr 1874
+VADDSDrr_Int 1875
+VADDSHZrm 1876
+VADDSHZrm_Int 1877
+VADDSHZrmk_Int 1878
+VADDSHZrmkz_Int 1879
+VADDSHZrr 1880
+VADDSHZrr_Int 1881
+VADDSHZrrb_Int 1882
+VADDSHZrrbk_Int 1883
+VADDSHZrrbkz_Int 1884
+VADDSHZrrk_Int 1885
+VADDSHZrrkz_Int 1886
+VADDSSZrm 1887
+VADDSSZrm_Int 1888
+VADDSSZrmk_Int 1889
+VADDSSZrmkz_Int 1890
+VADDSSZrr 1891
+VADDSSZrr_Int 1892
+VADDSSZrrb_Int 1893
+VADDSSZrrbk_Int 1894
+VADDSSZrrbkz_Int 1895
+VADDSSZrrk_Int 1896
+VADDSSZrrkz_Int 1897
+VADDSSrm 1898
+VADDSSrm_Int 1899
+VADDSSrr 1900
+VADDSSrr_Int 1901
+VADDSUBPDYrm 1902
+VADDSUBPDYrr 1903
+VADDSUBPDrm 1904
+VADDSUBPDrr 1905
+VADDSUBPSYrm 1906
+VADDSUBPSYrr 1907
+VADDSUBPSrm 1908
+VADDSUBPSrr 1909
+VAESDECLASTYrm 1910
+VAESDECLASTYrr 1911
+VAESDECLASTZ 1912
+VAESDECLASTZrm 1913
+VAESDECLASTZrr 1914
+VAESDECLASTrm 1915
+VAESDECLASTrr 1916
+VAESDECYrm 1917
+VAESDECYrr 1918
+VAESDECZ 1919
+VAESDECZrm 1920
+VAESDECZrr 1921
+VAESDECrm 1922
+VAESDECrr 1923
+VAESENCLASTYrm 1924
+VAESENCLASTYrr 1925
+VAESENCLASTZ 1926
+VAESENCLASTZrm 1927
+VAESENCLASTZrr 1928
+VAESENCLASTrm 1929
+VAESENCLASTrr 1930
+VAESENCYrm 1931
+VAESENCYrr 1932
+VAESENCZ 1933
+VAESENCZrm 1934
+VAESENCZrr 1935
+VAESENCrm 1936
+VAESENCrr 1937
+VAESIMCrm 1938
+VAESIMCrr 1939
+VAESKEYGENASSISTrmi 1940
+VAESKEYGENASSISTrri 1941
+VALIGNDZ 1942
+VALIGNDZrmbi 1943
+VALIGNDZrmbik 1944
+VALIGNDZrmbikz 1945
+VALIGNDZrmi 1946
+VALIGNDZrmik 1947
+VALIGNDZrmikz 1948
+VALIGNDZrri 1949
+VALIGNDZrrik 1950
+VALIGNDZrrikz 1951
+VALIGNQZ 1952
+VALIGNQZrmbi 1953
+VALIGNQZrmbik 1954
+VALIGNQZrmbikz 1955
+VALIGNQZrmi 1956
+VALIGNQZrmik 1957
+VALIGNQZrmikz 1958
+VALIGNQZrri 1959
+VALIGNQZrrik 1960
+VALIGNQZrrikz 1961
+VANDNPDYrm 1962
+VANDNPDYrr 1963
+VANDNPDZ 1964
+VANDNPDZrm 1965
+VANDNPDZrmb 1966
+VANDNPDZrmbk 1967
+VANDNPDZrmbkz 1968
+VANDNPDZrmk 1969
+VANDNPDZrmkz 1970
+VANDNPDZrr 1971
+VANDNPDZrrk 1972
+VANDNPDZrrkz 1973
+VANDNPDrm 1974
+VANDNPDrr 1975
+VANDNPSYrm 1976
+VANDNPSYrr 1977
+VANDNPSZ 1978
+VANDNPSZrm 1979
+VANDNPSZrmb 1980
+VANDNPSZrmbk 1981
+VANDNPSZrmbkz 1982
+VANDNPSZrmk 1983
+VANDNPSZrmkz 1984
+VANDNPSZrr 1985
+VANDNPSZrrk 1986
+VANDNPSZrrkz 1987
+VANDNPSrm 1988
+VANDNPSrr 1989
+VANDPDYrm 1990
+VANDPDYrr 1991
+VANDPDZ 1992
+VANDPDZrm 1993
+VANDPDZrmb 1994
+VANDPDZrmbk 1995
+VANDPDZrmbkz 1996
+VANDPDZrmk 1997
+VANDPDZrmkz 1998
+VANDPDZrr 1999
+VANDPDZrrk 2000
+VANDPDZrrkz 2001
+VANDPDrm 2002
+VANDPDrr 2003
+VANDPSYrm 2004
+VANDPSYrr 2005
+VANDPSZ 2006
+VANDPSZrm 2007
+VANDPSZrmb 2008
+VANDPSZrmbk 2009
+VANDPSZrmbkz 2010
+VANDPSZrmk 2011
+VANDPSZrmkz 2012
+VANDPSZrr 2013
+VANDPSZrrk 2014
+VANDPSZrrkz 2015
+VANDPSrm 2016
+VANDPSrr 2017
+VASTART_SAVE_XMM_REGS 2018
+VBCSTNEBF 2019
+VBCSTNESH 2020
+VBLENDMPDZ 2021
+VBLENDMPDZrm 2022
+VBLENDMPDZrmb 2023
+VBLENDMPDZrmbk 2024
+VBLENDMPDZrmbkz 2025
+VBLENDMPDZrmk 2026
+VBLENDMPDZrmkz 2027
+VBLENDMPDZrr 2028
+VBLENDMPDZrrk 2029
+VBLENDMPDZrrkz 2030
+VBLENDMPSZ 2031
+VBLENDMPSZrm 2032
+VBLENDMPSZrmb 2033
+VBLENDMPSZrmbk 2034
+VBLENDMPSZrmbkz 2035
+VBLENDMPSZrmk 2036
+VBLENDMPSZrmkz 2037
+VBLENDMPSZrr 2038
+VBLENDMPSZrrk 2039
+VBLENDMPSZrrkz 2040
+VBLENDPDYrmi 2041
+VBLENDPDYrri 2042
+VBLENDPDrmi 2043
+VBLENDPDrri 2044
+VBLENDPSYrmi 2045
+VBLENDPSYrri 2046
+VBLENDPSrmi 2047
+VBLENDPSrri 2048
+VBLENDVPDYrmr 2049
+VBLENDVPDYrrr 2050
+VBLENDVPDrmr 2051
+VBLENDVPDrrr 2052
+VBLENDVPSYrmr 2053
+VBLENDVPSYrrr 2054
+VBLENDVPSrmr 2055
+VBLENDVPSrrr 2056
+VBROADCASTF 2057
+VBROADCASTI 2058
+VBROADCASTSDYrm 2059
+VBROADCASTSDYrr 2060
+VBROADCASTSDZ 2061
+VBROADCASTSDZrm 2062
+VBROADCASTSDZrmk 2063
+VBROADCASTSDZrmkz 2064
+VBROADCASTSDZrr 2065
+VBROADCASTSDZrrk 2066
+VBROADCASTSDZrrkz 2067
+VBROADCASTSSYrm 2068
+VBROADCASTSSYrr 2069
+VBROADCASTSSZ 2070
+VBROADCASTSSZrm 2071
+VBROADCASTSSZrmk 2072
+VBROADCASTSSZrmkz 2073
+VBROADCASTSSZrr 2074
+VBROADCASTSSZrrk 2075
+VBROADCASTSSZrrkz 2076
+VBROADCASTSSrm 2077
+VBROADCASTSSrr 2078
+VCMPBF 2079
+VCMPPDYrmi 2080
+VCMPPDYrri 2081
+VCMPPDZ 2082
+VCMPPDZrmbi 2083
+VCMPPDZrmbik 2084
+VCMPPDZrmi 2085
+VCMPPDZrmik 2086
+VCMPPDZrri 2087
+VCMPPDZrrib 2088
+VCMPPDZrribk 2089
+VCMPPDZrrik 2090
+VCMPPDrmi 2091
+VCMPPDrri 2092
+VCMPPHZ 2093
+VCMPPHZrmbi 2094
+VCMPPHZrmbik 2095
+VCMPPHZrmi 2096
+VCMPPHZrmik 2097
+VCMPPHZrri 2098
+VCMPPHZrrib 2099
+VCMPPHZrribk 2100
+VCMPPHZrrik 2101
+VCMPPSYrmi 2102
+VCMPPSYrri 2103
+VCMPPSZ 2104
+VCMPPSZrmbi 2105
+VCMPPSZrmbik 2106
+VCMPPSZrmi 2107
+VCMPPSZrmik 2108
+VCMPPSZrri 2109
+VCMPPSZrrib 2110
+VCMPPSZrribk 2111
+VCMPPSZrrik 2112
+VCMPPSrmi 2113
+VCMPPSrri 2114
+VCMPSDZrmi 2115
+VCMPSDZrmi_Int 2116
+VCMPSDZrmik_Int 2117
+VCMPSDZrri 2118
+VCMPSDZrri_Int 2119
+VCMPSDZrrib_Int 2120
+VCMPSDZrribk_Int 2121
+VCMPSDZrrik_Int 2122
+VCMPSDrmi 2123
+VCMPSDrmi_Int 2124
+VCMPSDrri 2125
+VCMPSDrri_Int 2126
+VCMPSHZrmi 2127
+VCMPSHZrmi_Int 2128
+VCMPSHZrmik_Int 2129
+VCMPSHZrri 2130
+VCMPSHZrri_Int 2131
+VCMPSHZrrib_Int 2132
+VCMPSHZrribk_Int 2133
+VCMPSHZrrik_Int 2134
+VCMPSSZrmi 2135
+VCMPSSZrmi_Int 2136
+VCMPSSZrmik_Int 2137
+VCMPSSZrri 2138
+VCMPSSZrri_Int 2139
+VCMPSSZrrib_Int 2140
+VCMPSSZrribk_Int 2141
+VCMPSSZrrik_Int 2142
+VCMPSSrmi 2143
+VCMPSSrmi_Int 2144
+VCMPSSrri 2145
+VCMPSSrri_Int 2146
+VCOMISBF 2147
+VCOMISDZrm 2148
+VCOMISDZrm_Int 2149
+VCOMISDZrr 2150
+VCOMISDZrr_Int 2151
+VCOMISDZrrb 2152
+VCOMISDrm 2153
+VCOMISDrm_Int 2154
+VCOMISDrr 2155
+VCOMISDrr_Int 2156
+VCOMISHZrm 2157
+VCOMISHZrm_Int 2158
+VCOMISHZrr 2159
+VCOMISHZrr_Int 2160
+VCOMISHZrrb 2161
+VCOMISSZrm 2162
+VCOMISSZrm_Int 2163
+VCOMISSZrr 2164
+VCOMISSZrr_Int 2165
+VCOMISSZrrb 2166
+VCOMISSrm 2167
+VCOMISSrm_Int 2168
+VCOMISSrr 2169
+VCOMISSrr_Int 2170
+VCOMPRESSPDZ 2171
+VCOMPRESSPDZmr 2172
+VCOMPRESSPDZmrk 2173
+VCOMPRESSPDZrr 2174
+VCOMPRESSPDZrrk 2175
+VCOMPRESSPDZrrkz 2176
+VCOMPRESSPSZ 2177
+VCOMPRESSPSZmr 2178
+VCOMPRESSPSZmrk 2179
+VCOMPRESSPSZrr 2180
+VCOMPRESSPSZrrk 2181
+VCOMPRESSPSZrrkz 2182
+VCOMXSDZrm_Int 2183
+VCOMXSDZrr_Int 2184
+VCOMXSDZrrb_Int 2185
+VCOMXSHZrm_Int 2186
+VCOMXSHZrr_Int 2187
+VCOMXSHZrrb_Int 2188
+VCOMXSSZrm_Int 2189
+VCOMXSSZrr_Int 2190
+VCOMXSSZrrb_Int 2191
+VCVT 2192
+VCVTBF 2193
+VCVTBIASPH 2194
+VCVTDQ 2195
+VCVTHF 2196
+VCVTNE 2197
+VCVTNEEBF 2198
+VCVTNEEPH 2199
+VCVTNEOBF 2200
+VCVTNEOPH 2201
+VCVTNEPS 2202
+VCVTPD 2203
+VCVTPH 2204
+VCVTPS 2205
+VCVTQQ 2206
+VCVTSD 2207
+VCVTSH 2208
+VCVTSI 2209
+VCVTSS 2210
+VCVTTBF 2211
+VCVTTPD 2212
+VCVTTPH 2213
+VCVTTPS 2214
+VCVTTSD 2215
+VCVTTSH 2216
+VCVTTSS 2217
+VCVTUDQ 2218
+VCVTUQQ 2219
+VCVTUSI 2220
+VCVTUW 2221
+VCVTW 2222
+VDBPSADBWZ 2223
+VDBPSADBWZrmi 2224
+VDBPSADBWZrmik 2225
+VDBPSADBWZrmikz 2226
+VDBPSADBWZrri 2227
+VDBPSADBWZrrik 2228
+VDBPSADBWZrrikz 2229
+VDIVBF 2230
+VDIVPDYrm 2231
+VDIVPDYrr 2232
+VDIVPDZ 2233
+VDIVPDZrm 2234
+VDIVPDZrmb 2235
+VDIVPDZrmbk 2236
+VDIVPDZrmbkz 2237
+VDIVPDZrmk 2238
+VDIVPDZrmkz 2239
+VDIVPDZrr 2240
+VDIVPDZrrb 2241
+VDIVPDZrrbk 2242
+VDIVPDZrrbkz 2243
+VDIVPDZrrk 2244
+VDIVPDZrrkz 2245
+VDIVPDrm 2246
+VDIVPDrr 2247
+VDIVPHZ 2248
+VDIVPHZrm 2249
+VDIVPHZrmb 2250
+VDIVPHZrmbk 2251
+VDIVPHZrmbkz 2252
+VDIVPHZrmk 2253
+VDIVPHZrmkz 2254
+VDIVPHZrr 2255
+VDIVPHZrrb 2256
+VDIVPHZrrbk 2257
+VDIVPHZrrbkz 2258
+VDIVPHZrrk 2259
+VDIVPHZrrkz 2260
+VDIVPSYrm 2261
+VDIVPSYrr 2262
+VDIVPSZ 2263
+VDIVPSZrm 2264
+VDIVPSZrmb 2265
+VDIVPSZrmbk 2266
+VDIVPSZrmbkz 2267
+VDIVPSZrmk 2268
+VDIVPSZrmkz 2269
+VDIVPSZrr 2270
+VDIVPSZrrb 2271
+VDIVPSZrrbk 2272
+VDIVPSZrrbkz 2273
+VDIVPSZrrk 2274
+VDIVPSZrrkz 2275
+VDIVPSrm 2276
+VDIVPSrr 2277
+VDIVSDZrm 2278
+VDIVSDZrm_Int 2279
+VDIVSDZrmk_Int 2280
+VDIVSDZrmkz_Int 2281
+VDIVSDZrr 2282
+VDIVSDZrr_Int 2283
+VDIVSDZrrb_Int 2284
+VDIVSDZrrbk_Int 2285
+VDIVSDZrrbkz_Int 2286
+VDIVSDZrrk_Int 2287
+VDIVSDZrrkz_Int 2288
+VDIVSDrm 2289
+VDIVSDrm_Int 2290
+VDIVSDrr 2291
+VDIVSDrr_Int 2292
+VDIVSHZrm 2293
+VDIVSHZrm_Int 2294
+VDIVSHZrmk_Int 2295
+VDIVSHZrmkz_Int 2296
+VDIVSHZrr 2297
+VDIVSHZrr_Int 2298
+VDIVSHZrrb_Int 2299
+VDIVSHZrrbk_Int 2300
+VDIVSHZrrbkz_Int 2301
+VDIVSHZrrk_Int 2302
+VDIVSHZrrkz_Int 2303
+VDIVSSZrm 2304
+VDIVSSZrm_Int 2305
+VDIVSSZrmk_Int 2306
+VDIVSSZrmkz_Int 2307
+VDIVSSZrr 2308
+VDIVSSZrr_Int 2309
+VDIVSSZrrb_Int 2310
+VDIVSSZrrbk_Int 2311
+VDIVSSZrrbkz_Int 2312
+VDIVSSZrrk_Int 2313
+VDIVSSZrrkz_Int 2314
+VDIVSSrm 2315
+VDIVSSrm_Int 2316
+VDIVSSrr 2317
+VDIVSSrr_Int 2318
+VDPBF 2319
+VDPPDrmi 2320
+VDPPDrri 2321
+VDPPHPSZ 2322
+VDPPHPSZm 2323
+VDPPHPSZmb 2324
+VDPPHPSZmbk 2325
+VDPPHPSZmbkz 2326
+VDPPHPSZmk 2327
+VDPPHPSZmkz 2328
+VDPPHPSZr 2329
+VDPPHPSZrk 2330
+VDPPHPSZrkz 2331
+VDPPSYrmi 2332
+VDPPSYrri 2333
+VDPPSrmi 2334
+VDPPSrri 2335
+VERRm 2336
+VERRr 2337
+VERWm 2338
+VERWr 2339
+VEXP 2340
+VEXPANDPDZ 2341
+VEXPANDPDZrm 2342
+VEXPANDPDZrmk 2343
+VEXPANDPDZrmkz 2344
+VEXPANDPDZrr 2345
+VEXPANDPDZrrk 2346
+VEXPANDPDZrrkz 2347
+VEXPANDPSZ 2348
+VEXPANDPSZrm 2349
+VEXPANDPSZrmk 2350
+VEXPANDPSZrmkz 2351
+VEXPANDPSZrr 2352
+VEXPANDPSZrrk 2353
+VEXPANDPSZrrkz 2354
+VEXTRACTF 2355
+VEXTRACTI 2356
+VEXTRACTPSZmri 2357
+VEXTRACTPSZrri 2358
+VEXTRACTPSmri 2359
+VEXTRACTPSrri 2360
+VFCMADDCPHZ 2361
+VFCMADDCPHZm 2362
+VFCMADDCPHZmb 2363
+VFCMADDCPHZmbk 2364
+VFCMADDCPHZmbkz 2365
+VFCMADDCPHZmk 2366
+VFCMADDCPHZmkz 2367
+VFCMADDCPHZr 2368
+VFCMADDCPHZrb 2369
+VFCMADDCPHZrbk 2370
+VFCMADDCPHZrbkz 2371
+VFCMADDCPHZrk 2372
+VFCMADDCPHZrkz 2373
+VFCMADDCSHZm 2374
+VFCMADDCSHZmk 2375
+VFCMADDCSHZmkz 2376
+VFCMADDCSHZr 2377
+VFCMADDCSHZrb 2378
+VFCMADDCSHZrbk 2379
+VFCMADDCSHZrbkz 2380
+VFCMADDCSHZrk 2381
+VFCMADDCSHZrkz 2382
+VFCMULCPHZ 2383
+VFCMULCPHZrm 2384
+VFCMULCPHZrmb 2385
+VFCMULCPHZrmbk 2386
+VFCMULCPHZrmbkz 2387
+VFCMULCPHZrmk 2388
+VFCMULCPHZrmkz 2389
+VFCMULCPHZrr 2390
+VFCMULCPHZrrb 2391
+VFCMULCPHZrrbk 2392
+VFCMULCPHZrrbkz 2393
+VFCMULCPHZrrk 2394
+VFCMULCPHZrrkz 2395
+VFCMULCSHZrm 2396
+VFCMULCSHZrmk 2397
+VFCMULCSHZrmkz 2398
+VFCMULCSHZrr 2399
+VFCMULCSHZrrb 2400
+VFCMULCSHZrrbk 2401
+VFCMULCSHZrrbkz 2402
+VFCMULCSHZrrk 2403
+VFCMULCSHZrrkz 2404
+VFIXUPIMMPDZ 2405
+VFIXUPIMMPDZrmbi 2406
+VFIXUPIMMPDZrmbik 2407
+VFIXUPIMMPDZrmbikz 2408
+VFIXUPIMMPDZrmi 2409
+VFIXUPIMMPDZrmik 2410
+VFIXUPIMMPDZrmikz 2411
+VFIXUPIMMPDZrri 2412
+VFIXUPIMMPDZrrib 2413
+VFIXUPIMMPDZrribk 2414
+VFIXUPIMMPDZrribkz 2415
+VFIXUPIMMPDZrrik 2416
+VFIXUPIMMPDZrrikz 2417
+VFIXUPIMMPSZ 2418
+VFIXUPIMMPSZrmbi 2419
+VFIXUPIMMPSZrmbik 2420
+VFIXUPIMMPSZrmbikz 2421
+VFIXUPIMMPSZrmi 2422
+VFIXUPIMMPSZrmik 2423
+VFIXUPIMMPSZrmikz 2424
+VFIXUPIMMPSZrri 2425
+VFIXUPIMMPSZrrib 2426
+VFIXUPIMMPSZrribk 2427
+VFIXUPIMMPSZrribkz 2428
+VFIXUPIMMPSZrrik 2429
+VFIXUPIMMPSZrrikz 2430
+VFIXUPIMMSDZrmi 2431
+VFIXUPIMMSDZrmik 2432
+VFIXUPIMMSDZrmikz 2433
+VFIXUPIMMSDZrri 2434
+VFIXUPIMMSDZrrib 2435
+VFIXUPIMMSDZrribk 2436
+VFIXUPIMMSDZrribkz 2437
+VFIXUPIMMSDZrrik 2438
+VFIXUPIMMSDZrrikz 2439
+VFIXUPIMMSSZrmi 2440
+VFIXUPIMMSSZrmik 2441
+VFIXUPIMMSSZrmikz 2442
+VFIXUPIMMSSZrri 2443
+VFIXUPIMMSSZrrib 2444
+VFIXUPIMMSSZrribk 2445
+VFIXUPIMMSSZrribkz 2446
+VFIXUPIMMSSZrrik 2447
+VFIXUPIMMSSZrrikz 2448
+VFMADD 2449
+VFMADDCPHZ 2450
+VFMADDCPHZm 2451
+VFMADDCPHZmb 2452
+VFMADDCPHZmbk 2453
+VFMADDCPHZmbkz 2454
+VFMADDCPHZmk 2455
+VFMADDCPHZmkz 2456
+VFMADDCPHZr 2457
+VFMADDCPHZrb 2458
+VFMADDCPHZrbk 2459
+VFMADDCPHZrbkz 2460
+VFMADDCPHZrk 2461
+VFMADDCPHZrkz 2462
+VFMADDCSHZm 2463
+VFMADDCSHZmk 2464
+VFMADDCSHZmkz 2465
+VFMADDCSHZr 2466
+VFMADDCSHZrb 2467
+VFMADDCSHZrbk 2468
+VFMADDCSHZrbkz 2469
+VFMADDCSHZrk 2470
+VFMADDCSHZrkz 2471
+VFMADDPD 2472
+VFMADDPS 2473
+VFMADDSD 2474
+VFMADDSS 2475
+VFMADDSUB 2476
+VFMADDSUBPD 2477
+VFMADDSUBPS 2478
+VFMSUB 2479
+VFMSUBADD 2480
+VFMSUBADDPD 2481
+VFMSUBADDPS 2482
+VFMSUBPD 2483
+VFMSUBPS 2484
+VFMSUBSD 2485
+VFMSUBSS 2486
+VFMULCPHZ 2487
+VFMULCPHZrm 2488
+VFMULCPHZrmb 2489
+VFMULCPHZrmbk 2490
+VFMULCPHZrmbkz 2491
+VFMULCPHZrmk 2492
+VFMULCPHZrmkz 2493
+VFMULCPHZrr 2494
+VFMULCPHZrrb 2495
+VFMULCPHZrrbk 2496
+VFMULCPHZrrbkz 2497
+VFMULCPHZrrk 2498
+VFMULCPHZrrkz 2499
+VFMULCSHZrm 2500
+VFMULCSHZrmk 2501
+VFMULCSHZrmkz 2502
+VFMULCSHZrr 2503
+VFMULCSHZrrb 2504
+VFMULCSHZrrbk 2505
+VFMULCSHZrrbkz 2506
+VFMULCSHZrrk 2507
+VFMULCSHZrrkz 2508
+VFNMADD 2509
+VFNMADDPD 2510
+VFNMADDPS 2511
+VFNMADDSD 2512
+VFNMADDSS 2513
+VFNMSUB 2514
+VFNMSUBPD 2515
+VFNMSUBPS 2516
+VFNMSUBSD 2517
+VFNMSUBSS 2518
+VFPCLASSBF 2519
+VFPCLASSPDZ 2520
+VFPCLASSPDZmbi 2521
+VFPCLASSPDZmbik 2522
+VFPCLASSPDZmi 2523
+VFPCLASSPDZmik 2524
+VFPCLASSPDZri 2525
+VFPCLASSPDZrik 2526
+VFPCLASSPHZ 2527
+VFPCLASSPHZmbi 2528
+VFPCLASSPHZmbik 2529
+VFPCLASSPHZmi 2530
+VFPCLASSPHZmik 2531
+VFPCLASSPHZri 2532
+VFPCLASSPHZrik 2533
+VFPCLASSPSZ 2534
+VFPCLASSPSZmbi 2535
+VFPCLASSPSZmbik 2536
+VFPCLASSPSZmi 2537
+VFPCLASSPSZmik 2538
+VFPCLASSPSZri 2539
+VFPCLASSPSZrik 2540
+VFPCLASSSDZmi 2541
+VFPCLASSSDZmik 2542
+VFPCLASSSDZri 2543
+VFPCLASSSDZrik 2544
+VFPCLASSSHZmi 2545
+VFPCLASSSHZmik 2546
+VFPCLASSSHZri 2547
+VFPCLASSSHZrik 2548
+VFPCLASSSSZmi 2549
+VFPCLASSSSZmik 2550
+VFPCLASSSSZri 2551
+VFPCLASSSSZrik 2552
+VFRCZPDYrm 2553
+VFRCZPDYrr 2554
+VFRCZPDrm 2555
+VFRCZPDrr 2556
+VFRCZPSYrm 2557
+VFRCZPSYrr 2558
+VFRCZPSrm 2559
+VFRCZPSrr 2560
+VFRCZSDrm 2561
+VFRCZSDrr 2562
+VFRCZSSrm 2563
+VFRCZSSrr 2564
+VGATHERDPDYrm 2565
+VGATHERDPDZ 2566
+VGATHERDPDZrm 2567
+VGATHERDPDrm 2568
+VGATHERDPSYrm 2569
+VGATHERDPSZ 2570
+VGATHERDPSZrm 2571
+VGATHERDPSrm 2572
+VGATHERPF 2573
+VGATHERQPDYrm 2574
+VGATHERQPDZ 2575
+VGATHERQPDZrm 2576
+VGATHERQPDrm 2577
+VGATHERQPSYrm 2578
+VGATHERQPSZ 2579
+VGATHERQPSZrm 2580
+VGATHERQPSrm 2581
+VGETEXPBF 2582
+VGETEXPPDZ 2583
+VGETEXPPDZm 2584
+VGETEXPPDZmb 2585
+VGETEXPPDZmbk 2586
+VGETEXPPDZmbkz 2587
+VGETEXPPDZmk 2588
+VGETEXPPDZmkz 2589
+VGETEXPPDZr 2590
+VGETEXPPDZrb 2591
+VGETEXPPDZrbk 2592
+VGETEXPPDZrbkz 2593
+VGETEXPPDZrk 2594
+VGETEXPPDZrkz 2595
+VGETEXPPHZ 2596
+VGETEXPPHZm 2597
+VGETEXPPHZmb 2598
+VGETEXPPHZmbk 2599
+VGETEXPPHZmbkz 2600
+VGETEXPPHZmk 2601
+VGETEXPPHZmkz 2602
+VGETEXPPHZr 2603
+VGETEXPPHZrb 2604
+VGETEXPPHZrbk 2605
+VGETEXPPHZrbkz 2606
+VGETEXPPHZrk 2607
+VGETEXPPHZrkz 2608
+VGETEXPPSZ 2609
+VGETEXPPSZm 2610
+VGETEXPPSZmb 2611
+VGETEXPPSZmbk 2612
+VGETEXPPSZmbkz 2613
+VGETEXPPSZmk 2614
+VGETEXPPSZmkz 2615
+VGETEXPPSZr 2616
+VGETEXPPSZrb 2617
+VGETEXPPSZrbk 2618
+VGETEXPPSZrbkz 2619
+VGETEXPPSZrk 2620
+VGETEXPPSZrkz 2621
+VGETEXPSDZm 2622
+VGETEXPSDZmk 2623
+VGETEXPSDZmkz 2624
+VGETEXPSDZr 2625
+VGETEXPSDZrb 2626
+VGETEXPSDZrbk 2627
+VGETEXPSDZrbkz 2628
+VGETEXPSDZrk 2629
+VGETEXPSDZrkz 2630
+VGETEXPSHZm 2631
+VGETEXPSHZmk 2632
+VGETEXPSHZmkz 2633
+VGETEXPSHZr 2634
+VGETEXPSHZrb 2635
+VGETEXPSHZrbk 2636
+VGETEXPSHZrbkz 2637
+VGETEXPSHZrk 2638
+VGETEXPSHZrkz 2639
+VGETEXPSSZm 2640
+VGETEXPSSZmk 2641
+VGETEXPSSZmkz 2642
+VGETEXPSSZr 2643
+VGETEXPSSZrb 2644
+VGETEXPSSZrbk 2645
+VGETEXPSSZrbkz 2646
+VGETEXPSSZrk 2647
+VGETEXPSSZrkz 2648
+VGETMANTBF 2649
+VGETMANTPDZ 2650
+VGETMANTPDZrmbi 2651
+VGETMANTPDZrmbik 2652
+VGETMANTPDZrmbikz 2653
+VGETMANTPDZrmi 2654
+VGETMANTPDZrmik 2655
+VGETMANTPDZrmikz 2656
+VGETMANTPDZrri 2657
+VGETMANTPDZrrib 2658
+VGETMANTPDZrribk 2659
+VGETMANTPDZrribkz 2660
+VGETMANTPDZrrik 2661
+VGETMANTPDZrrikz 2662
+VGETMANTPHZ 2663
+VGETMANTPHZrmbi 2664
+VGETMANTPHZrmbik 2665
+VGETMANTPHZrmbikz 2666
+VGETMANTPHZrmi 2667
+VGETMANTPHZrmik 2668
+VGETMANTPHZrmikz 2669
+VGETMANTPHZrri 2670
+VGETMANTPHZrrib 2671
+VGETMANTPHZrribk 2672
+VGETMANTPHZrribkz 2673
+VGETMANTPHZrrik 2674
+VGETMANTPHZrrikz 2675
+VGETMANTPSZ 2676
+VGETMANTPSZrmbi 2677
+VGETMANTPSZrmbik 2678
+VGETMANTPSZrmbikz 2679
+VGETMANTPSZrmi 2680
+VGETMANTPSZrmik 2681
+VGETMANTPSZrmikz 2682
+VGETMANTPSZrri 2683
+VGETMANTPSZrrib 2684
+VGETMANTPSZrribk 2685
+VGETMANTPSZrribkz 2686
+VGETMANTPSZrrik 2687
+VGETMANTPSZrrikz 2688
+VGETMANTSDZrmi 2689
+VGETMANTSDZrmik 2690
+VGETMANTSDZrmikz 2691
+VGETMANTSDZrri 2692
+VGETMANTSDZrrib 2693
+VGETMANTSDZrribk 2694
+VGETMANTSDZrribkz 2695
+VGETMANTSDZrrik 2696
+VGETMANTSDZrrikz 2697
+VGETMANTSHZrmi 2698
+VGETMANTSHZrmik 2699
+VGETMANTSHZrmikz 2700
+VGETMANTSHZrri 2701
+VGETMANTSHZrrib 2702
+VGETMANTSHZrribk 2703
+VGETMANTSHZrribkz 2704
+VGETMANTSHZrrik 2705
+VGETMANTSHZrrikz 2706
+VGETMANTSSZrmi 2707
+VGETMANTSSZrmik 2708
+VGETMANTSSZrmikz 2709
+VGETMANTSSZrri 2710
+VGETMANTSSZrrib 2711
+VGETMANTSSZrribk 2712
+VGETMANTSSZrribkz 2713
+VGETMANTSSZrrik 2714
+VGETMANTSSZrrikz 2715
+VGF 2716
+VHADDPDYrm 2717
+VHADDPDYrr 2718
+VHADDPDrm 2719
+VHADDPDrr 2720
+VHADDPSYrm 2721
+VHADDPSYrr 2722
+VHADDPSrm 2723
+VHADDPSrr 2724
+VHSUBPDYrm 2725
+VHSUBPDYrr 2726
+VHSUBPDrm 2727
+VHSUBPDrr 2728
+VHSUBPSYrm 2729
+VHSUBPSYrr 2730
+VHSUBPSrm 2731
+VHSUBPSrr 2732
+VINSERTF 2733
+VINSERTI 2734
+VINSERTPSZrmi 2735
+VINSERTPSZrri 2736
+VINSERTPSrmi 2737
+VINSERTPSrri 2738
+VLDDQUYrm 2739
+VLDDQUrm 2740
+VLDMXCSR 2741
+VMASKMOVDQU 2742
+VMASKMOVPDYmr 2743
+VMASKMOVPDYrm 2744
+VMASKMOVPDmr 2745
+VMASKMOVPDrm 2746
+VMASKMOVPSYmr 2747
+VMASKMOVPSYrm 2748
+VMASKMOVPSmr 2749
+VMASKMOVPSrm 2750
+VMAXBF 2751
+VMAXCPDYrm 2752
+VMAXCPDYrr 2753
+VMAXCPDZ 2754
+VMAXCPDZrm 2755
+VMAXCPDZrmb 2756
+VMAXCPDZrmbk 2757
+VMAXCPDZrmbkz 2758
+VMAXCPDZrmk 2759
+VMAXCPDZrmkz 2760
+VMAXCPDZrr 2761
+VMAXCPDZrrk 2762
+VMAXCPDZrrkz 2763
+VMAXCPDrm 2764
+VMAXCPDrr 2765
+VMAXCPHZ 2766
+VMAXCPHZrm 2767
+VMAXCPHZrmb 2768
+VMAXCPHZrmbk 2769
+VMAXCPHZrmbkz 2770
+VMAXCPHZrmk 2771
+VMAXCPHZrmkz 2772
+VMAXCPHZrr 2773
+VMAXCPHZrrk 2774
+VMAXCPHZrrkz 2775
+VMAXCPSYrm 2776
+VMAXCPSYrr 2777
+VMAXCPSZ 2778
+VMAXCPSZrm 2779
+VMAXCPSZrmb 2780
+VMAXCPSZrmbk 2781
+VMAXCPSZrmbkz 2782
+VMAXCPSZrmk 2783
+VMAXCPSZrmkz 2784
+VMAXCPSZrr 2785
+VMAXCPSZrrk 2786
+VMAXCPSZrrkz 2787
+VMAXCPSrm 2788
+VMAXCPSrr 2789
+VMAXCSDZrm 2790
+VMAXCSDZrr 2791
+VMAXCSDrm 2792
+VMAXCSDrr 2793
+VMAXCSHZrm 2794
+VMAXCSHZrr 2795
+VMAXCSSZrm 2796
+VMAXCSSZrr 2797
+VMAXCSSrm 2798
+VMAXCSSrr 2799
+VMAXPDYrm 2800
+VMAXPDYrr 2801
+VMAXPDZ 2802
+VMAXPDZrm 2803
+VMAXPDZrmb 2804
+VMAXPDZrmbk 2805
+VMAXPDZrmbkz 2806
+VMAXPDZrmk 2807
+VMAXPDZrmkz 2808
+VMAXPDZrr 2809
+VMAXPDZrrb 2810
+VMAXPDZrrbk 2811
+VMAXPDZrrbkz 2812
+VMAXPDZrrk 2813
+VMAXPDZrrkz 2814
+VMAXPDrm 2815
+VMAXPDrr 2816
+VMAXPHZ 2817
+VMAXPHZrm 2818
+VMAXPHZrmb 2819
+VMAXPHZrmbk 2820
+VMAXPHZrmbkz 2821
+VMAXPHZrmk 2822
+VMAXPHZrmkz 2823
+VMAXPHZrr 2824
+VMAXPHZrrb 2825
+VMAXPHZrrbk 2826
+VMAXPHZrrbkz 2827
+VMAXPHZrrk 2828
+VMAXPHZrrkz 2829
+VMAXPSYrm 2830
+VMAXPSYrr 2831
+VMAXPSZ 2832
+VMAXPSZrm 2833
+VMAXPSZrmb 2834
+VMAXPSZrmbk 2835
+VMAXPSZrmbkz 2836
+VMAXPSZrmk 2837
+VMAXPSZrmkz 2838
+VMAXPSZrr 2839
+VMAXPSZrrb 2840
+VMAXPSZrrbk 2841
+VMAXPSZrrbkz 2842
+VMAXPSZrrk 2843
+VMAXPSZrrkz 2844
+VMAXPSrm 2845
+VMAXPSrr 2846
+VMAXSDZrm 2847
+VMAXSDZrm_Int 2848
+VMAXSDZrmk_Int 2849
+VMAXSDZrmkz_Int 2850
+VMAXSDZrr 2851
+VMAXSDZrr_Int 2852
+VMAXSDZrrb_Int 2853
+VMAXSDZrrbk_Int 2854
+VMAXSDZrrbkz_Int 2855
+VMAXSDZrrk_Int 2856
+VMAXSDZrrkz_Int 2857
+VMAXSDrm 2858
+VMAXSDrm_Int 2859
+VMAXSDrr 2860
+VMAXSDrr_Int 2861
+VMAXSHZrm 2862
+VMAXSHZrm_Int 2863
+VMAXSHZrmk_Int 2864
+VMAXSHZrmkz_Int 2865
+VMAXSHZrr 2866
+VMAXSHZrr_Int 2867
+VMAXSHZrrb_Int 2868
+VMAXSHZrrbk_Int 2869
+VMAXSHZrrbkz_Int 2870
+VMAXSHZrrk_Int 2871
+VMAXSHZrrkz_Int 2872
+VMAXSSZrm 2873
+VMAXSSZrm_Int 2874
+VMAXSSZrmk_Int 2875
+VMAXSSZrmkz_Int 2876
+VMAXSSZrr 2877
+VMAXSSZrr_Int 2878
+VMAXSSZrrb_Int 2879
+VMAXSSZrrbk_Int 2880
+VMAXSSZrrbkz_Int 2881
+VMAXSSZrrk_Int 2882
+VMAXSSZrrkz_Int 2883
+VMAXSSrm 2884
+VMAXSSrm_Int 2885
+VMAXSSrr 2886
+VMAXSSrr_Int 2887
+VMCALL 2888
+VMCLEARm 2889
+VMFUNC 2890
+VMINBF 2891
+VMINCPDYrm 2892
+VMINCPDYrr 2893
+VMINCPDZ 2894
+VMINCPDZrm 2895
+VMINCPDZrmb 2896
+VMINCPDZrmbk 2897
+VMINCPDZrmbkz 2898
+VMINCPDZrmk 2899
+VMINCPDZrmkz 2900
+VMINCPDZrr 2901
+VMINCPDZrrk 2902
+VMINCPDZrrkz 2903
+VMINCPDrm 2904
+VMINCPDrr 2905
+VMINCPHZ 2906
+VMINCPHZrm 2907
+VMINCPHZrmb 2908
+VMINCPHZrmbk 2909
+VMINCPHZrmbkz 2910
+VMINCPHZrmk 2911
+VMINCPHZrmkz 2912
+VMINCPHZrr 2913
+VMINCPHZrrk 2914
+VMINCPHZrrkz 2915
+VMINCPSYrm 2916
+VMINCPSYrr 2917
+VMINCPSZ 2918
+VMINCPSZrm 2919
+VMINCPSZrmb 2920
+VMINCPSZrmbk 2921
+VMINCPSZrmbkz 2922
+VMINCPSZrmk 2923
+VMINCPSZrmkz 2924
+VMINCPSZrr 2925
+VMINCPSZrrk 2926
+VMINCPSZrrkz 2927
+VMINCPSrm 2928
+VMINCPSrr 2929
+VMINCSDZrm 2930
+VMINCSDZrr 2931
+VMINCSDrm 2932
+VMINCSDrr 2933
+VMINCSHZrm 2934
+VMINCSHZrr 2935
+VMINCSSZrm 2936
+VMINCSSZrr 2937
+VMINCSSrm 2938
+VMINCSSrr 2939
+VMINMAXBF 2940
+VMINMAXPDZ 2941
+VMINMAXPDZrmbi 2942
+VMINMAXPDZrmbik 2943
+VMINMAXPDZrmbikz 2944
+VMINMAXPDZrmi 2945
+VMINMAXPDZrmik 2946
+VMINMAXPDZrmikz 2947
+VMINMAXPDZrri 2948
+VMINMAXPDZrrib 2949
+VMINMAXPDZrribk 2950
+VMINMAXPDZrribkz 2951
+VMINMAXPDZrrik 2952
+VMINMAXPDZrrikz 2953
+VMINMAXPHZ 2954
+VMINMAXPHZrmbi 2955
+VMINMAXPHZrmbik 2956
+VMINMAXPHZrmbikz 2957
+VMINMAXPHZrmi 2958
+VMINMAXPHZrmik 2959
+VMINMAXPHZrmikz 2960
+VMINMAXPHZrri 2961
+VMINMAXPHZrrib 2962
+VMINMAXPHZrribk 2963
+VMINMAXPHZrribkz 2964
+VMINMAXPHZrrik 2965
+VMINMAXPHZrrikz 2966
+VMINMAXPSZ 2967
+VMINMAXPSZrmbi 2968
+VMINMAXPSZrmbik 2969
+VMINMAXPSZrmbikz 2970
+VMINMAXPSZrmi 2971
+VMINMAXPSZrmik 2972
+VMINMAXPSZrmikz 2973
+VMINMAXPSZrri 2974
+VMINMAXPSZrrib 2975
+VMINMAXPSZrribk 2976
+VMINMAXPSZrribkz 2977
+VMINMAXPSZrrik 2978
+VMINMAXPSZrrikz 2979
+VMINMAXSDrmi 2980
+VMINMAXSDrmi_Int 2981
+VMINMAXSDrmik_Int 2982
+VMINMAXSDrmikz_Int 2983
+VMINMAXSDrri 2984
+VMINMAXSDrri_Int 2985
+VMINMAXSDrrib_Int 2986
+VMINMAXSDrribk_Int 2987
+VMINMAXSDrribkz_Int 2988
+VMINMAXSDrrik_Int 2989
+VMINMAXSDrrikz_Int 2990
+VMINMAXSHrmi 2991
+VMINMAXSHrmi_Int 2992
+VMINMAXSHrmik_Int 2993
+VMINMAXSHrmikz_Int 2994
+VMINMAXSHrri 2995
+VMINMAXSHrri_Int 2996
+VMINMAXSHrrib_Int 2997
+VMINMAXSHrribk_Int 2998
+VMINMAXSHrribkz_Int 2999
+VMINMAXSHrrik_Int 3000
+VMINMAXSHrrikz_Int 3001
+VMINMAXSSrmi 3002
+VMINMAXSSrmi_Int 3003
+VMINMAXSSrmik_Int 3004
+VMINMAXSSrmikz_Int 3005
+VMINMAXSSrri 3006
+VMINMAXSSrri_Int 3007
+VMINMAXSSrrib_Int 3008
+VMINMAXSSrribk_Int 3009
+VMINMAXSSrribkz_Int 3010
+VMINMAXSSrrik_Int 3011
+VMINMAXSSrrikz_Int 3012
+VMINPDYrm 3013
+VMINPDYrr 3014
+VMINPDZ 3015
+VMINPDZrm 3016
+VMINPDZrmb 3017
+VMINPDZrmbk 3018
+VMINPDZrmbkz 3019
+VMINPDZrmk 3020
+VMINPDZrmkz 3021
+VMINPDZrr 3022
+VMINPDZrrb 3023
+VMINPDZrrbk 3024
+VMINPDZrrbkz 3025
+VMINPDZrrk 3026
+VMINPDZrrkz 3027
+VMINPDrm 3028
+VMINPDrr 3029
+VMINPHZ 3030
+VMINPHZrm 3031
+VMINPHZrmb 3032
+VMINPHZrmbk 3033
+VMINPHZrmbkz 3034
+VMINPHZrmk 3035
+VMINPHZrmkz 3036
+VMINPHZrr 3037
+VMINPHZrrb 3038
+VMINPHZrrbk 3039
+VMINPHZrrbkz 3040
+VMINPHZrrk 3041
+VMINPHZrrkz 3042
+VMINPSYrm 3043
+VMINPSYrr 3044
+VMINPSZ 3045
+VMINPSZrm 3046
+VMINPSZrmb 3047
+VMINPSZrmbk 3048
+VMINPSZrmbkz 3049
+VMINPSZrmk 3050
+VMINPSZrmkz 3051
+VMINPSZrr 3052
+VMINPSZrrb 3053
+VMINPSZrrbk 3054
+VMINPSZrrbkz 3055
+VMINPSZrrk 3056
+VMINPSZrrkz 3057
+VMINPSrm 3058
+VMINPSrr 3059
+VMINSDZrm 3060
+VMINSDZrm_Int 3061
+VMINSDZrmk_Int 3062
+VMINSDZrmkz_Int 3063
+VMINSDZrr 3064
+VMINSDZrr_Int 3065
+VMINSDZrrb_Int 3066
+VMINSDZrrbk_Int 3067
+VMINSDZrrbkz_Int 3068
+VMINSDZrrk_Int 3069
+VMINSDZrrkz_Int 3070
+VMINSDrm 3071
+VMINSDrm_Int 3072
+VMINSDrr 3073
+VMINSDrr_Int 3074
+VMINSHZrm 3075
+VMINSHZrm_Int 3076
+VMINSHZrmk_Int 3077
+VMINSHZrmkz_Int 3078
+VMINSHZrr 3079
+VMINSHZrr_Int 3080
+VMINSHZrrb_Int 3081
+VMINSHZrrbk_Int 3082
+VMINSHZrrbkz_Int 3083
+VMINSHZrrk_Int 3084
+VMINSHZrrkz_Int 3085
+VMINSSZrm 3086
+VMINSSZrm_Int 3087
+VMINSSZrmk_Int 3088
+VMINSSZrmkz_Int 3089
+VMINSSZrr 3090
+VMINSSZrr_Int 3091
+VMINSSZrrb_Int 3092
+VMINSSZrrbk_Int 3093
+VMINSSZrrbkz_Int 3094
+VMINSSZrrk_Int 3095
+VMINSSZrrkz_Int 3096
+VMINSSrm 3097
+VMINSSrm_Int 3098
+VMINSSrr 3099
+VMINSSrr_Int 3100
+VMLAUNCH 3101
+VMLOAD 3102
+VMMCALL 3103
+VMOV 3104
+VMOVAPDYmr 3105
+VMOVAPDYrm 3106
+VMOVAPDYrr 3107
+VMOVAPDYrr_REV 3108
+VMOVAPDZ 3109
+VMOVAPDZmr 3110
+VMOVAPDZmrk 3111
+VMOVAPDZrm 3112
+VMOVAPDZrmk 3113
+VMOVAPDZrmkz 3114
+VMOVAPDZrr 3115
+VMOVAPDZrr_REV 3116
+VMOVAPDZrrk 3117
+VMOVAPDZrrk_REV 3118
+VMOVAPDZrrkz 3119
+VMOVAPDZrrkz_REV 3120
+VMOVAPDmr 3121
+VMOVAPDrm 3122
+VMOVAPDrr 3123
+VMOVAPDrr_REV 3124
+VMOVAPSYmr 3125
+VMOVAPSYrm 3126
+VMOVAPSYrr 3127
+VMOVAPSYrr_REV 3128
+VMOVAPSZ 3129
+VMOVAPSZmr 3130
+VMOVAPSZmrk 3131
+VMOVAPSZrm 3132
+VMOVAPSZrmk 3133
+VMOVAPSZrmkz 3134
+VMOVAPSZrr 3135
+VMOVAPSZrr_REV 3136
+VMOVAPSZrrk 3137
+VMOVAPSZrrk_REV 3138
+VMOVAPSZrrkz 3139
+VMOVAPSZrrkz_REV 3140
+VMOVAPSmr 3141
+VMOVAPSrm 3142
+VMOVAPSrr 3143
+VMOVAPSrr_REV 3144
+VMOVDDUPYrm 3145
+VMOVDDUPYrr 3146
+VMOVDDUPZ 3147
+VMOVDDUPZrm 3148
+VMOVDDUPZrmk 3149
+VMOVDDUPZrmkz 3150
+VMOVDDUPZrr 3151
+VMOVDDUPZrrk 3152
+VMOVDDUPZrrkz 3153
+VMOVDDUPrm 3154
+VMOVDDUPrr 3155
+VMOVDI 3156
+VMOVDQA 3157
+VMOVDQAYmr 3158
+VMOVDQAYrm 3159
+VMOVDQAYrr 3160
+VMOVDQAYrr_REV 3161
+VMOVDQAmr 3162
+VMOVDQArm 3163
+VMOVDQArr 3164
+VMOVDQArr_REV 3165
+VMOVDQU 3166
+VMOVDQUYmr 3167
+VMOVDQUYrm 3168
+VMOVDQUYrr 3169
+VMOVDQUYrr_REV 3170
+VMOVDQUmr 3171
+VMOVDQUrm 3172
+VMOVDQUrr 3173
+VMOVDQUrr_REV 3174
+VMOVHLPSZrr 3175
+VMOVHLPSrr 3176
+VMOVHPDZ 3177
+VMOVHPDmr 3178
+VMOVHPDrm 3179
+VMOVHPSZ 3180
+VMOVHPSmr 3181
+VMOVHPSrm 3182
+VMOVLHPSZrr 3183
+VMOVLHPSrr 3184
+VMOVLPDZ 3185
+VMOVLPDmr 3186
+VMOVLPDrm 3187
+VMOVLPSZ 3188
+VMOVLPSmr 3189
+VMOVLPSrm 3190
+VMOVMSKPDYrr 3191
+VMOVMSKPDrr 3192
+VMOVMSKPSYrr 3193
+VMOVMSKPSrr 3194
+VMOVNTDQAYrm 3195
+VMOVNTDQAZ 3196
+VMOVNTDQAZrm 3197
+VMOVNTDQArm 3198
+VMOVNTDQYmr 3199
+VMOVNTDQZ 3200
+VMOVNTDQZmr 3201
+VMOVNTDQmr 3202
+VMOVNTPDYmr 3203
+VMOVNTPDZ 3204
+VMOVNTPDZmr 3205
+VMOVNTPDmr 3206
+VMOVNTPSYmr 3207
+VMOVNTPSZ 3208
+VMOVNTPSZmr 3209
+VMOVNTPSmr 3210
+VMOVPDI 3211
+VMOVPQI 3212
+VMOVPQIto 3213
+VMOVQI 3214
+VMOVRSBZ 3215
+VMOVRSBZm 3216
+VMOVRSBZmk 3217
+VMOVRSBZmkz 3218
+VMOVRSDZ 3219
+VMOVRSDZm 3220
+VMOVRSDZmk 3221
+VMOVRSDZmkz 3222
+VMOVRSQZ 3223
+VMOVRSQZm 3224
+VMOVRSQZmk 3225
+VMOVRSQZmkz 3226
+VMOVRSWZ 3227
+VMOVRSWZm 3228
+VMOVRSWZmk 3229
+VMOVRSWZmkz 3230
+VMOVSDZmr 3231
+VMOVSDZmrk 3232
+VMOVSDZrm 3233
+VMOVSDZrm_alt 3234
+VMOVSDZrmk 3235
+VMOVSDZrmkz 3236
+VMOVSDZrr 3237
+VMOVSDZrr_REV 3238
+VMOVSDZrrk 3239
+VMOVSDZrrk_REV 3240
+VMOVSDZrrkz 3241
+VMOVSDZrrkz_REV 3242
+VMOVSDmr 3243
+VMOVSDrm 3244
+VMOVSDrm_alt 3245
+VMOVSDrr 3246
+VMOVSDrr_REV 3247
+VMOVSDto 3248
+VMOVSH 3249
+VMOVSHDUPYrm 3250
+VMOVSHDUPYrr 3251
+VMOVSHDUPZ 3252
+VMOVSHDUPZrm 3253
+VMOVSHDUPZrmk 3254
+VMOVSHDUPZrmkz 3255
+VMOVSHDUPZrr 3256
+VMOVSHDUPZrrk 3257
+VMOVSHDUPZrrkz 3258
+VMOVSHDUPrm 3259
+VMOVSHDUPrr 3260
+VMOVSHZmr 3261
+VMOVSHZmrk 3262
+VMOVSHZrm 3263
+VMOVSHZrm_alt 3264
+VMOVSHZrmk 3265
+VMOVSHZrmkz 3266
+VMOVSHZrr 3267
+VMOVSHZrr_REV 3268
+VMOVSHZrrk 3269
+VMOVSHZrrk_REV 3270
+VMOVSHZrrkz 3271
+VMOVSHZrrkz_REV 3272
+VMOVSHtoW 3273
+VMOVSLDUPYrm 3274
+VMOVSLDUPYrr 3275
+VMOVSLDUPZ 3276
+VMOVSLDUPZrm 3277
+VMOVSLDUPZrmk 3278
+VMOVSLDUPZrmkz 3279
+VMOVSLDUPZrr 3280
+VMOVSLDUPZrrk 3281
+VMOVSLDUPZrrkz 3282
+VMOVSLDUPrm 3283
+VMOVSLDUPrr 3284
+VMOVSS 3285
+VMOVSSZmr 3286
+VMOVSSZmrk 3287
+VMOVSSZrm 3288
+VMOVSSZrm_alt 3289
+VMOVSSZrmk 3290
+VMOVSSZrmkz 3291
+VMOVSSZrr 3292
+VMOVSSZrr_REV 3293
+VMOVSSZrrk 3294
+VMOVSSZrrk_REV 3295
+VMOVSSZrrkz 3296
+VMOVSSZrrkz_REV 3297
+VMOVSSmr 3298
+VMOVSSrm 3299
+VMOVSSrm_alt 3300
+VMOVSSrr 3301
+VMOVSSrr_REV 3302
+VMOVUPDYmr 3303
+VMOVUPDYrm 3304
+VMOVUPDYrr 3305
+VMOVUPDYrr_REV 3306
+VMOVUPDZ 3307
+VMOVUPDZmr 3308
+VMOVUPDZmrk 3309
+VMOVUPDZrm 3310
+VMOVUPDZrmk 3311
+VMOVUPDZrmkz 3312
+VMOVUPDZrr 3313
+VMOVUPDZrr_REV 3314
+VMOVUPDZrrk 3315
+VMOVUPDZrrk_REV 3316
+VMOVUPDZrrkz 3317
+VMOVUPDZrrkz_REV 3318
+VMOVUPDmr 3319
+VMOVUPDrm 3320
+VMOVUPDrr 3321
+VMOVUPDrr_REV 3322
+VMOVUPSYmr 3323
+VMOVUPSYrm 3324
+VMOVUPSYrr 3325
+VMOVUPSYrr_REV 3326
+VMOVUPSZ 3327
+VMOVUPSZmr 3328
+VMOVUPSZmrk 3329
+VMOVUPSZrm 3330
+VMOVUPSZrmk 3331
+VMOVUPSZrmkz 3332
+VMOVUPSZrr 3333
+VMOVUPSZrr_REV 3334
+VMOVUPSZrrk 3335
+VMOVUPSZrrk_REV 3336
+VMOVUPSZrrkz 3337
+VMOVUPSZrrkz_REV 3338
+VMOVUPSmr 3339
+VMOVUPSrm 3340
+VMOVUPSrr 3341
+VMOVUPSrr_REV 3342
+VMOVW 3343
+VMOVWmr 3344
+VMOVWrm 3345
+VMOVZPDILo 3346
+VMOVZPQILo 3347
+VMOVZPWILo 3348
+VMPSADBWYrmi 3349
+VMPSADBWYrri 3350
+VMPSADBWZ 3351
+VMPSADBWZrmi 3352
+VMPSADBWZrmik 3353
+VMPSADBWZrmikz 3354
+VMPSADBWZrri 3355
+VMPSADBWZrrik 3356
+VMPSADBWZrrikz 3357
+VMPSADBWrmi 3358
+VMPSADBWrri 3359
+VMPTRLDm 3360
+VMPTRSTm 3361
+VMREAD 3362
+VMRESUME 3363
+VMRUN 3364
+VMSAVE 3365
+VMULBF 3366
+VMULPDYrm 3367
+VMULPDYrr 3368
+VMULPDZ 3369
+VMULPDZrm 3370
+VMULPDZrmb 3371
+VMULPDZrmbk 3372
+VMULPDZrmbkz 3373
+VMULPDZrmk 3374
+VMULPDZrmkz 3375
+VMULPDZrr 3376
+VMULPDZrrb 3377
+VMULPDZrrbk 3378
+VMULPDZrrbkz 3379
+VMULPDZrrk 3380
+VMULPDZrrkz 3381
+VMULPDrm 3382
+VMULPDrr 3383
+VMULPHZ 3384
+VMULPHZrm 3385
+VMULPHZrmb 3386
+VMULPHZrmbk 3387
+VMULPHZrmbkz 3388
+VMULPHZrmk 3389
+VMULPHZrmkz 3390
+VMULPHZrr 3391
+VMULPHZrrb 3392
+VMULPHZrrbk 3393
+VMULPHZrrbkz 3394
+VMULPHZrrk 3395
+VMULPHZrrkz 3396
+VMULPSYrm 3397
+VMULPSYrr 3398
+VMULPSZ 3399
+VMULPSZrm 3400
+VMULPSZrmb 3401
+VMULPSZrmbk 3402
+VMULPSZrmbkz 3403
+VMULPSZrmk 3404
+VMULPSZrmkz 3405
+VMULPSZrr 3406
+VMULPSZrrb 3407
+VMULPSZrrbk 3408
+VMULPSZrrbkz 3409
+VMULPSZrrk 3410
+VMULPSZrrkz 3411
+VMULPSrm 3412
+VMULPSrr 3413
+VMULSDZrm 3414
+VMULSDZrm_Int 3415
+VMULSDZrmk_Int 3416
+VMULSDZrmkz_Int 3417
+VMULSDZrr 3418
+VMULSDZrr_Int 3419
+VMULSDZrrb_Int 3420
+VMULSDZrrbk_Int 3421
+VMULSDZrrbkz_Int 3422
+VMULSDZrrk_Int 3423
+VMULSDZrrkz_Int 3424
+VMULSDrm 3425
+VMULSDrm_Int 3426
+VMULSDrr 3427
+VMULSDrr_Int 3428
+VMULSHZrm 3429
+VMULSHZrm_Int 3430
+VMULSHZrmk_Int 3431
+VMULSHZrmkz_Int 3432
+VMULSHZrr 3433
+VMULSHZrr_Int 3434
+VMULSHZrrb_Int 3435
+VMULSHZrrbk_Int 3436
+VMULSHZrrbkz_Int 3437
+VMULSHZrrk_Int 3438
+VMULSHZrrkz_Int 3439
+VMULSSZrm 3440
+VMULSSZrm_Int 3441
+VMULSSZrmk_Int 3442
+VMULSSZrmkz_Int 3443
+VMULSSZrr 3444
+VMULSSZrr_Int 3445
+VMULSSZrrb_Int 3446
+VMULSSZrrbk_Int 3447
+VMULSSZrrbkz_Int 3448
+VMULSSZrrk_Int 3449
+VMULSSZrrkz_Int 3450
+VMULSSrm 3451
+VMULSSrm_Int 3452
+VMULSSrr 3453
+VMULSSrr_Int 3454
+VMWRITE 3455
+VMXOFF 3456
+VMXON 3457
+VORPDYrm 3458
+VORPDYrr 3459
+VORPDZ 3460
+VORPDZrm 3461
+VORPDZrmb 3462
+VORPDZrmbk 3463
+VORPDZrmbkz 3464
+VORPDZrmk 3465
+VORPDZrmkz 3466
+VORPDZrr 3467
+VORPDZrrk 3468
+VORPDZrrkz 3469
+VORPDrm 3470
+VORPDrr 3471
+VORPSYrm 3472
+VORPSYrr 3473
+VORPSZ 3474
+VORPSZrm 3475
+VORPSZrmb 3476
+VORPSZrmbk 3477
+VORPSZrmbkz 3478
+VORPSZrmk 3479
+VORPSZrmkz 3480
+VORPSZrr 3481
+VORPSZrrk 3482
+VORPSZrrkz 3483
+VORPSrm 3484
+VORPSrr 3485
+VP 3486
+VPABSBYrm 3487
+VPABSBYrr 3488
+VPABSBZ 3489
+VPABSBZrm 3490
+VPABSBZrmk 3491
+VPABSBZrmkz 3492
+VPABSBZrr 3493
+VPABSBZrrk 3494
+VPABSBZrrkz 3495
+VPABSBrm 3496
+VPABSBrr 3497
+VPABSDYrm 3498
+VPABSDYrr 3499
+VPABSDZ 3500
+VPABSDZrm 3501
+VPABSDZrmb 3502
+VPABSDZrmbk 3503
+VPABSDZrmbkz 3504
+VPABSDZrmk 3505
+VPABSDZrmkz 3506
+VPABSDZrr 3507
+VPABSDZrrk 3508
+VPABSDZrrkz 3509
+VPABSDrm 3510
+VPABSDrr 3511
+VPABSQZ 3512
+VPABSQZrm 3513
+VPABSQZrmb 3514
+VPABSQZrmbk 3515
+VPABSQZrmbkz 3516
+VPABSQZrmk 3517
+VPABSQZrmkz 3518
+VPABSQZrr 3519
+VPABSQZrrk 3520
+VPABSQZrrkz 3521
+VPABSWYrm 3522
+VPABSWYrr 3523
+VPABSWZ 3524
+VPABSWZrm 3525
+VPABSWZrmk 3526
+VPABSWZrmkz 3527
+VPABSWZrr 3528
+VPABSWZrrk 3529
+VPABSWZrrkz 3530
+VPABSWrm 3531
+VPABSWrr 3532
+VPACKSSDWYrm 3533
+VPACKSSDWYrr 3534
+VPACKSSDWZ 3535
+VPACKSSDWZrm 3536
+VPACKSSDWZrmb 3537
+VPACKSSDWZrmbk 3538
+VPACKSSDWZrmbkz 3539
+VPACKSSDWZrmk 3540
+VPACKSSDWZrmkz 3541
+VPACKSSDWZrr 3542
+VPACKSSDWZrrk 3543
+VPACKSSDWZrrkz 3544
+VPACKSSDWrm 3545
+VPACKSSDWrr 3546
+VPACKSSWBYrm 3547
+VPACKSSWBYrr 3548
+VPACKSSWBZ 3549
+VPACKSSWBZrm 3550
+VPACKSSWBZrmk 3551
+VPACKSSWBZrmkz 3552
+VPACKSSWBZrr 3553
+VPACKSSWBZrrk 3554
+VPACKSSWBZrrkz 3555
+VPACKSSWBrm 3556
+VPACKSSWBrr 3557
+VPACKUSDWYrm 3558
+VPACKUSDWYrr 3559
+VPACKUSDWZ 3560
+VPACKUSDWZrm 3561
+VPACKUSDWZrmb 3562
+VPACKUSDWZrmbk 3563
+VPACKUSDWZrmbkz 3564
+VPACKUSDWZrmk 3565
+VPACKUSDWZrmkz 3566
+VPACKUSDWZrr 3567
+VPACKUSDWZrrk 3568
+VPACKUSDWZrrkz 3569
+VPACKUSDWrm 3570
+VPACKUSDWrr 3571
+VPACKUSWBYrm 3572
+VPACKUSWBYrr 3573
+VPACKUSWBZ 3574
+VPACKUSWBZrm 3575
+VPACKUSWBZrmk 3576
+VPACKUSWBZrmkz 3577
+VPACKUSWBZrr 3578
+VPACKUSWBZrrk 3579
+VPACKUSWBZrrkz 3580
+VPACKUSWBrm 3581
+VPACKUSWBrr 3582
+VPADDBYrm 3583
+VPADDBYrr 3584
+VPADDBZ 3585
+VPADDBZrm 3586
+VPADDBZrmk 3587
+VPADDBZrmkz 3588
+VPADDBZrr 3589
+VPADDBZrrk 3590
+VPADDBZrrkz 3591
+VPADDBrm 3592
+VPADDBrr 3593
+VPADDDYrm 3594
+VPADDDYrr 3595
+VPADDDZ 3596
+VPADDDZrm 3597
+VPADDDZrmb 3598
+VPADDDZrmbk 3599
+VPADDDZrmbkz 3600
+VPADDDZrmk 3601
+VPADDDZrmkz 3602
+VPADDDZrr 3603
+VPADDDZrrk 3604
+VPADDDZrrkz 3605
+VPADDDrm 3606
+VPADDDrr 3607
+VPADDQYrm 3608
+VPADDQYrr 3609
+VPADDQZ 3610
+VPADDQZrm 3611
+VPADDQZrmb 3612
+VPADDQZrmbk 3613
+VPADDQZrmbkz 3614
+VPADDQZrmk 3615
+VPADDQZrmkz 3616
+VPADDQZrr 3617
+VPADDQZrrk 3618
+VPADDQZrrkz 3619
+VPADDQrm 3620
+VPADDQrr 3621
+VPADDSBYrm 3622
+VPADDSBYrr 3623
+VPADDSBZ 3624
+VPADDSBZrm 3625
+VPADDSBZrmk 3626
+VPADDSBZrmkz 3627
+VPADDSBZrr 3628
+VPADDSBZrrk 3629
+VPADDSBZrrkz 3630
+VPADDSBrm 3631
+VPADDSBrr 3632
+VPADDSWYrm 3633
+VPADDSWYrr 3634
+VPADDSWZ 3635
+VPADDSWZrm 3636
+VPADDSWZrmk 3637
+VPADDSWZrmkz 3638
+VPADDSWZrr 3639
+VPADDSWZrrk 3640
+VPADDSWZrrkz 3641
+VPADDSWrm 3642
+VPADDSWrr 3643
+VPADDUSBYrm 3644
+VPADDUSBYrr 3645
+VPADDUSBZ 3646
+VPADDUSBZrm 3647
+VPADDUSBZrmk 3648
+VPADDUSBZrmkz 3649
+VPADDUSBZrr 3650
+VPADDUSBZrrk 3651
+VPADDUSBZrrkz 3652
+VPADDUSBrm 3653
+VPADDUSBrr 3654
+VPADDUSWYrm 3655
+VPADDUSWYrr 3656
+VPADDUSWZ 3657
+VPADDUSWZrm 3658
+VPADDUSWZrmk 3659
+VPADDUSWZrmkz 3660
+VPADDUSWZrr 3661
+VPADDUSWZrrk 3662
+VPADDUSWZrrkz 3663
+VPADDUSWrm 3664
+VPADDUSWrr 3665
+VPADDWYrm 3666
+VPADDWYrr 3667
+VPADDWZ 3668
+VPADDWZrm 3669
+VPADDWZrmk 3670
+VPADDWZrmkz 3671
+VPADDWZrr 3672
+VPADDWZrrk 3673
+VPADDWZrrkz 3674
+VPADDWrm 3675
+VPADDWrr 3676
+VPALIGNRYrmi 3677
+VPALIGNRYrri 3678
+VPALIGNRZ 3679
+VPALIGNRZrmi 3680
+VPALIGNRZrmik 3681
+VPALIGNRZrmikz 3682
+VPALIGNRZrri 3683
+VPALIGNRZrrik 3684
+VPALIGNRZrrikz 3685
+VPALIGNRrmi 3686
+VPALIGNRrri 3687
+VPANDDZ 3688
+VPANDDZrm 3689
+VPANDDZrmb 3690
+VPANDDZrmbk 3691
+VPANDDZrmbkz 3692
+VPANDDZrmk 3693
+VPANDDZrmkz 3694
+VPANDDZrr 3695
+VPANDDZrrk 3696
+VPANDDZrrkz 3697
+VPANDNDZ 3698
+VPANDNDZrm 3699
+VPANDNDZrmb 3700
+VPANDNDZrmbk 3701
+VPANDNDZrmbkz 3702
+VPANDNDZrmk 3703
+VPANDNDZrmkz 3704
+VPANDNDZrr 3705
+VPANDNDZrrk 3706
+VPANDNDZrrkz 3707
+VPANDNQZ 3708
+VPANDNQZrm 3709
+VPANDNQZrmb 3710
+VPANDNQZrmbk 3711
+VPANDNQZrmbkz 3712
+VPANDNQZrmk 3713
+VPANDNQZrmkz 3714
+VPANDNQZrr 3715
+VPANDNQZrrk 3716
+VPANDNQZrrkz 3717
+VPANDNYrm 3718
+VPANDNYrr 3719
+VPANDNrm 3720
+VPANDNrr 3721
+VPANDQZ 3722
+VPANDQZrm 3723
+VPANDQZrmb 3724
+VPANDQZrmbk 3725
+VPANDQZrmbkz 3726
+VPANDQZrmk 3727
+VPANDQZrmkz 3728
+VPANDQZrr 3729
+VPANDQZrrk 3730
+VPANDQZrrkz 3731
+VPANDYrm 3732
+VPANDYrr 3733
+VPANDrm 3734
+VPANDrr 3735
+VPAVGBYrm 3736
+VPAVGBYrr 3737
+VPAVGBZ 3738
+VPAVGBZrm 3739
+VPAVGBZrmk 3740
+VPAVGBZrmkz 3741
+VPAVGBZrr 3742
+VPAVGBZrrk 3743
+VPAVGBZrrkz 3744
+VPAVGBrm 3745
+VPAVGBrr 3746
+VPAVGWYrm 3747
+VPAVGWYrr 3748
+VPAVGWZ 3749
+VPAVGWZrm 3750
+VPAVGWZrmk 3751
+VPAVGWZrmkz 3752
+VPAVGWZrr 3753
+VPAVGWZrrk 3754
+VPAVGWZrrkz 3755
+VPAVGWrm 3756
+VPAVGWrr 3757
+VPBLENDDYrmi 3758
+VPBLENDDYrri 3759
+VPBLENDDrmi 3760
+VPBLENDDrri 3761
+VPBLENDMBZ 3762
+VPBLENDMBZrm 3763
+VPBLENDMBZrmk 3764
+VPBLENDMBZrmkz 3765
+VPBLENDMBZrr 3766
+VPBLENDMBZrrk 3767
+VPBLENDMBZrrkz 3768
+VPBLENDMDZ 3769
+VPBLENDMDZrm 3770
+VPBLENDMDZrmb 3771
+VPBLENDMDZrmbk 3772
+VPBLENDMDZrmbkz 3773
+VPBLENDMDZrmk 3774
+VPBLENDMDZrmkz 3775
+VPBLENDMDZrr 3776
+VPBLENDMDZrrk 3777
+VPBLENDMDZrrkz 3778
+VPBLENDMQZ 3779
+VPBLENDMQZrm 3780
+VPBLENDMQZrmb 3781
+VPBLENDMQZrmbk 3782
+VPBLENDMQZrmbkz 3783
+VPBLENDMQZrmk 3784
+VPBLENDMQZrmkz 3785
+VPBLENDMQZrr 3786
+VPBLENDMQZrrk 3787
+VPBLENDMQZrrkz 3788
+VPBLENDMWZ 3789
+VPBLENDMWZrm 3790
+VPBLENDMWZrmk 3791
+VPBLENDMWZrmkz 3792
+VPBLENDMWZrr 3793
+VPBLENDMWZrrk 3794
+VPBLENDMWZrrkz 3795
+VPBLENDVBYrmr 3796
+VPBLENDVBYrrr 3797
+VPBLENDVBrmr 3798
+VPBLENDVBrrr 3799
+VPBLENDWYrmi 3800
+VPBLENDWYrri 3801
+VPBLENDWrmi 3802
+VPBLENDWrri 3803
+VPBROADCASTBYrm 3804
+VPBROADCASTBYrr 3805
+VPBROADCASTBZ 3806
+VPBROADCASTBZrm 3807
+VPBROADCASTBZrmk 3808
+VPBROADCASTBZrmkz 3809
+VPBROADCASTBZrr 3810
+VPBROADCASTBZrrk 3811
+VPBROADCASTBZrrkz 3812
+VPBROADCASTBrZ 3813
+VPBROADCASTBrZrr 3814
+VPBROADCASTBrZrrk 3815
+VPBROADCASTBrZrrkz 3816
+VPBROADCASTBrm 3817
+VPBROADCASTBrr 3818
+VPBROADCASTDYrm 3819
+VPBROADCASTDYrr 3820
+VPBROADCASTDZ 3821
+VPBROADCASTDZrm 3822
+VPBROADCASTDZrmk 3823
+VPBROADCASTDZrmkz 3824
+VPBROADCASTDZrr 3825
+VPBROADCASTDZrrk 3826
+VPBROADCASTDZrrkz 3827
+VPBROADCASTDrZ 3828
+VPBROADCASTDrZrr 3829
+VPBROADCASTDrZrrk 3830
+VPBROADCASTDrZrrkz 3831
+VPBROADCASTDrm 3832
+VPBROADCASTDrr 3833
+VPBROADCASTMB 3834
+VPBROADCASTMW 3835
+VPBROADCASTQYrm 3836
+VPBROADCASTQYrr 3837
+VPBROADCASTQZ 3838
+VPBROADCASTQZrm 3839
+VPBROADCASTQZrmk 3840
+VPBROADCASTQZrmkz 3841
+VPBROADCASTQZrr 3842
+VPBROADCASTQZrrk 3843
+VPBROADCASTQZrrkz 3844
+VPBROADCASTQrZ 3845
+VPBROADCASTQrZrr 3846
+VPBROADCASTQrZrrk 3847
+VPBROADCASTQrZrrkz 3848
+VPBROADCASTQrm 3849
+VPBROADCASTQrr 3850
+VPBROADCASTWYrm 3851
+VPBROADCASTWYrr 3852
+VPBROADCASTWZ 3853
+VPBROADCASTWZrm 3854
+VPBROADCASTWZrmk 3855
+VPBROADCASTWZrmkz 3856
+VPBROADCASTWZrr 3857
+VPBROADCASTWZrrk 3858
+VPBROADCASTWZrrkz 3859
+VPBROADCASTWrZ 3860
+VPBROADCASTWrZrr 3861
+VPBROADCASTWrZrrk 3862
+VPBROADCASTWrZrrkz 3863
+VPBROADCASTWrm 3864
+VPBROADCASTWrr 3865
+VPCLMULQDQYrmi 3866
+VPCLMULQDQYrri 3867
+VPCLMULQDQZ 3868
+VPCLMULQDQZrmi 3869
+VPCLMULQDQZrri 3870
+VPCLMULQDQrmi 3871
+VPCLMULQDQrri 3872
+VPCMOVYrmr 3873
+VPCMOVYrrm 3874
+VPCMOVYrrr 3875
+VPCMOVYrrr_REV 3876
+VPCMOVrmr 3877
+VPCMOVrrm 3878
+VPCMOVrrr 3879
+VPCMOVrrr_REV 3880
+VPCMPBZ 3881
+VPCMPBZrmi 3882
+VPCMPBZrmik 3883
+VPCMPBZrri 3884
+VPCMPBZrrik 3885
+VPCMPDZ 3886
+VPCMPDZrmbi 3887
+VPCMPDZrmbik 3888
+VPCMPDZrmi 3889
+VPCMPDZrmik 3890
+VPCMPDZrri 3891
+VPCMPDZrrik 3892
+VPCMPEQBYrm 3893
+VPCMPEQBYrr 3894
+VPCMPEQBZ 3895
+VPCMPEQBZrm 3896
+VPCMPEQBZrmk 3897
+VPCMPEQBZrr 3898
+VPCMPEQBZrrk 3899
+VPCMPEQBrm 3900
+VPCMPEQBrr 3901
+VPCMPEQDYrm 3902
+VPCMPEQDYrr 3903
+VPCMPEQDZ 3904
+VPCMPEQDZrm 3905
+VPCMPEQDZrmb 3906
+VPCMPEQDZrmbk 3907
+VPCMPEQDZrmk 3908
+VPCMPEQDZrr 3909
+VPCMPEQDZrrk 3910
+VPCMPEQDrm 3911
+VPCMPEQDrr 3912
+VPCMPEQQYrm 3913
+VPCMPEQQYrr 3914
+VPCMPEQQZ 3915
+VPCMPEQQZrm 3916
+VPCMPEQQZrmb 3917
+VPCMPEQQZrmbk 3918
+VPCMPEQQZrmk 3919
+VPCMPEQQZrr 3920
+VPCMPEQQZrrk 3921
+VPCMPEQQrm 3922
+VPCMPEQQrr 3923
+VPCMPEQWYrm 3924
+VPCMPEQWYrr 3925
+VPCMPEQWZ 3926
+VPCMPEQWZrm 3927
+VPCMPEQWZrmk 3928
+VPCMPEQWZrr 3929
+VPCMPEQWZrrk 3930
+VPCMPEQWrm 3931
+VPCMPEQWrr 3932
+VPCMPESTRIrmi 3933
+VPCMPESTRIrri 3934
+VPCMPESTRMrmi 3935
+VPCMPESTRMrri 3936
+VPCMPGTBYrm 3937
+VPCMPGTBYrr 3938
+VPCMPGTBZ 3939
+VPCMPGTBZrm 3940
+VPCMPGTBZrmk 3941
+VPCMPGTBZrr 3942
+VPCMPGTBZrrk 3943
+VPCMPGTBrm 3944
+VPCMPGTBrr 3945
+VPCMPGTDYrm 3946
+VPCMPGTDYrr 3947
+VPCMPGTDZ 3948
+VPCMPGTDZrm 3949
+VPCMPGTDZrmb 3950
+VPCMPGTDZrmbk 3951
+VPCMPGTDZrmk 3952
+VPCMPGTDZrr 3953
+VPCMPGTDZrrk 3954
+VPCMPGTDrm 3955
+VPCMPGTDrr 3956
+VPCMPGTQYrm 3957
+VPCMPGTQYrr 3958
+VPCMPGTQZ 3959
+VPCMPGTQZrm 3960
+VPCMPGTQZrmb 3961
+VPCMPGTQZrmbk 3962
+VPCMPGTQZrmk 3963
+VPCMPGTQZrr 3964
+VPCMPGTQZrrk 3965
+VPCMPGTQrm 3966
+VPCMPGTQrr 3967
+VPCMPGTWYrm 3968
+VPCMPGTWYrr 3969
+VPCMPGTWZ 3970
+VPCMPGTWZrm 3971
+VPCMPGTWZrmk 3972
+VPCMPGTWZrr 3973
+VPCMPGTWZrrk 3974
+VPCMPGTWrm 3975
+VPCMPGTWrr 3976
+VPCMPISTRIrmi 3977
+VPCMPISTRIrri 3978
+VPCMPISTRMrmi 3979
+VPCMPISTRMrri 3980
+VPCMPQZ 3981
+VPCMPQZrmbi 3982
+VPCMPQZrmbik 3983
+VPCMPQZrmi 3984
+VPCMPQZrmik 3985
+VPCMPQZrri 3986
+VPCMPQZrrik 3987
+VPCMPUBZ 3988
+VPCMPUBZrmi 3989
+VPCMPUBZrmik 3990
+VPCMPUBZrri 3991
+VPCMPUBZrrik 3992
+VPCMPUDZ 3993
+VPCMPUDZrmbi 3994
+VPCMPUDZrmbik 3995
+VPCMPUDZrmi 3996
+VPCMPUDZrmik 3997
+VPCMPUDZrri 3998
+VPCMPUDZrrik 3999
+VPCMPUQZ 4000
+VPCMPUQZrmbi 4001
+VPCMPUQZrmbik 4002
+VPCMPUQZrmi 4003
+VPCMPUQZrmik 4004
+VPCMPUQZrri 4005
+VPCMPUQZrrik 4006
+VPCMPUWZ 4007
+VPCMPUWZrmi 4008
+VPCMPUWZrmik 4009
+VPCMPUWZrri 4010
+VPCMPUWZrrik 4011
+VPCMPWZ 4012
+VPCMPWZrmi 4013
+VPCMPWZrmik 4014
+VPCMPWZrri 4015
+VPCMPWZrrik 4016
+VPCOMBmi 4017
+VPCOMBri 4018
+VPCOMDmi 4019
+VPCOMDri 4020
+VPCOMPRESSBZ 4021
+VPCOMPRESSBZmr 4022
+VPCOMPRESSBZmrk 4023
+VPCOMPRESSBZrr 4024
+VPCOMPRESSBZrrk 4025
+VPCOMPRESSBZrrkz 4026
+VPCOMPRESSDZ 4027
+VPCOMPRESSDZmr 4028
+VPCOMPRESSDZmrk 4029
+VPCOMPRESSDZrr 4030
+VPCOMPRESSDZrrk 4031
+VPCOMPRESSDZrrkz 4032
+VPCOMPRESSQZ 4033
+VPCOMPRESSQZmr 4034
+VPCOMPRESSQZmrk 4035
+VPCOMPRESSQZrr 4036
+VPCOMPRESSQZrrk 4037
+VPCOMPRESSQZrrkz 4038
+VPCOMPRESSWZ 4039
+VPCOMPRESSWZmr 4040
+VPCOMPRESSWZmrk 4041
+VPCOMPRESSWZrr 4042
+VPCOMPRESSWZrrk 4043
+VPCOMPRESSWZrrkz 4044
+VPCOMQmi 4045
+VPCOMQri 4046
+VPCOMUBmi 4047
+VPCOMUBri 4048
+VPCOMUDmi 4049
+VPCOMUDri 4050
+VPCOMUQmi 4051
+VPCOMUQri 4052
+VPCOMUWmi 4053
+VPCOMUWri 4054
+VPCOMWmi 4055
+VPCOMWri 4056
+VPCONFLICTDZ 4057
+VPCONFLICTDZrm 4058
+VPCONFLICTDZrmb 4059
+VPCONFLICTDZrmbk 4060
+VPCONFLICTDZrmbkz 4061
+VPCONFLICTDZrmk 4062
+VPCONFLICTDZrmkz 4063
+VPCONFLICTDZrr 4064
+VPCONFLICTDZrrk 4065
+VPCONFLICTDZrrkz 4066
+VPCONFLICTQZ 4067
+VPCONFLICTQZrm 4068
+VPCONFLICTQZrmb 4069
+VPCONFLICTQZrmbk 4070
+VPCONFLICTQZrmbkz 4071
+VPCONFLICTQZrmk 4072
+VPCONFLICTQZrmkz 4073
+VPCONFLICTQZrr 4074
+VPCONFLICTQZrrk 4075
+VPCONFLICTQZrrkz 4076
+VPDPBSSDSYrm 4077
+VPDPBSSDSYrr 4078
+VPDPBSSDSZ 4079
+VPDPBSSDSZrm 4080
+VPDPBSSDSZrmb 4081
+VPDPBSSDSZrmbk 4082
+VPDPBSSDSZrmbkz 4083
+VPDPBSSDSZrmk 4084
+VPDPBSSDSZrmkz 4085
+VPDPBSSDSZrr 4086
+VPDPBSSDSZrrk 4087
+VPDPBSSDSZrrkz 4088
+VPDPBSSDSrm 4089
+VPDPBSSDSrr 4090
+VPDPBSSDYrm 4091
+VPDPBSSDYrr 4092
+VPDPBSSDZ 4093
+VPDPBSSDZrm 4094
+VPDPBSSDZrmb 4095
+VPDPBSSDZrmbk 4096
+VPDPBSSDZrmbkz 4097
+VPDPBSSDZrmk 4098
+VPDPBSSDZrmkz 4099
+VPDPBSSDZrr 4100
+VPDPBSSDZrrk 4101
+VPDPBSSDZrrkz 4102
+VPDPBSSDrm 4103
+VPDPBSSDrr 4104
+VPDPBSUDSYrm 4105
+VPDPBSUDSYrr 4106
+VPDPBSUDSZ 4107
+VPDPBSUDSZrm 4108
+VPDPBSUDSZrmb 4109
+VPDPBSUDSZrmbk 4110
+VPDPBSUDSZrmbkz 4111
+VPDPBSUDSZrmk 4112
+VPDPBSUDSZrmkz 4113
+VPDPBSUDSZrr 4114
+VPDPBSUDSZrrk 4115
+VPDPBSUDSZrrkz 4116
+VPDPBSUDSrm 4117
+VPDPBSUDSrr 4118
+VPDPBSUDYrm 4119
+VPDPBSUDYrr 4120
+VPDPBSUDZ 4121
+VPDPBSUDZrm 4122
+VPDPBSUDZrmb 4123
+VPDPBSUDZrmbk 4124
+VPDPBSUDZrmbkz 4125
+VPDPBSUDZrmk 4126
+VPDPBSUDZrmkz 4127
+VPDPBSUDZrr 4128
+VPDPBSUDZrrk 4129
+VPDPBSUDZrrkz 4130
+VPDPBSUDrm 4131
+VPDPBSUDrr 4132
+VPDPBUSDSYrm 4133
+VPDPBUSDSYrr 4134
+VPDPBUSDSZ 4135
+VPDPBUSDSZrm 4136
+VPDPBUSDSZrmb 4137
+VPDPBUSDSZrmbk 4138
+VPDPBUSDSZrmbkz 4139
+VPDPBUSDSZrmk 4140
+VPDPBUSDSZrmkz 4141
+VPDPBUSDSZrr 4142
+VPDPBUSDSZrrk 4143
+VPDPBUSDSZrrkz 4144
+VPDPBUSDSrm 4145
+VPDPBUSDSrr 4146
+VPDPBUSDYrm 4147
+VPDPBUSDYrr 4148
+VPDPBUSDZ 4149
+VPDPBUSDZrm 4150
+VPDPBUSDZrmb 4151
+VPDPBUSDZrmbk 4152
+VPDPBUSDZrmbkz 4153
+VPDPBUSDZrmk 4154
+VPDPBUSDZrmkz 4155
+VPDPBUSDZrr 4156
+VPDPBUSDZrrk 4157
+VPDPBUSDZrrkz 4158
+VPDPBUSDrm 4159
+VPDPBUSDrr 4160
+VPDPBUUDSYrm 4161
+VPDPBUUDSYrr 4162
+VPDPBUUDSZ 4163
+VPDPBUUDSZrm 4164
+VPDPBUUDSZrmb 4165
+VPDPBUUDSZrmbk 4166
+VPDPBUUDSZrmbkz 4167
+VPDPBUUDSZrmk 4168
+VPDPBUUDSZrmkz 4169
+VPDPBUUDSZrr 4170
+VPDPBUUDSZrrk 4171
+VPDPBUUDSZrrkz 4172
+VPDPBUUDSrm 4173
+VPDPBUUDSrr 4174
+VPDPBUUDYrm 4175
+VPDPBUUDYrr 4176
+VPDPBUUDZ 4177
+VPDPBUUDZrm 4178
+VPDPBUUDZrmb 4179
+VPDPBUUDZrmbk 4180
+VPDPBUUDZrmbkz 4181
+VPDPBUUDZrmk 4182
+VPDPBUUDZrmkz 4183
+VPDPBUUDZrr 4184
+VPDPBUUDZrrk 4185
+VPDPBUUDZrrkz 4186
+VPDPBUUDrm 4187
+VPDPBUUDrr 4188
+VPDPWSSDSYrm 4189
+VPDPWSSDSYrr 4190
+VPDPWSSDSZ 4191
+VPDPWSSDSZrm 4192
+VPDPWSSDSZrmb 4193
+VPDPWSSDSZrmbk 4194
+VPDPWSSDSZrmbkz 4195
+VPDPWSSDSZrmk 4196
+VPDPWSSDSZrmkz 4197
+VPDPWSSDSZrr 4198
+VPDPWSSDSZrrk 4199
+VPDPWSSDSZrrkz 4200
+VPDPWSSDSrm 4201
+VPDPWSSDSrr 4202
+VPDPWSSDYrm 4203
+VPDPWSSDYrr 4204
+VPDPWSSDZ 4205
+VPDPWSSDZrm 4206
+VPDPWSSDZrmb 4207
+VPDPWSSDZrmbk 4208
+VPDPWSSDZrmbkz 4209
+VPDPWSSDZrmk 4210
+VPDPWSSDZrmkz 4211
+VPDPWSSDZrr 4212
+VPDPWSSDZrrk 4213
+VPDPWSSDZrrkz 4214
+VPDPWSSDrm 4215
+VPDPWSSDrr 4216
+VPDPWSUDSYrm 4217
+VPDPWSUDSYrr 4218
+VPDPWSUDSZ 4219
+VPDPWSUDSZrm 4220
+VPDPWSUDSZrmb 4221
+VPDPWSUDSZrmbk 4222
+VPDPWSUDSZrmbkz 4223
+VPDPWSUDSZrmk 4224
+VPDPWSUDSZrmkz 4225
+VPDPWSUDSZrr 4226
+VPDPWSUDSZrrk 4227
+VPDPWSUDSZrrkz 4228
+VPDPWSUDSrm 4229
+VPDPWSUDSrr 4230
+VPDPWSUDYrm 4231
+VPDPWSUDYrr 4232
+VPDPWSUDZ 4233
+VPDPWSUDZrm 4234
+VPDPWSUDZrmb 4235
+VPDPWSUDZrmbk 4236
+VPDPWSUDZrmbkz 4237
+VPDPWSUDZrmk 4238
+VPDPWSUDZrmkz 4239
+VPDPWSUDZrr 4240
+VPDPWSUDZrrk 4241
+VPDPWSUDZrrkz 4242
+VPDPWSUDrm 4243
+VPDPWSUDrr 4244
+VPDPWUSDSYrm 4245
+VPDPWUSDSYrr 4246
+VPDPWUSDSZ 4247
+VPDPWUSDSZrm 4248
+VPDPWUSDSZrmb 4249
+VPDPWUSDSZrmbk 4250
+VPDPWUSDSZrmbkz 4251
+VPDPWUSDSZrmk 4252
+VPDPWUSDSZrmkz 4253
+VPDPWUSDSZrr 4254
+VPDPWUSDSZrrk 4255
+VPDPWUSDSZrrkz 4256
+VPDPWUSDSrm 4257
+VPDPWUSDSrr 4258
+VPDPWUSDYrm 4259
+VPDPWUSDYrr 4260
+VPDPWUSDZ 4261
+VPDPWUSDZrm 4262
+VPDPWUSDZrmb 4263
+VPDPWUSDZrmbk 4264
+VPDPWUSDZrmbkz 4265
+VPDPWUSDZrmk 4266
+VPDPWUSDZrmkz 4267
+VPDPWUSDZrr 4268
+VPDPWUSDZrrk 4269
+VPDPWUSDZrrkz 4270
+VPDPWUSDrm 4271
+VPDPWUSDrr 4272
+VPDPWUUDSYrm 4273
+VPDPWUUDSYrr 4274
+VPDPWUUDSZ 4275
+VPDPWUUDSZrm 4276
+VPDPWUUDSZrmb 4277
+VPDPWUUDSZrmbk 4278
+VPDPWUUDSZrmbkz 4279
+VPDPWUUDSZrmk 4280
+VPDPWUUDSZrmkz 4281
+VPDPWUUDSZrr 4282
+VPDPWUUDSZrrk 4283
+VPDPWUUDSZrrkz 4284
+VPDPWUUDSrm 4285
+VPDPWUUDSrr 4286
+VPDPWUUDYrm 4287
+VPDPWUUDYrr 4288
+VPDPWUUDZ 4289
+VPDPWUUDZrm 4290
+VPDPWUUDZrmb 4291
+VPDPWUUDZrmbk 4292
+VPDPWUUDZrmbkz 4293
+VPDPWUUDZrmk 4294
+VPDPWUUDZrmkz 4295
+VPDPWUUDZrr 4296
+VPDPWUUDZrrk 4297
+VPDPWUUDZrrkz 4298
+VPDPWUUDrm 4299
+VPDPWUUDrr 4300
+VPERM 4301
+VPERMBZ 4302
+VPERMBZrm 4303
+VPERMBZrmk 4304
+VPERMBZrmkz 4305
+VPERMBZrr 4306
+VPERMBZrrk 4307
+VPERMBZrrkz 4308
+VPERMDYrm 4309
+VPERMDYrr 4310
+VPERMDZ 4311
+VPERMDZrm 4312
+VPERMDZrmb 4313
+VPERMDZrmbk 4314
+VPERMDZrmbkz 4315
+VPERMDZrmk 4316
+VPERMDZrmkz 4317
+VPERMDZrr 4318
+VPERMDZrrk 4319
+VPERMDZrrkz 4320
+VPERMI 4321
+VPERMIL 4322
+VPERMILPDYmi 4323
+VPERMILPDYri 4324
+VPERMILPDYrm 4325
+VPERMILPDYrr 4326
+VPERMILPDZ 4327
+VPERMILPDZmbi 4328
+VPERMILPDZmbik 4329
+VPERMILPDZmbikz 4330
+VPERMILPDZmi 4331
+VPERMILPDZmik 4332
+VPERMILPDZmikz 4333
+VPERMILPDZri 4334
+VPERMILPDZrik 4335
+VPERMILPDZrikz 4336
+VPERMILPDZrm 4337
+VPERMILPDZrmb 4338
+VPERMILPDZrmbk 4339
+VPERMILPDZrmbkz 4340
+VPERMILPDZrmk 4341
+VPERMILPDZrmkz 4342
+VPERMILPDZrr 4343
+VPERMILPDZrrk 4344
+VPERMILPDZrrkz 4345
+VPERMILPDmi 4346
+VPERMILPDri 4347
+VPERMILPDrm 4348
+VPERMILPDrr 4349
+VPERMILPSYmi 4350
+VPERMILPSYri 4351
+VPERMILPSYrm 4352
+VPERMILPSYrr 4353
+VPERMILPSZ 4354
+VPERMILPSZmbi 4355
+VPERMILPSZmbik 4356
+VPERMILPSZmbikz 4357
+VPERMILPSZmi 4358
+VPERMILPSZmik 4359
+VPERMILPSZmikz 4360
+VPERMILPSZri 4361
+VPERMILPSZrik 4362
+VPERMILPSZrikz 4363
+VPERMILPSZrm 4364
+VPERMILPSZrmb 4365
+VPERMILPSZrmbk 4366
+VPERMILPSZrmbkz 4367
+VPERMILPSZrmk 4368
+VPERMILPSZrmkz 4369
+VPERMILPSZrr 4370
+VPERMILPSZrrk 4371
+VPERMILPSZrrkz 4372
+VPERMILPSmi 4373
+VPERMILPSri 4374
+VPERMILPSrm 4375
+VPERMILPSrr 4376
+VPERMPDYmi 4377
+VPERMPDYri 4378
+VPERMPDZ 4379
+VPERMPDZmbi 4380
+VPERMPDZmbik 4381
+VPERMPDZmbikz 4382
+VPERMPDZmi 4383
+VPERMPDZmik 4384
+VPERMPDZmikz 4385
+VPERMPDZri 4386
+VPERMPDZrik 4387
+VPERMPDZrikz 4388
+VPERMPDZrm 4389
+VPERMPDZrmb 4390
+VPERMPDZrmbk 4391
+VPERMPDZrmbkz 4392
+VPERMPDZrmk 4393
+VPERMPDZrmkz 4394
+VPERMPDZrr 4395
+VPERMPDZrrk 4396
+VPERMPDZrrkz 4397
+VPERMPSYrm 4398
+VPERMPSYrr 4399
+VPERMPSZ 4400
+VPERMPSZrm 4401
+VPERMPSZrmb 4402
+VPERMPSZrmbk 4403
+VPERMPSZrmbkz 4404
+VPERMPSZrmk 4405
+VPERMPSZrmkz 4406
+VPERMPSZrr 4407
+VPERMPSZrrk 4408
+VPERMPSZrrkz 4409
+VPERMQYmi 4410
+VPERMQYri 4411
+VPERMQZ 4412
+VPERMQZmbi 4413
+VPERMQZmbik 4414
+VPERMQZmbikz 4415
+VPERMQZmi 4416
+VPERMQZmik 4417
+VPERMQZmikz 4418
+VPERMQZri 4419
+VPERMQZrik 4420
+VPERMQZrikz 4421
+VPERMQZrm 4422
+VPERMQZrmb 4423
+VPERMQZrmbk 4424
+VPERMQZrmbkz 4425
+VPERMQZrmk 4426
+VPERMQZrmkz 4427
+VPERMQZrr 4428
+VPERMQZrrk 4429
+VPERMQZrrkz 4430
+VPERMT 4431
+VPERMWZ 4432
+VPERMWZrm 4433
+VPERMWZrmk 4434
+VPERMWZrmkz 4435
+VPERMWZrr 4436
+VPERMWZrrk 4437
+VPERMWZrrkz 4438
+VPEXPANDBZ 4439
+VPEXPANDBZrm 4440
+VPEXPANDBZrmk 4441
+VPEXPANDBZrmkz 4442
+VPEXPANDBZrr 4443
+VPEXPANDBZrrk 4444
+VPEXPANDBZrrkz 4445
+VPEXPANDDZ 4446
+VPEXPANDDZrm 4447
+VPEXPANDDZrmk 4448
+VPEXPANDDZrmkz 4449
+VPEXPANDDZrr 4450
+VPEXPANDDZrrk 4451
+VPEXPANDDZrrkz 4452
+VPEXPANDQZ 4453
+VPEXPANDQZrm 4454
+VPEXPANDQZrmk 4455
+VPEXPANDQZrmkz 4456
+VPEXPANDQZrr 4457
+VPEXPANDQZrrk 4458
+VPEXPANDQZrrkz 4459
+VPEXPANDWZ 4460
+VPEXPANDWZrm 4461
+VPEXPANDWZrmk 4462
+VPEXPANDWZrmkz 4463
+VPEXPANDWZrr 4464
+VPEXPANDWZrrk 4465
+VPEXPANDWZrrkz 4466
+VPEXTRBZmri 4467
+VPEXTRBZrri 4468
+VPEXTRBmri 4469
+VPEXTRBrri 4470
+VPEXTRDZmri 4471
+VPEXTRDZrri 4472
+VPEXTRDmri 4473
+VPEXTRDrri 4474
+VPEXTRQZmri 4475
+VPEXTRQZrri 4476
+VPEXTRQmri 4477
+VPEXTRQrri 4478
+VPEXTRWZmri 4479
+VPEXTRWZrri 4480
+VPEXTRWZrri_REV 4481
+VPEXTRWmri 4482
+VPEXTRWrri 4483
+VPEXTRWrri_REV 4484
+VPGATHERDDYrm 4485
+VPGATHERDDZ 4486
+VPGATHERDDZrm 4487
+VPGATHERDDrm 4488
+VPGATHERDQYrm 4489
+VPGATHERDQZ 4490
+VPGATHERDQZrm 4491
+VPGATHERDQrm 4492
+VPGATHERQDYrm 4493
+VPGATHERQDZ 4494
+VPGATHERQDZrm 4495
+VPGATHERQDrm 4496
+VPGATHERQQYrm 4497
+VPGATHERQQZ 4498
+VPGATHERQQZrm 4499
+VPGATHERQQrm 4500
+VPHADDBDrm 4501
+VPHADDBDrr 4502
+VPHADDBQrm 4503
+VPHADDBQrr 4504
+VPHADDBWrm 4505
+VPHADDBWrr 4506
+VPHADDDQrm 4507
+VPHADDDQrr 4508
+VPHADDDYrm 4509
+VPHADDDYrr 4510
+VPHADDDrm 4511
+VPHADDDrr 4512
+VPHADDSWYrm 4513
+VPHADDSWYrr 4514
+VPHADDSWrm 4515
+VPHADDSWrr 4516
+VPHADDUBDrm 4517
+VPHADDUBDrr 4518
+VPHADDUBQrm 4519
+VPHADDUBQrr 4520
+VPHADDUBWrm 4521
+VPHADDUBWrr 4522
+VPHADDUDQrm 4523
+VPHADDUDQrr 4524
+VPHADDUWDrm 4525
+VPHADDUWDrr 4526
+VPHADDUWQrm 4527
+VPHADDUWQrr 4528
+VPHADDWDrm 4529
+VPHADDWDrr 4530
+VPHADDWQrm 4531
+VPHADDWQrr 4532
+VPHADDWYrm 4533
+VPHADDWYrr 4534
+VPHADDWrm 4535
+VPHADDWrr 4536
+VPHMINPOSUWrm 4537
+VPHMINPOSUWrr 4538
+VPHSUBBWrm 4539
+VPHSUBBWrr 4540
+VPHSUBDQrm 4541
+VPHSUBDQrr 4542
+VPHSUBDYrm 4543
+VPHSUBDYrr 4544
+VPHSUBDrm 4545
+VPHSUBDrr 4546
+VPHSUBSWYrm 4547
+VPHSUBSWYrr 4548
+VPHSUBSWrm 4549
+VPHSUBSWrr 4550
+VPHSUBWDrm 4551
+VPHSUBWDrr 4552
+VPHSUBWYrm 4553
+VPHSUBWYrr 4554
+VPHSUBWrm 4555
+VPHSUBWrr 4556
+VPINSRBZrmi 4557
+VPINSRBZrri 4558
+VPINSRBrmi 4559
+VPINSRBrri 4560
+VPINSRDZrmi 4561
+VPINSRDZrri 4562
+VPINSRDrmi 4563
+VPINSRDrri 4564
+VPINSRQZrmi 4565
+VPINSRQZrri 4566
+VPINSRQrmi 4567
+VPINSRQrri 4568
+VPINSRWZrmi 4569
+VPINSRWZrri 4570
+VPINSRWrmi 4571
+VPINSRWrri 4572
+VPLZCNTDZ 4573
+VPLZCNTDZrm 4574
+VPLZCNTDZrmb 4575
+VPLZCNTDZrmbk 4576
+VPLZCNTDZrmbkz 4577
+VPLZCNTDZrmk 4578
+VPLZCNTDZrmkz 4579
+VPLZCNTDZrr 4580
+VPLZCNTDZrrk 4581
+VPLZCNTDZrrkz 4582
+VPLZCNTQZ 4583
+VPLZCNTQZrm 4584
+VPLZCNTQZrmb 4585
+VPLZCNTQZrmbk 4586
+VPLZCNTQZrmbkz 4587
+VPLZCNTQZrmk 4588
+VPLZCNTQZrmkz 4589
+VPLZCNTQZrr 4590
+VPLZCNTQZrrk 4591
+VPLZCNTQZrrkz 4592
+VPMACSDDrm 4593
+VPMACSDDrr 4594
+VPMACSDQHrm 4595
+VPMACSDQHrr 4596
+VPMACSDQLrm 4597
+VPMACSDQLrr 4598
+VPMACSSDDrm 4599
+VPMACSSDDrr 4600
+VPMACSSDQHrm 4601
+VPMACSSDQHrr 4602
+VPMACSSDQLrm 4603
+VPMACSSDQLrr 4604
+VPMACSSWDrm 4605
+VPMACSSWDrr 4606
+VPMACSSWWrm 4607
+VPMACSSWWrr 4608
+VPMACSWDrm 4609
+VPMACSWDrr 4610
+VPMACSWWrm 4611
+VPMACSWWrr 4612
+VPMADCSSWDrm 4613
+VPMADCSSWDrr 4614
+VPMADCSWDrm 4615
+VPMADCSWDrr 4616
+VPMADD 4617
+VPMADDUBSWYrm 4618
+VPMADDUBSWYrr 4619
+VPMADDUBSWZ 4620
+VPMADDUBSWZrm 4621
+VPMADDUBSWZrmk 4622
+VPMADDUBSWZrmkz 4623
+VPMADDUBSWZrr 4624
+VPMADDUBSWZrrk 4625
+VPMADDUBSWZrrkz 4626
+VPMADDUBSWrm 4627
+VPMADDUBSWrr 4628
+VPMADDWDYrm 4629
+VPMADDWDYrr 4630
+VPMADDWDZ 4631
+VPMADDWDZrm 4632
+VPMADDWDZrmk 4633
+VPMADDWDZrmkz 4634
+VPMADDWDZrr 4635
+VPMADDWDZrrk 4636
+VPMADDWDZrrkz 4637
+VPMADDWDrm 4638
+VPMADDWDrr 4639
+VPMASKMOVDYmr 4640
+VPMASKMOVDYrm 4641
+VPMASKMOVDmr 4642
+VPMASKMOVDrm 4643
+VPMASKMOVQYmr 4644
+VPMASKMOVQYrm 4645
+VPMASKMOVQmr 4646
+VPMASKMOVQrm 4647
+VPMAXSBYrm 4648
+VPMAXSBYrr 4649
+VPMAXSBZ 4650
+VPMAXSBZrm 4651
+VPMAXSBZrmk 4652
+VPMAXSBZrmkz 4653
+VPMAXSBZrr 4654
+VPMAXSBZrrk 4655
+VPMAXSBZrrkz 4656
+VPMAXSBrm 4657
+VPMAXSBrr 4658
+VPMAXSDYrm 4659
+VPMAXSDYrr 4660
+VPMAXSDZ 4661
+VPMAXSDZrm 4662
+VPMAXSDZrmb 4663
+VPMAXSDZrmbk 4664
+VPMAXSDZrmbkz 4665
+VPMAXSDZrmk 4666
+VPMAXSDZrmkz 4667
+VPMAXSDZrr 4668
+VPMAXSDZrrk 4669
+VPMAXSDZrrkz 4670
+VPMAXSDrm 4671
+VPMAXSDrr 4672
+VPMAXSQZ 4673
+VPMAXSQZrm 4674
+VPMAXSQZrmb 4675
+VPMAXSQZrmbk 4676
+VPMAXSQZrmbkz 4677
+VPMAXSQZrmk 4678
+VPMAXSQZrmkz 4679
+VPMAXSQZrr 4680
+VPMAXSQZrrk 4681
+VPMAXSQZrrkz 4682
+VPMAXSWYrm 4683
+VPMAXSWYrr 4684
+VPMAXSWZ 4685
+VPMAXSWZrm 4686
+VPMAXSWZrmk 4687
+VPMAXSWZrmkz 4688
+VPMAXSWZrr 4689
+VPMAXSWZrrk 4690
+VPMAXSWZrrkz 4691
+VPMAXSWrm 4692
+VPMAXSWrr 4693
+VPMAXUBYrm 4694
+VPMAXUBYrr 4695
+VPMAXUBZ 4696
+VPMAXUBZrm 4697
+VPMAXUBZrmk 4698
+VPMAXUBZrmkz 4699
+VPMAXUBZrr 4700
+VPMAXUBZrrk 4701
+VPMAXUBZrrkz 4702
+VPMAXUBrm 4703
+VPMAXUBrr 4704
+VPMAXUDYrm 4705
+VPMAXUDYrr 4706
+VPMAXUDZ 4707
+VPMAXUDZrm 4708
+VPMAXUDZrmb 4709
+VPMAXUDZrmbk 4710
+VPMAXUDZrmbkz 4711
+VPMAXUDZrmk 4712
+VPMAXUDZrmkz 4713
+VPMAXUDZrr 4714
+VPMAXUDZrrk 4715
+VPMAXUDZrrkz 4716
+VPMAXUDrm 4717
+VPMAXUDrr 4718
+VPMAXUQZ 4719
+VPMAXUQZrm 4720
+VPMAXUQZrmb 4721
+VPMAXUQZrmbk 4722
+VPMAXUQZrmbkz 4723
+VPMAXUQZrmk 4724
+VPMAXUQZrmkz 4725
+VPMAXUQZrr 4726
+VPMAXUQZrrk 4727
+VPMAXUQZrrkz 4728
+VPMAXUWYrm 4729
+VPMAXUWYrr 4730
+VPMAXUWZ 4731
+VPMAXUWZrm 4732
+VPMAXUWZrmk 4733
+VPMAXUWZrmkz 4734
+VPMAXUWZrr 4735
+VPMAXUWZrrk 4736
+VPMAXUWZrrkz 4737
+VPMAXUWrm 4738
+VPMAXUWrr 4739
+VPMINSBYrm 4740
+VPMINSBYrr 4741
+VPMINSBZ 4742
+VPMINSBZrm 4743
+VPMINSBZrmk 4744
+VPMINSBZrmkz 4745
+VPMINSBZrr 4746
+VPMINSBZrrk 4747
+VPMINSBZrrkz 4748
+VPMINSBrm 4749
+VPMINSBrr 4750
+VPMINSDYrm 4751
+VPMINSDYrr 4752
+VPMINSDZ 4753
+VPMINSDZrm 4754
+VPMINSDZrmb 4755
+VPMINSDZrmbk 4756
+VPMINSDZrmbkz 4757
+VPMINSDZrmk 4758
+VPMINSDZrmkz 4759
+VPMINSDZrr 4760
+VPMINSDZrrk 4761
+VPMINSDZrrkz 4762
+VPMINSDrm 4763
+VPMINSDrr 4764
+VPMINSQZ 4765
+VPMINSQZrm 4766
+VPMINSQZrmb 4767
+VPMINSQZrmbk 4768
+VPMINSQZrmbkz 4769
+VPMINSQZrmk 4770
+VPMINSQZrmkz 4771
+VPMINSQZrr 4772
+VPMINSQZrrk 4773
+VPMINSQZrrkz 4774
+VPMINSWYrm 4775
+VPMINSWYrr 4776
+VPMINSWZ 4777
+VPMINSWZrm 4778
+VPMINSWZrmk 4779
+VPMINSWZrmkz 4780
+VPMINSWZrr 4781
+VPMINSWZrrk 4782
+VPMINSWZrrkz 4783
+VPMINSWrm 4784
+VPMINSWrr 4785
+VPMINUBYrm 4786
+VPMINUBYrr 4787
+VPMINUBZ 4788
+VPMINUBZrm 4789
+VPMINUBZrmk 4790
+VPMINUBZrmkz 4791
+VPMINUBZrr 4792
+VPMINUBZrrk 4793
+VPMINUBZrrkz 4794
+VPMINUBrm 4795
+VPMINUBrr 4796
+VPMINUDYrm 4797
+VPMINUDYrr 4798
+VPMINUDZ 4799
+VPMINUDZrm 4800
+VPMINUDZrmb 4801
+VPMINUDZrmbk 4802
+VPMINUDZrmbkz 4803
+VPMINUDZrmk 4804
+VPMINUDZrmkz 4805
+VPMINUDZrr 4806
+VPMINUDZrrk 4807
+VPMINUDZrrkz 4808
+VPMINUDrm 4809
+VPMINUDrr 4810
+VPMINUQZ 4811
+VPMINUQZrm 4812
+VPMINUQZrmb 4813
+VPMINUQZrmbk 4814
+VPMINUQZrmbkz 4815
+VPMINUQZrmk 4816
+VPMINUQZrmkz 4817
+VPMINUQZrr 4818
+VPMINUQZrrk 4819
+VPMINUQZrrkz 4820
+VPMINUWYrm 4821
+VPMINUWYrr 4822
+VPMINUWZ 4823
+VPMINUWZrm 4824
+VPMINUWZrmk 4825
+VPMINUWZrmkz 4826
+VPMINUWZrr 4827
+VPMINUWZrrk 4828
+VPMINUWZrrkz 4829
+VPMINUWrm 4830
+VPMINUWrr 4831
+VPMOVB 4832
+VPMOVD 4833
+VPMOVDBZ 4834
+VPMOVDBZmr 4835
+VPMOVDBZmrk 4836
+VPMOVDBZrr 4837
+VPMOVDBZrrk 4838
+VPMOVDBZrrkz 4839
+VPMOVDWZ 4840
+VPMOVDWZmr 4841
+VPMOVDWZmrk 4842
+VPMOVDWZrr 4843
+VPMOVDWZrrk 4844
+VPMOVDWZrrkz 4845
+VPMOVM 4846
+VPMOVMSKBYrr 4847
+VPMOVMSKBrr 4848
+VPMOVQ 4849
+VPMOVQBZ 4850
+VPMOVQBZmr 4851
+VPMOVQBZmrk 4852
+VPMOVQBZrr 4853
+VPMOVQBZrrk 4854
+VPMOVQBZrrkz 4855
+VPMOVQDZ 4856
+VPMOVQDZmr 4857
+VPMOVQDZmrk 4858
+VPMOVQDZrr 4859
+VPMOVQDZrrk 4860
+VPMOVQDZrrkz 4861
+VPMOVQWZ 4862
+VPMOVQWZmr 4863
+VPMOVQWZmrk 4864
+VPMOVQWZrr 4865
+VPMOVQWZrrk 4866
+VPMOVQWZrrkz 4867
+VPMOVSDBZ 4868
+VPMOVSDBZmr 4869
+VPMOVSDBZmrk 4870
+VPMOVSDBZrr 4871
+VPMOVSDBZrrk 4872
+VPMOVSDBZrrkz 4873
+VPMOVSDWZ 4874
+VPMOVSDWZmr 4875
+VPMOVSDWZmrk 4876
+VPMOVSDWZrr 4877
+VPMOVSDWZrrk 4878
+VPMOVSDWZrrkz 4879
+VPMOVSQBZ 4880
+VPMOVSQBZmr 4881
+VPMOVSQBZmrk 4882
+VPMOVSQBZrr 4883
+VPMOVSQBZrrk 4884
+VPMOVSQBZrrkz 4885
+VPMOVSQDZ 4886
+VPMOVSQDZmr 4887
+VPMOVSQDZmrk 4888
+VPMOVSQDZrr 4889
+VPMOVSQDZrrk 4890
+VPMOVSQDZrrkz 4891
+VPMOVSQWZ 4892
+VPMOVSQWZmr 4893
+VPMOVSQWZmrk 4894
+VPMOVSQWZrr 4895
+VPMOVSQWZrrk 4896
+VPMOVSQWZrrkz 4897
+VPMOVSWBZ 4898
+VPMOVSWBZmr 4899
+VPMOVSWBZmrk 4900
+VPMOVSWBZrr 4901
+VPMOVSWBZrrk 4902
+VPMOVSWBZrrkz 4903
+VPMOVSXBDYrm 4904
+VPMOVSXBDYrr 4905
+VPMOVSXBDZ 4906
+VPMOVSXBDZrm 4907
+VPMOVSXBDZrmk 4908
+VPMOVSXBDZrmkz 4909
+VPMOVSXBDZrr 4910
+VPMOVSXBDZrrk 4911
+VPMOVSXBDZrrkz 4912
+VPMOVSXBDrm 4913
+VPMOVSXBDrr 4914
+VPMOVSXBQYrm 4915
+VPMOVSXBQYrr 4916
+VPMOVSXBQZ 4917
+VPMOVSXBQZrm 4918
+VPMOVSXBQZrmk 4919
+VPMOVSXBQZrmkz 4920
+VPMOVSXBQZrr 4921
+VPMOVSXBQZrrk 4922
+VPMOVSXBQZrrkz 4923
+VPMOVSXBQrm 4924
+VPMOVSXBQrr 4925
+VPMOVSXBWYrm 4926
+VPMOVSXBWYrr 4927
+VPMOVSXBWZ 4928
+VPMOVSXBWZrm 4929
+VPMOVSXBWZrmk 4930
+VPMOVSXBWZrmkz 4931
+VPMOVSXBWZrr 4932
+VPMOVSXBWZrrk 4933
+VPMOVSXBWZrrkz 4934
+VPMOVSXBWrm 4935
+VPMOVSXBWrr 4936
+VPMOVSXDQYrm 4937
+VPMOVSXDQYrr 4938
+VPMOVSXDQZ 4939
+VPMOVSXDQZrm 4940
+VPMOVSXDQZrmk 4941
+VPMOVSXDQZrmkz 4942
+VPMOVSXDQZrr 4943
+VPMOVSXDQZrrk 4944
+VPMOVSXDQZrrkz 4945
+VPMOVSXDQrm 4946
+VPMOVSXDQrr 4947
+VPMOVSXWDYrm 4948
+VPMOVSXWDYrr 4949
+VPMOVSXWDZ 4950
+VPMOVSXWDZrm 4951
+VPMOVSXWDZrmk 4952
+VPMOVSXWDZrmkz 4953
+VPMOVSXWDZrr 4954
+VPMOVSXWDZrrk 4955
+VPMOVSXWDZrrkz 4956
+VPMOVSXWDrm 4957
+VPMOVSXWDrr 4958
+VPMOVSXWQYrm 4959
+VPMOVSXWQYrr 4960
+VPMOVSXWQZ 4961
+VPMOVSXWQZrm 4962
+VPMOVSXWQZrmk 4963
+VPMOVSXWQZrmkz 4964
+VPMOVSXWQZrr 4965
+VPMOVSXWQZrrk 4966
+VPMOVSXWQZrrkz 4967
+VPMOVSXWQrm 4968
+VPMOVSXWQrr 4969
+VPMOVUSDBZ 4970
+VPMOVUSDBZmr 4971
+VPMOVUSDBZmrk 4972
+VPMOVUSDBZrr 4973
+VPMOVUSDBZrrk 4974
+VPMOVUSDBZrrkz 4975
+VPMOVUSDWZ 4976
+VPMOVUSDWZmr 4977
+VPMOVUSDWZmrk 4978
+VPMOVUSDWZrr 4979
+VPMOVUSDWZrrk 4980
+VPMOVUSDWZrrkz 4981
+VPMOVUSQBZ 4982
+VPMOVUSQBZmr 4983
+VPMOVUSQBZmrk 4984
+VPMOVUSQBZrr 4985
+VPMOVUSQBZrrk 4986
+VPMOVUSQBZrrkz 4987
+VPMOVUSQDZ 4988
+VPMOVUSQDZmr 4989
+VPMOVUSQDZmrk 4990
+VPMOVUSQDZrr 4991
+VPMOVUSQDZrrk 4992
+VPMOVUSQDZrrkz 4993
+VPMOVUSQWZ 4994
+VPMOVUSQWZmr 4995
+VPMOVUSQWZmrk 4996
+VPMOVUSQWZrr 4997
+VPMOVUSQWZrrk 4998
+VPMOVUSQWZrrkz 4999
+VPMOVUSWBZ 5000
+VPMOVUSWBZmr 5001
+VPMOVUSWBZmrk 5002
+VPMOVUSWBZrr 5003
+VPMOVUSWBZrrk 5004
+VPMOVUSWBZrrkz 5005
+VPMOVW 5006
+VPMOVWBZ 5007
+VPMOVWBZmr 5008
+VPMOVWBZmrk 5009
+VPMOVWBZrr 5010
+VPMOVWBZrrk 5011
+VPMOVWBZrrkz 5012
+VPMOVZXBDYrm 5013
+VPMOVZXBDYrr 5014
+VPMOVZXBDZ 5015
+VPMOVZXBDZrm 5016
+VPMOVZXBDZrmk 5017
+VPMOVZXBDZrmkz 5018
+VPMOVZXBDZrr 5019
+VPMOVZXBDZrrk 5020
+VPMOVZXBDZrrkz 5021
+VPMOVZXBDrm 5022
+VPMOVZXBDrr 5023
+VPMOVZXBQYrm 5024
+VPMOVZXBQYrr 5025
+VPMOVZXBQZ 5026
+VPMOVZXBQZrm 5027
+VPMOVZXBQZrmk 5028
+VPMOVZXBQZrmkz 5029
+VPMOVZXBQZrr 5030
+VPMOVZXBQZrrk 5031
+VPMOVZXBQZrrkz 5032
+VPMOVZXBQrm 5033
+VPMOVZXBQrr 5034
+VPMOVZXBWYrm 5035
+VPMOVZXBWYrr 5036
+VPMOVZXBWZ 5037
+VPMOVZXBWZrm 5038
+VPMOVZXBWZrmk 5039
+VPMOVZXBWZrmkz 5040
+VPMOVZXBWZrr 5041
+VPMOVZXBWZrrk 5042
+VPMOVZXBWZrrkz 5043
+VPMOVZXBWrm 5044
+VPMOVZXBWrr 5045
+VPMOVZXDQYrm 5046
+VPMOVZXDQYrr 5047
+VPMOVZXDQZ 5048
+VPMOVZXDQZrm 5049
+VPMOVZXDQZrmk 5050
+VPMOVZXDQZrmkz 5051
+VPMOVZXDQZrr 5052
+VPMOVZXDQZrrk 5053
+VPMOVZXDQZrrkz 5054
+VPMOVZXDQrm 5055
+VPMOVZXDQrr 5056
+VPMOVZXWDYrm 5057
+VPMOVZXWDYrr 5058
+VPMOVZXWDZ 5059
+VPMOVZXWDZrm 5060
+VPMOVZXWDZrmk 5061
+VPMOVZXWDZrmkz 5062
+VPMOVZXWDZrr 5063
+VPMOVZXWDZrrk 5064
+VPMOVZXWDZrrkz 5065
+VPMOVZXWDrm 5066
+VPMOVZXWDrr 5067
+VPMOVZXWQYrm 5068
+VPMOVZXWQYrr 5069
+VPMOVZXWQZ 5070
+VPMOVZXWQZrm 5071
+VPMOVZXWQZrmk 5072
+VPMOVZXWQZrmkz 5073
+VPMOVZXWQZrr 5074
+VPMOVZXWQZrrk 5075
+VPMOVZXWQZrrkz 5076
+VPMOVZXWQrm 5077
+VPMOVZXWQrr 5078
+VPMULDQYrm 5079
+VPMULDQYrr 5080
+VPMULDQZ 5081
+VPMULDQZrm 5082
+VPMULDQZrmb 5083
+VPMULDQZrmbk 5084
+VPMULDQZrmbkz 5085
+VPMULDQZrmk 5086
+VPMULDQZrmkz 5087
+VPMULDQZrr 5088
+VPMULDQZrrk 5089
+VPMULDQZrrkz 5090
+VPMULDQrm 5091
+VPMULDQrr 5092
+VPMULHRSWYrm 5093
+VPMULHRSWYrr 5094
+VPMULHRSWZ 5095
+VPMULHRSWZrm 5096
+VPMULHRSWZrmk 5097
+VPMULHRSWZrmkz 5098
+VPMULHRSWZrr 5099
+VPMULHRSWZrrk 5100
+VPMULHRSWZrrkz 5101
+VPMULHRSWrm 5102
+VPMULHRSWrr 5103
+VPMULHUWYrm 5104
+VPMULHUWYrr 5105
+VPMULHUWZ 5106
+VPMULHUWZrm 5107
+VPMULHUWZrmk 5108
+VPMULHUWZrmkz 5109
+VPMULHUWZrr 5110
+VPMULHUWZrrk 5111
+VPMULHUWZrrkz 5112
+VPMULHUWrm 5113
+VPMULHUWrr 5114
+VPMULHWYrm 5115
+VPMULHWYrr 5116
+VPMULHWZ 5117
+VPMULHWZrm 5118
+VPMULHWZrmk 5119
+VPMULHWZrmkz 5120
+VPMULHWZrr 5121
+VPMULHWZrrk 5122
+VPMULHWZrrkz 5123
+VPMULHWrm 5124
+VPMULHWrr 5125
+VPMULLDYrm 5126
+VPMULLDYrr 5127
+VPMULLDZ 5128
+VPMULLDZrm 5129
+VPMULLDZrmb 5130
+VPMULLDZrmbk 5131
+VPMULLDZrmbkz 5132
+VPMULLDZrmk 5133
+VPMULLDZrmkz 5134
+VPMULLDZrr 5135
+VPMULLDZrrk 5136
+VPMULLDZrrkz 5137
+VPMULLDrm 5138
+VPMULLDrr 5139
+VPMULLQZ 5140
+VPMULLQZrm 5141
+VPMULLQZrmb 5142
+VPMULLQZrmbk 5143
+VPMULLQZrmbkz 5144
+VPMULLQZrmk 5145
+VPMULLQZrmkz 5146
+VPMULLQZrr 5147
+VPMULLQZrrk 5148
+VPMULLQZrrkz 5149
+VPMULLWYrm 5150
+VPMULLWYrr 5151
+VPMULLWZ 5152
+VPMULLWZrm 5153
+VPMULLWZrmk 5154
+VPMULLWZrmkz 5155
+VPMULLWZrr 5156
+VPMULLWZrrk 5157
+VPMULLWZrrkz 5158
+VPMULLWrm 5159
+VPMULLWrr 5160
+VPMULTISHIFTQBZ 5161
+VPMULTISHIFTQBZrm 5162
+VPMULTISHIFTQBZrmb 5163
+VPMULTISHIFTQBZrmbk 5164
+VPMULTISHIFTQBZrmbkz 5165
+VPMULTISHIFTQBZrmk 5166
+VPMULTISHIFTQBZrmkz 5167
+VPMULTISHIFTQBZrr 5168
+VPMULTISHIFTQBZrrk 5169
+VPMULTISHIFTQBZrrkz 5170
+VPMULUDQYrm 5171
+VPMULUDQYrr 5172
+VPMULUDQZ 5173
+VPMULUDQZrm 5174
+VPMULUDQZrmb 5175
+VPMULUDQZrmbk 5176
+VPMULUDQZrmbkz 5177
+VPMULUDQZrmk 5178
+VPMULUDQZrmkz 5179
+VPMULUDQZrr 5180
+VPMULUDQZrrk 5181
+VPMULUDQZrrkz 5182
+VPMULUDQrm 5183
+VPMULUDQrr 5184
+VPOPCNTBZ 5185
+VPOPCNTBZrm 5186
+VPOPCNTBZrmk 5187
+VPOPCNTBZrmkz 5188
+VPOPCNTBZrr 5189
+VPOPCNTBZrrk 5190
+VPOPCNTBZrrkz 5191
+VPOPCNTDZ 5192
+VPOPCNTDZrm 5193
+VPOPCNTDZrmb 5194
+VPOPCNTDZrmbk 5195
+VPOPCNTDZrmbkz 5196
+VPOPCNTDZrmk 5197
+VPOPCNTDZrmkz 5198
+VPOPCNTDZrr 5199
+VPOPCNTDZrrk 5200
+VPOPCNTDZrrkz 5201
+VPOPCNTQZ 5202
+VPOPCNTQZrm 5203
+VPOPCNTQZrmb 5204
+VPOPCNTQZrmbk 5205
+VPOPCNTQZrmbkz 5206
+VPOPCNTQZrmk 5207
+VPOPCNTQZrmkz 5208
+VPOPCNTQZrr 5209
+VPOPCNTQZrrk 5210
+VPOPCNTQZrrkz 5211
+VPOPCNTWZ 5212
+VPOPCNTWZrm 5213
+VPOPCNTWZrmk 5214
+VPOPCNTWZrmkz 5215
+VPOPCNTWZrr 5216
+VPOPCNTWZrrk 5217
+VPOPCNTWZrrkz 5218
+VPORDZ 5219
+VPORDZrm 5220
+VPORDZrmb 5221
+VPORDZrmbk 5222
+VPORDZrmbkz 5223
+VPORDZrmk 5224
+VPORDZrmkz 5225
+VPORDZrr 5226
+VPORDZrrk 5227
+VPORDZrrkz 5228
+VPORQZ 5229
+VPORQZrm 5230
+VPORQZrmb 5231
+VPORQZrmbk 5232
+VPORQZrmbkz 5233
+VPORQZrmk 5234
+VPORQZrmkz 5235
+VPORQZrr 5236
+VPORQZrrk 5237
+VPORQZrrkz 5238
+VPORYrm 5239
+VPORYrr 5240
+VPORrm 5241
+VPORrr 5242
+VPPERMrmr 5243
+VPPERMrrm 5244
+VPPERMrrr 5245
+VPPERMrrr_REV 5246
+VPROLDZ 5247
+VPROLDZmbi 5248
+VPROLDZmbik 5249
+VPROLDZmbikz 5250
+VPROLDZmi 5251
+VPROLDZmik 5252
+VPROLDZmikz 5253
+VPROLDZri 5254
+VPROLDZrik 5255
+VPROLDZrikz 5256
+VPROLQZ 5257
+VPROLQZmbi 5258
+VPROLQZmbik 5259
+VPROLQZmbikz 5260
+VPROLQZmi 5261
+VPROLQZmik 5262
+VPROLQZmikz 5263
+VPROLQZri 5264
+VPROLQZrik 5265
+VPROLQZrikz 5266
+VPROLVDZ 5267
+VPROLVDZrm 5268
+VPROLVDZrmb 5269
+VPROLVDZrmbk 5270
+VPROLVDZrmbkz 5271
+VPROLVDZrmk 5272
+VPROLVDZrmkz 5273
+VPROLVDZrr 5274
+VPROLVDZrrk 5275
+VPROLVDZrrkz 5276
+VPROLVQZ 5277
+VPROLVQZrm 5278
+VPROLVQZrmb 5279
+VPROLVQZrmbk 5280
+VPROLVQZrmbkz 5281
+VPROLVQZrmk 5282
+VPROLVQZrmkz 5283
+VPROLVQZrr 5284
+VPROLVQZrrk 5285
+VPROLVQZrrkz 5286
+VPRORDZ 5287
+VPRORDZmbi 5288
+VPRORDZmbik 5289
+VPRORDZmbikz 5290
+VPRORDZmi 5291
+VPRORDZmik 5292
+VPRORDZmikz 5293
+VPRORDZri 5294
+VPRORDZrik 5295
+VPRORDZrikz 5296
+VPRORQZ 5297
+VPRORQZmbi 5298
+VPRORQZmbik 5299
+VPRORQZmbikz 5300
+VPRORQZmi 5301
+VPRORQZmik 5302
+VPRORQZmikz 5303
+VPRORQZri 5304
+VPRORQZrik 5305
+VPRORQZrikz 5306
+VPRORVDZ 5307
+VPRORVDZrm 5308
+VPRORVDZrmb 5309
+VPRORVDZrmbk 5310
+VPRORVDZrmbkz 5311
+VPRORVDZrmk 5312
+VPRORVDZrmkz 5313
+VPRORVDZrr 5314
+VPRORVDZrrk 5315
+VPRORVDZrrkz 5316
+VPRORVQZ 5317
+VPRORVQZrm 5318
+VPRORVQZrmb 5319
+VPRORVQZrmbk 5320
+VPRORVQZrmbkz 5321
+VPRORVQZrmk 5322
+VPRORVQZrmkz 5323
+VPRORVQZrr 5324
+VPRORVQZrrk 5325
+VPRORVQZrrkz 5326
+VPROTBmi 5327
+VPROTBmr 5328
+VPROTBri 5329
+VPROTBrm 5330
+VPROTBrr 5331
+VPROTBrr_REV 5332
+VPROTDmi 5333
+VPROTDmr 5334
+VPROTDri 5335
+VPROTDrm 5336
+VPROTDrr 5337
+VPROTDrr_REV 5338
+VPROTQmi 5339
+VPROTQmr 5340
+VPROTQri 5341
+VPROTQrm 5342
+VPROTQrr 5343
+VPROTQrr_REV 5344
+VPROTWmi 5345
+VPROTWmr 5346
+VPROTWri 5347
+VPROTWrm 5348
+VPROTWrr 5349
+VPROTWrr_REV 5350
+VPSADBWYrm 5351
+VPSADBWYrr 5352
+VPSADBWZ 5353
+VPSADBWZrm 5354
+VPSADBWZrr 5355
+VPSADBWrm 5356
+VPSADBWrr 5357
+VPSCATTERDDZ 5358
+VPSCATTERDDZmr 5359
+VPSCATTERDQZ 5360
+VPSCATTERDQZmr 5361
+VPSCATTERQDZ 5362
+VPSCATTERQDZmr 5363
+VPSCATTERQQZ 5364
+VPSCATTERQQZmr 5365
+VPSHABmr 5366
+VPSHABrm 5367
+VPSHABrr 5368
+VPSHABrr_REV 5369
+VPSHADmr 5370
+VPSHADrm 5371
+VPSHADrr 5372
+VPSHADrr_REV 5373
+VPSHAQmr 5374
+VPSHAQrm 5375
+VPSHAQrr 5376
+VPSHAQrr_REV 5377
+VPSHAWmr 5378
+VPSHAWrm 5379
+VPSHAWrr 5380
+VPSHAWrr_REV 5381
+VPSHLBmr 5382
+VPSHLBrm 5383
+VPSHLBrr 5384
+VPSHLBrr_REV 5385
+VPSHLDDZ 5386
+VPSHLDDZrmbi 5387
+VPSHLDDZrmbik 5388
+VPSHLDDZrmbikz 5389
+VPSHLDDZrmi 5390
+VPSHLDDZrmik 5391
+VPSHLDDZrmikz 5392
+VPSHLDDZrri 5393
+VPSHLDDZrrik 5394
+VPSHLDDZrrikz 5395
+VPSHLDQZ 5396
+VPSHLDQZrmbi 5397
+VPSHLDQZrmbik 5398
+VPSHLDQZrmbikz 5399
+VPSHLDQZrmi 5400
+VPSHLDQZrmik 5401
+VPSHLDQZrmikz 5402
+VPSHLDQZrri 5403
+VPSHLDQZrrik 5404
+VPSHLDQZrrikz 5405
+VPSHLDVDZ 5406
+VPSHLDVDZm 5407
+VPSHLDVDZmb 5408
+VPSHLDVDZmbk 5409
+VPSHLDVDZmbkz 5410
+VPSHLDVDZmk 5411
+VPSHLDVDZmkz 5412
+VPSHLDVDZr 5413
+VPSHLDVDZrk 5414
+VPSHLDVDZrkz 5415
+VPSHLDVQZ 5416
+VPSHLDVQZm 5417
+VPSHLDVQZmb 5418
+VPSHLDVQZmbk 5419
+VPSHLDVQZmbkz 5420
+VPSHLDVQZmk 5421
+VPSHLDVQZmkz 5422
+VPSHLDVQZr 5423
+VPSHLDVQZrk 5424
+VPSHLDVQZrkz 5425
+VPSHLDVWZ 5426
+VPSHLDVWZm 5427
+VPSHLDVWZmk 5428
+VPSHLDVWZmkz 5429
+VPSHLDVWZr 5430
+VPSHLDVWZrk 5431
+VPSHLDVWZrkz 5432
+VPSHLDWZ 5433
+VPSHLDWZrmi 5434
+VPSHLDWZrmik 5435
+VPSHLDWZrmikz 5436
+VPSHLDWZrri 5437
+VPSHLDWZrrik 5438
+VPSHLDWZrrikz 5439
+VPSHLDmr 5440
+VPSHLDrm 5441
+VPSHLDrr 5442
+VPSHLDrr_REV 5443
+VPSHLQmr 5444
+VPSHLQrm 5445
+VPSHLQrr 5446
+VPSHLQrr_REV 5447
+VPSHLWmr 5448
+VPSHLWrm 5449
+VPSHLWrr 5450
+VPSHLWrr_REV 5451
+VPSHRDDZ 5452
+VPSHRDDZrmbi 5453
+VPSHRDDZrmbik 5454
+VPSHRDDZrmbikz 5455
+VPSHRDDZrmi 5456
+VPSHRDDZrmik 5457
+VPSHRDDZrmikz 5458
+VPSHRDDZrri 5459
+VPSHRDDZrrik 5460
+VPSHRDDZrrikz 5461
+VPSHRDQZ 5462
+VPSHRDQZrmbi 5463
+VPSHRDQZrmbik 5464
+VPSHRDQZrmbikz 5465
+VPSHRDQZrmi 5466
+VPSHRDQZrmik 5467
+VPSHRDQZrmikz 5468
+VPSHRDQZrri 5469
+VPSHRDQZrrik 5470
+VPSHRDQZrrikz 5471
+VPSHRDVDZ 5472
+VPSHRDVDZm 5473
+VPSHRDVDZmb 5474
+VPSHRDVDZmbk 5475
+VPSHRDVDZmbkz 5476
+VPSHRDVDZmk 5477
+VPSHRDVDZmkz 5478
+VPSHRDVDZr 5479
+VPSHRDVDZrk 5480
+VPSHRDVDZrkz 5481
+VPSHRDVQZ 5482
+VPSHRDVQZm 5483
+VPSHRDVQZmb 5484
+VPSHRDVQZmbk 5485
+VPSHRDVQZmbkz 5486
+VPSHRDVQZmk 5487
+VPSHRDVQZmkz 5488
+VPSHRDVQZr 5489
+VPSHRDVQZrk 5490
+VPSHRDVQZrkz 5491
+VPSHRDVWZ 5492
+VPSHRDVWZm 5493
+VPSHRDVWZmk 5494
+VPSHRDVWZmkz 5495
+VPSHRDVWZr 5496
+VPSHRDVWZrk 5497
+VPSHRDVWZrkz 5498
+VPSHRDWZ 5499
+VPSHRDWZrmi 5500
+VPSHRDWZrmik 5501
+VPSHRDWZrmikz 5502
+VPSHRDWZrri 5503
+VPSHRDWZrrik 5504
+VPSHRDWZrrikz 5505
+VPSHUFBITQMBZ 5506
+VPSHUFBITQMBZrm 5507
+VPSHUFBITQMBZrmk 5508
+VPSHUFBITQMBZrr 5509
+VPSHUFBITQMBZrrk 5510
+VPSHUFBYrm 5511
+VPSHUFBYrr 5512
+VPSHUFBZ 5513
+VPSHUFBZrm 5514
+VPSHUFBZrmk 5515
+VPSHUFBZrmkz 5516
+VPSHUFBZrr 5517
+VPSHUFBZrrk 5518
+VPSHUFBZrrkz 5519
+VPSHUFBrm 5520
+VPSHUFBrr 5521
+VPSHUFDYmi 5522
+VPSHUFDYri 5523
+VPSHUFDZ 5524
+VPSHUFDZmbi 5525
+VPSHUFDZmbik 5526
+VPSHUFDZmbikz 5527
+VPSHUFDZmi 5528
+VPSHUFDZmik 5529
+VPSHUFDZmikz 5530
+VPSHUFDZri 5531
+VPSHUFDZrik 5532
+VPSHUFDZrikz 5533
+VPSHUFDmi 5534
+VPSHUFDri 5535
+VPSHUFHWYmi 5536
+VPSHUFHWYri 5537
+VPSHUFHWZ 5538
+VPSHUFHWZmi 5539
+VPSHUFHWZmik 5540
+VPSHUFHWZmikz 5541
+VPSHUFHWZri 5542
+VPSHUFHWZrik 5543
+VPSHUFHWZrikz 5544
+VPSHUFHWmi 5545
+VPSHUFHWri 5546
+VPSHUFLWYmi 5547
+VPSHUFLWYri 5548
+VPSHUFLWZ 5549
+VPSHUFLWZmi 5550
+VPSHUFLWZmik 5551
+VPSHUFLWZmikz 5552
+VPSHUFLWZri 5553
+VPSHUFLWZrik 5554
+VPSHUFLWZrikz 5555
+VPSHUFLWmi 5556
+VPSHUFLWri 5557
+VPSIGNBYrm 5558
+VPSIGNBYrr 5559
+VPSIGNBrm 5560
+VPSIGNBrr 5561
+VPSIGNDYrm 5562
+VPSIGNDYrr 5563
+VPSIGNDrm 5564
+VPSIGNDrr 5565
+VPSIGNWYrm 5566
+VPSIGNWYrr 5567
+VPSIGNWrm 5568
+VPSIGNWrr 5569
+VPSLLDQYri 5570
+VPSLLDQZ 5571
+VPSLLDQZmi 5572
+VPSLLDQZri 5573
+VPSLLDQri 5574
+VPSLLDYri 5575
+VPSLLDYrm 5576
+VPSLLDYrr 5577
+VPSLLDZ 5578
+VPSLLDZmbi 5579
+VPSLLDZmbik 5580
+VPSLLDZmbikz 5581
+VPSLLDZmi 5582
+VPSLLDZmik 5583
+VPSLLDZmikz 5584
+VPSLLDZri 5585
+VPSLLDZrik 5586
+VPSLLDZrikz 5587
+VPSLLDZrm 5588
+VPSLLDZrmk 5589
+VPSLLDZrmkz 5590
+VPSLLDZrr 5591
+VPSLLDZrrk 5592
+VPSLLDZrrkz 5593
+VPSLLDri 5594
+VPSLLDrm 5595
+VPSLLDrr 5596
+VPSLLQYri 5597
+VPSLLQYrm 5598
+VPSLLQYrr 5599
+VPSLLQZ 5600
+VPSLLQZmbi 5601
+VPSLLQZmbik 5602
+VPSLLQZmbikz 5603
+VPSLLQZmi 5604
+VPSLLQZmik 5605
+VPSLLQZmikz 5606
+VPSLLQZri 5607
+VPSLLQZrik 5608
+VPSLLQZrikz 5609
+VPSLLQZrm 5610
+VPSLLQZrmk 5611
+VPSLLQZrmkz 5612
+VPSLLQZrr 5613
+VPSLLQZrrk 5614
+VPSLLQZrrkz 5615
+VPSLLQri 5616
+VPSLLQrm 5617
+VPSLLQrr 5618
+VPSLLVDYrm 5619
+VPSLLVDYrr 5620
+VPSLLVDZ 5621
+VPSLLVDZrm 5622
+VPSLLVDZrmb 5623
+VPSLLVDZrmbk 5624
+VPSLLVDZrmbkz 5625
+VPSLLVDZrmk 5626
+VPSLLVDZrmkz 5627
+VPSLLVDZrr 5628
+VPSLLVDZrrk 5629
+VPSLLVDZrrkz 5630
+VPSLLVDrm 5631
+VPSLLVDrr 5632
+VPSLLVQYrm 5633
+VPSLLVQYrr 5634
+VPSLLVQZ 5635
+VPSLLVQZrm 5636
+VPSLLVQZrmb 5637
+VPSLLVQZrmbk 5638
+VPSLLVQZrmbkz 5639
+VPSLLVQZrmk 5640
+VPSLLVQZrmkz 5641
+VPSLLVQZrr 5642
+VPSLLVQZrrk 5643
+VPSLLVQZrrkz 5644
+VPSLLVQrm 5645
+VPSLLVQrr 5646
+VPSLLVWZ 5647
+VPSLLVWZrm 5648
+VPSLLVWZrmk 5649
+VPSLLVWZrmkz 5650
+VPSLLVWZrr 5651
+VPSLLVWZrrk 5652
+VPSLLVWZrrkz 5653
+VPSLLWYri 5654
+VPSLLWYrm 5655
+VPSLLWYrr 5656
+VPSLLWZ 5657
+VPSLLWZmi 5658
+VPSLLWZmik 5659
+VPSLLWZmikz 5660
+VPSLLWZri 5661
+VPSLLWZrik 5662
+VPSLLWZrikz 5663
+VPSLLWZrm 5664
+VPSLLWZrmk 5665
+VPSLLWZrmkz 5666
+VPSLLWZrr 5667
+VPSLLWZrrk 5668
+VPSLLWZrrkz 5669
+VPSLLWri 5670
+VPSLLWrm 5671
+VPSLLWrr 5672
+VPSRADYri 5673
+VPSRADYrm 5674
+VPSRADYrr 5675
+VPSRADZ 5676
+VPSRADZmbi 5677
+VPSRADZmbik 5678
+VPSRADZmbikz 5679
+VPSRADZmi 5680
+VPSRADZmik 5681
+VPSRADZmikz 5682
+VPSRADZri 5683
+VPSRADZrik 5684
+VPSRADZrikz 5685
+VPSRADZrm 5686
+VPSRADZrmk 5687
+VPSRADZrmkz 5688
+VPSRADZrr 5689
+VPSRADZrrk 5690
+VPSRADZrrkz 5691
+VPSRADri 5692
+VPSRADrm 5693
+VPSRADrr 5694
+VPSRAQZ 5695
+VPSRAQZmbi 5696
+VPSRAQZmbik 5697
+VPSRAQZmbikz 5698
+VPSRAQZmi 5699
+VPSRAQZmik 5700
+VPSRAQZmikz 5701
+VPSRAQZri 5702
+VPSRAQZrik 5703
+VPSRAQZrikz 5704
+VPSRAQZrm 5705
+VPSRAQZrmk 5706
+VPSRAQZrmkz 5707
+VPSRAQZrr 5708
+VPSRAQZrrk 5709
+VPSRAQZrrkz 5710
+VPSRAVDYrm 5711
+VPSRAVDYrr 5712
+VPSRAVDZ 5713
+VPSRAVDZrm 5714
+VPSRAVDZrmb 5715
+VPSRAVDZrmbk 5716
+VPSRAVDZrmbkz 5717
+VPSRAVDZrmk 5718
+VPSRAVDZrmkz 5719
+VPSRAVDZrr 5720
+VPSRAVDZrrk 5721
+VPSRAVDZrrkz 5722
+VPSRAVDrm 5723
+VPSRAVDrr 5724
+VPSRAVQZ 5725
+VPSRAVQZrm 5726
+VPSRAVQZrmb 5727
+VPSRAVQZrmbk 5728
+VPSRAVQZrmbkz 5729
+VPSRAVQZrmk 5730
+VPSRAVQZrmkz 5731
+VPSRAVQZrr 5732
+VPSRAVQZrrk 5733
+VPSRAVQZrrkz 5734
+VPSRAVWZ 5735
+VPSRAVWZrm 5736
+VPSRAVWZrmk 5737
+VPSRAVWZrmkz 5738
+VPSRAVWZrr 5739
+VPSRAVWZrrk 5740
+VPSRAVWZrrkz 5741
+VPSRAWYri 5742
+VPSRAWYrm 5743
+VPSRAWYrr 5744
+VPSRAWZ 5745
+VPSRAWZmi 5746
+VPSRAWZmik 5747
+VPSRAWZmikz 5748
+VPSRAWZri 5749
+VPSRAWZrik 5750
+VPSRAWZrikz 5751
+VPSRAWZrm 5752
+VPSRAWZrmk 5753
+VPSRAWZrmkz 5754
+VPSRAWZrr 5755
+VPSRAWZrrk 5756
+VPSRAWZrrkz 5757
+VPSRAWri 5758
+VPSRAWrm 5759
+VPSRAWrr 5760
+VPSRLDQYri 5761
+VPSRLDQZ 5762
+VPSRLDQZmi 5763
+VPSRLDQZri 5764
+VPSRLDQri 5765
+VPSRLDYri 5766
+VPSRLDYrm 5767
+VPSRLDYrr 5768
+VPSRLDZ 5769
+VPSRLDZmbi 5770
+VPSRLDZmbik 5771
+VPSRLDZmbikz 5772
+VPSRLDZmi 5773
+VPSRLDZmik 5774
+VPSRLDZmikz 5775
+VPSRLDZri 5776
+VPSRLDZrik 5777
+VPSRLDZrikz 5778
+VPSRLDZrm 5779
+VPSRLDZrmk 5780
+VPSRLDZrmkz 5781
+VPSRLDZrr 5782
+VPSRLDZrrk 5783
+VPSRLDZrrkz 5784
+VPSRLDri 5785
+VPSRLDrm 5786
+VPSRLDrr 5787
+VPSRLQYri 5788
+VPSRLQYrm 5789
+VPSRLQYrr 5790
+VPSRLQZ 5791
+VPSRLQZmbi 5792
+VPSRLQZmbik 5793
+VPSRLQZmbikz 5794
+VPSRLQZmi 5795
+VPSRLQZmik 5796
+VPSRLQZmikz 5797
+VPSRLQZri 5798
+VPSRLQZrik 5799
+VPSRLQZrikz 5800
+VPSRLQZrm 5801
+VPSRLQZrmk 5802
+VPSRLQZrmkz 5803
+VPSRLQZrr 5804
+VPSRLQZrrk 5805
+VPSRLQZrrkz 5806
+VPSRLQri 5807
+VPSRLQrm 5808
+VPSRLQrr 5809
+VPSRLVDYrm 5810
+VPSRLVDYrr 5811
+VPSRLVDZ 5812
+VPSRLVDZrm 5813
+VPSRLVDZrmb 5814
+VPSRLVDZrmbk 5815
+VPSRLVDZrmbkz 5816
+VPSRLVDZrmk 5817
+VPSRLVDZrmkz 5818
+VPSRLVDZrr 5819
+VPSRLVDZrrk 5820
+VPSRLVDZrrkz 5821
+VPSRLVDrm 5822
+VPSRLVDrr 5823
+VPSRLVQYrm 5824
+VPSRLVQYrr 5825
+VPSRLVQZ 5826
+VPSRLVQZrm 5827
+VPSRLVQZrmb 5828
+VPSRLVQZrmbk 5829
+VPSRLVQZrmbkz 5830
+VPSRLVQZrmk 5831
+VPSRLVQZrmkz 5832
+VPSRLVQZrr 5833
+VPSRLVQZrrk 5834
+VPSRLVQZrrkz 5835
+VPSRLVQrm 5836
+VPSRLVQrr 5837
+VPSRLVWZ 5838
+VPSRLVWZrm 5839
+VPSRLVWZrmk 5840
+VPSRLVWZrmkz 5841
+VPSRLVWZrr 5842
+VPSRLVWZrrk 5843
+VPSRLVWZrrkz 5844
+VPSRLWYri 5845
+VPSRLWYrm 5846
+VPSRLWYrr 5847
+VPSRLWZ 5848
+VPSRLWZmi 5849
+VPSRLWZmik 5850
+VPSRLWZmikz 5851
+VPSRLWZri 5852
+VPSRLWZrik 5853
+VPSRLWZrikz 5854
+VPSRLWZrm 5855
+VPSRLWZrmk 5856
+VPSRLWZrmkz 5857
+VPSRLWZrr 5858
+VPSRLWZrrk 5859
+VPSRLWZrrkz 5860
+VPSRLWri 5861
+VPSRLWrm 5862
+VPSRLWrr 5863
+VPSUBBYrm 5864
+VPSUBBYrr 5865
+VPSUBBZ 5866
+VPSUBBZrm 5867
+VPSUBBZrmk 5868
+VPSUBBZrmkz 5869
+VPSUBBZrr 5870
+VPSUBBZrrk 5871
+VPSUBBZrrkz 5872
+VPSUBBrm 5873
+VPSUBBrr 5874
+VPSUBDYrm 5875
+VPSUBDYrr 5876
+VPSUBDZ 5877
+VPSUBDZrm 5878
+VPSUBDZrmb 5879
+VPSUBDZrmbk 5880
+VPSUBDZrmbkz 5881
+VPSUBDZrmk 5882
+VPSUBDZrmkz 5883
+VPSUBDZrr 5884
+VPSUBDZrrk 5885
+VPSUBDZrrkz 5886
+VPSUBDrm 5887
+VPSUBDrr 5888
+VPSUBQYrm 5889
+VPSUBQYrr 5890
+VPSUBQZ 5891
+VPSUBQZrm 5892
+VPSUBQZrmb 5893
+VPSUBQZrmbk 5894
+VPSUBQZrmbkz 5895
+VPSUBQZrmk 5896
+VPSUBQZrmkz 5897
+VPSUBQZrr 5898
+VPSUBQZrrk 5899
+VPSUBQZrrkz 5900
+VPSUBQrm 5901
+VPSUBQrr 5902
+VPSUBSBYrm 5903
+VPSUBSBYrr 5904
+VPSUBSBZ 5905
+VPSUBSBZrm 5906
+VPSUBSBZrmk 5907
+VPSUBSBZrmkz 5908
+VPSUBSBZrr 5909
+VPSUBSBZrrk 5910
+VPSUBSBZrrkz 5911
+VPSUBSBrm 5912
+VPSUBSBrr 5913
+VPSUBSWYrm 5914
+VPSUBSWYrr 5915
+VPSUBSWZ 5916
+VPSUBSWZrm 5917
+VPSUBSWZrmk 5918
+VPSUBSWZrmkz 5919
+VPSUBSWZrr 5920
+VPSUBSWZrrk 5921
+VPSUBSWZrrkz 5922
+VPSUBSWrm 5923
+VPSUBSWrr 5924
+VPSUBUSBYrm 5925
+VPSUBUSBYrr 5926
+VPSUBUSBZ 5927
+VPSUBUSBZrm 5928
+VPSUBUSBZrmk 5929
+VPSUBUSBZrmkz 5930
+VPSUBUSBZrr 5931
+VPSUBUSBZrrk 5932
+VPSUBUSBZrrkz 5933
+VPSUBUSBrm 5934
+VPSUBUSBrr 5935
+VPSUBUSWYrm 5936
+VPSUBUSWYrr 5937
+VPSUBUSWZ 5938
+VPSUBUSWZrm 5939
+VPSUBUSWZrmk 5940
+VPSUBUSWZrmkz 5941
+VPSUBUSWZrr 5942
+VPSUBUSWZrrk 5943
+VPSUBUSWZrrkz 5944
+VPSUBUSWrm 5945
+VPSUBUSWrr 5946
+VPSUBWYrm 5947
+VPSUBWYrr 5948
+VPSUBWZ 5949
+VPSUBWZrm 5950
+VPSUBWZrmk 5951
+VPSUBWZrmkz 5952
+VPSUBWZrr 5953
+VPSUBWZrrk 5954
+VPSUBWZrrkz 5955
+VPSUBWrm 5956
+VPSUBWrr 5957
+VPTERNLOGDZ 5958
+VPTERNLOGDZrmbi 5959
+VPTERNLOGDZrmbik 5960
+VPTERNLOGDZrmbikz 5961
+VPTERNLOGDZrmi 5962
+VPTERNLOGDZrmik 5963
+VPTERNLOGDZrmikz 5964
+VPTERNLOGDZrri 5965
+VPTERNLOGDZrrik 5966
+VPTERNLOGDZrrikz 5967
+VPTERNLOGQZ 5968
+VPTERNLOGQZrmbi 5969
+VPTERNLOGQZrmbik 5970
+VPTERNLOGQZrmbikz 5971
+VPTERNLOGQZrmi 5972
+VPTERNLOGQZrmik 5973
+VPTERNLOGQZrmikz 5974
+VPTERNLOGQZrri 5975
+VPTERNLOGQZrrik 5976
+VPTERNLOGQZrrikz 5977
+VPTESTMBZ 5978
+VPTESTMBZrm 5979
+VPTESTMBZrmk 5980
+VPTESTMBZrr 5981
+VPTESTMBZrrk 5982
+VPTESTMDZ 5983
+VPTESTMDZrm 5984
+VPTESTMDZrmb 5985
+VPTESTMDZrmbk 5986
+VPTESTMDZrmk 5987
+VPTESTMDZrr 5988
+VPTESTMDZrrk 5989
+VPTESTMQZ 5990
+VPTESTMQZrm 5991
+VPTESTMQZrmb 5992
+VPTESTMQZrmbk 5993
+VPTESTMQZrmk 5994
+VPTESTMQZrr 5995
+VPTESTMQZrrk 5996
+VPTESTMWZ 5997
+VPTESTMWZrm 5998
+VPTESTMWZrmk 5999
+VPTESTMWZrr 6000
+VPTESTMWZrrk 6001
+VPTESTNMBZ 6002
+VPTESTNMBZrm 6003
+VPTESTNMBZrmk 6004
+VPTESTNMBZrr 6005
+VPTESTNMBZrrk 6006
+VPTESTNMDZ 6007
+VPTESTNMDZrm 6008
+VPTESTNMDZrmb 6009
+VPTESTNMDZrmbk 6010
+VPTESTNMDZrmk 6011
+VPTESTNMDZrr 6012
+VPTESTNMDZrrk 6013
+VPTESTNMQZ 6014
+VPTESTNMQZrm 6015
+VPTESTNMQZrmb 6016
+VPTESTNMQZrmbk 6017
+VPTESTNMQZrmk 6018
+VPTESTNMQZrr 6019
+VPTESTNMQZrrk 6020
+VPTESTNMWZ 6021
+VPTESTNMWZrm 6022
+VPTESTNMWZrmk 6023
+VPTESTNMWZrr 6024
+VPTESTNMWZrrk 6025
+VPTESTYrm 6026
+VPTESTYrr 6027
+VPTESTrm 6028
+VPTESTrr 6029
+VPUNPCKHBWYrm 6030
+VPUNPCKHBWYrr 6031
+VPUNPCKHBWZ 6032
+VPUNPCKHBWZrm 6033
+VPUNPCKHBWZrmk 6034
+VPUNPCKHBWZrmkz 6035
+VPUNPCKHBWZrr 6036
+VPUNPCKHBWZrrk 6037
+VPUNPCKHBWZrrkz 6038
+VPUNPCKHBWrm 6039
+VPUNPCKHBWrr 6040
+VPUNPCKHDQYrm 6041
+VPUNPCKHDQYrr 6042
+VPUNPCKHDQZ 6043
+VPUNPCKHDQZrm 6044
+VPUNPCKHDQZrmb 6045
+VPUNPCKHDQZrmbk 6046
+VPUNPCKHDQZrmbkz 6047
+VPUNPCKHDQZrmk 6048
+VPUNPCKHDQZrmkz 6049
+VPUNPCKHDQZrr 6050
+VPUNPCKHDQZrrk 6051
+VPUNPCKHDQZrrkz 6052
+VPUNPCKHDQrm 6053
+VPUNPCKHDQrr 6054
+VPUNPCKHQDQYrm 6055
+VPUNPCKHQDQYrr 6056
+VPUNPCKHQDQZ 6057
+VPUNPCKHQDQZrm 6058
+VPUNPCKHQDQZrmb 6059
+VPUNPCKHQDQZrmbk 6060
+VPUNPCKHQDQZrmbkz 6061
+VPUNPCKHQDQZrmk 6062
+VPUNPCKHQDQZrmkz 6063
+VPUNPCKHQDQZrr 6064
+VPUNPCKHQDQZrrk 6065
+VPUNPCKHQDQZrrkz 6066
+VPUNPCKHQDQrm 6067
+VPUNPCKHQDQrr 6068
+VPUNPCKHWDYrm 6069
+VPUNPCKHWDYrr 6070
+VPUNPCKHWDZ 6071
+VPUNPCKHWDZrm 6072
+VPUNPCKHWDZrmk 6073
+VPUNPCKHWDZrmkz 6074
+VPUNPCKHWDZrr 6075
+VPUNPCKHWDZrrk 6076
+VPUNPCKHWDZrrkz 6077
+VPUNPCKHWDrm 6078
+VPUNPCKHWDrr 6079
+VPUNPCKLBWYrm 6080
+VPUNPCKLBWYrr 6081
+VPUNPCKLBWZ 6082
+VPUNPCKLBWZrm 6083
+VPUNPCKLBWZrmk 6084
+VPUNPCKLBWZrmkz 6085
+VPUNPCKLBWZrr 6086
+VPUNPCKLBWZrrk 6087
+VPUNPCKLBWZrrkz 6088
+VPUNPCKLBWrm 6089
+VPUNPCKLBWrr 6090
+VPUNPCKLDQYrm 6091
+VPUNPCKLDQYrr 6092
+VPUNPCKLDQZ 6093
+VPUNPCKLDQZrm 6094
+VPUNPCKLDQZrmb 6095
+VPUNPCKLDQZrmbk 6096
+VPUNPCKLDQZrmbkz 6097
+VPUNPCKLDQZrmk 6098
+VPUNPCKLDQZrmkz 6099
+VPUNPCKLDQZrr 6100
+VPUNPCKLDQZrrk 6101
+VPUNPCKLDQZrrkz 6102
+VPUNPCKLDQrm 6103
+VPUNPCKLDQrr 6104
+VPUNPCKLQDQYrm 6105
+VPUNPCKLQDQYrr 6106
+VPUNPCKLQDQZ 6107
+VPUNPCKLQDQZrm 6108
+VPUNPCKLQDQZrmb 6109
+VPUNPCKLQDQZrmbk 6110
+VPUNPCKLQDQZrmbkz 6111
+VPUNPCKLQDQZrmk 6112
+VPUNPCKLQDQZrmkz 6113
+VPUNPCKLQDQZrr 6114
+VPUNPCKLQDQZrrk 6115
+VPUNPCKLQDQZrrkz 6116
+VPUNPCKLQDQrm 6117
+VPUNPCKLQDQrr 6118
+VPUNPCKLWDYrm 6119
+VPUNPCKLWDYrr 6120
+VPUNPCKLWDZ 6121
+VPUNPCKLWDZrm 6122
+VPUNPCKLWDZrmk 6123
+VPUNPCKLWDZrmkz 6124
+VPUNPCKLWDZrr 6125
+VPUNPCKLWDZrrk 6126
+VPUNPCKLWDZrrkz 6127
+VPUNPCKLWDrm 6128
+VPUNPCKLWDrr 6129
+VPXORDZ 6130
+VPXORDZrm 6131
+VPXORDZrmb 6132
+VPXORDZrmbk 6133
+VPXORDZrmbkz 6134
+VPXORDZrmk 6135
+VPXORDZrmkz 6136
+VPXORDZrr 6137
+VPXORDZrrk 6138
+VPXORDZrrkz 6139
+VPXORQZ 6140
+VPXORQZrm 6141
+VPXORQZrmb 6142
+VPXORQZrmbk 6143
+VPXORQZrmbkz 6144
+VPXORQZrmk 6145
+VPXORQZrmkz 6146
+VPXORQZrr 6147
+VPXORQZrrk 6148
+VPXORQZrrkz 6149
+VPXORYrm 6150
+VPXORYrr 6151
+VPXORrm 6152
+VPXORrr 6153
+VRANGEPDZ 6154
+VRANGEPDZrmbi 6155
+VRANGEPDZrmbik 6156
+VRANGEPDZrmbikz 6157
+VRANGEPDZrmi 6158
+VRANGEPDZrmik 6159
+VRANGEPDZrmikz 6160
+VRANGEPDZrri 6161
+VRANGEPDZrrib 6162
+VRANGEPDZrribk 6163
+VRANGEPDZrribkz 6164
+VRANGEPDZrrik 6165
+VRANGEPDZrrikz 6166
+VRANGEPSZ 6167
+VRANGEPSZrmbi 6168
+VRANGEPSZrmbik 6169
+VRANGEPSZrmbikz 6170
+VRANGEPSZrmi 6171
+VRANGEPSZrmik 6172
+VRANGEPSZrmikz 6173
+VRANGEPSZrri 6174
+VRANGEPSZrrib 6175
+VRANGEPSZrribk 6176
+VRANGEPSZrribkz 6177
+VRANGEPSZrrik 6178
+VRANGEPSZrrikz 6179
+VRANGESDZrmi 6180
+VRANGESDZrmik 6181
+VRANGESDZrmikz 6182
+VRANGESDZrri 6183
+VRANGESDZrrib 6184
+VRANGESDZrribk 6185
+VRANGESDZrribkz 6186
+VRANGESDZrrik 6187
+VRANGESDZrrikz 6188
+VRANGESSZrmi 6189
+VRANGESSZrmik 6190
+VRANGESSZrmikz 6191
+VRANGESSZrri 6192
+VRANGESSZrrib 6193
+VRANGESSZrribk 6194
+VRANGESSZrribkz 6195
+VRANGESSZrrik 6196
+VRANGESSZrrikz 6197
+VRCP 6198
+VRCPBF 6199
+VRCPPHZ 6200
+VRCPPHZm 6201
+VRCPPHZmb 6202
+VRCPPHZmbk 6203
+VRCPPHZmbkz 6204
+VRCPPHZmk 6205
+VRCPPHZmkz 6206
+VRCPPHZr 6207
+VRCPPHZrk 6208
+VRCPPHZrkz 6209
+VRCPPSYm 6210
+VRCPPSYr 6211
+VRCPPSm 6212
+VRCPPSr 6213
+VRCPSHZrm 6214
+VRCPSHZrmk 6215
+VRCPSHZrmkz 6216
+VRCPSHZrr 6217
+VRCPSHZrrk 6218
+VRCPSHZrrkz 6219
+VRCPSSm 6220
+VRCPSSm_Int 6221
+VRCPSSr 6222
+VRCPSSr_Int 6223
+VREDUCEBF 6224
+VREDUCEPDZ 6225
+VREDUCEPDZrmbi 6226
+VREDUCEPDZrmbik 6227
+VREDUCEPDZrmbikz 6228
+VREDUCEPDZrmi 6229
+VREDUCEPDZrmik 6230
+VREDUCEPDZrmikz 6231
+VREDUCEPDZrri 6232
+VREDUCEPDZrrib 6233
+VREDUCEPDZrribk 6234
+VREDUCEPDZrribkz 6235
+VREDUCEPDZrrik 6236
+VREDUCEPDZrrikz 6237
+VREDUCEPHZ 6238
+VREDUCEPHZrmbi 6239
+VREDUCEPHZrmbik 6240
+VREDUCEPHZrmbikz 6241
+VREDUCEPHZrmi 6242
+VREDUCEPHZrmik 6243
+VREDUCEPHZrmikz 6244
+VREDUCEPHZrri 6245
+VREDUCEPHZrrib 6246
+VREDUCEPHZrribk 6247
+VREDUCEPHZrribkz 6248
+VREDUCEPHZrrik 6249
+VREDUCEPHZrrikz 6250
+VREDUCEPSZ 6251
+VREDUCEPSZrmbi 6252
+VREDUCEPSZrmbik 6253
+VREDUCEPSZrmbikz 6254
+VREDUCEPSZrmi 6255
+VREDUCEPSZrmik 6256
+VREDUCEPSZrmikz 6257
+VREDUCEPSZrri 6258
+VREDUCEPSZrrib 6259
+VREDUCEPSZrribk 6260
+VREDUCEPSZrribkz 6261
+VREDUCEPSZrrik 6262
+VREDUCEPSZrrikz 6263
+VREDUCESDZrmi 6264
+VREDUCESDZrmik 6265
+VREDUCESDZrmikz 6266
+VREDUCESDZrri 6267
+VREDUCESDZrrib 6268
+VREDUCESDZrribk 6269
+VREDUCESDZrribkz 6270
+VREDUCESDZrrik 6271
+VREDUCESDZrrikz 6272
+VREDUCESHZrmi 6273
+VREDUCESHZrmik 6274
+VREDUCESHZrmikz 6275
+VREDUCESHZrri 6276
+VREDUCESHZrrib 6277
+VREDUCESHZrribk 6278
+VREDUCESHZrribkz 6279
+VREDUCESHZrrik 6280
+VREDUCESHZrrikz 6281
+VREDUCESSZrmi 6282
+VREDUCESSZrmik 6283
+VREDUCESSZrmikz 6284
+VREDUCESSZrri 6285
+VREDUCESSZrrib 6286
+VREDUCESSZrribk 6287
+VREDUCESSZrribkz 6288
+VREDUCESSZrrik 6289
+VREDUCESSZrrikz 6290
+VRNDSCALEBF 6291
+VRNDSCALEPDZ 6292
+VRNDSCALEPDZrmbi 6293
+VRNDSCALEPDZrmbik 6294
+VRNDSCALEPDZrmbikz 6295
+VRNDSCALEPDZrmi 6296
+VRNDSCALEPDZrmik 6297
+VRNDSCALEPDZrmikz 6298
+VRNDSCALEPDZrri 6299
+VRNDSCALEPDZrrib 6300
+VRNDSCALEPDZrribk 6301
+VRNDSCALEPDZrribkz 6302
+VRNDSCALEPDZrrik 6303
+VRNDSCALEPDZrrikz 6304
+VRNDSCALEPHZ 6305
+VRNDSCALEPHZrmbi 6306
+VRNDSCALEPHZrmbik 6307
+VRNDSCALEPHZrmbikz 6308
+VRNDSCALEPHZrmi 6309
+VRNDSCALEPHZrmik 6310
+VRNDSCALEPHZrmikz 6311
+VRNDSCALEPHZrri 6312
+VRNDSCALEPHZrrib 6313
+VRNDSCALEPHZrribk 6314
+VRNDSCALEPHZrribkz 6315
+VRNDSCALEPHZrrik 6316
+VRNDSCALEPHZrrikz 6317
+VRNDSCALEPSZ 6318
+VRNDSCALEPSZrmbi 6319
+VRNDSCALEPSZrmbik 6320
+VRNDSCALEPSZrmbikz 6321
+VRNDSCALEPSZrmi 6322
+VRNDSCALEPSZrmik 6323
+VRNDSCALEPSZrmikz 6324
+VRNDSCALEPSZrri 6325
+VRNDSCALEPSZrrib 6326
+VRNDSCALEPSZrribk 6327
+VRNDSCALEPSZrribkz 6328
+VRNDSCALEPSZrrik 6329
+VRNDSCALEPSZrrikz 6330
+VRNDSCALESDZrmi 6331
+VRNDSCALESDZrmi_Int 6332
+VRNDSCALESDZrmik_Int 6333
+VRNDSCALESDZrmikz_Int 6334
+VRNDSCALESDZrri 6335
+VRNDSCALESDZrri_Int 6336
+VRNDSCALESDZrrib_Int 6337
+VRNDSCALESDZrribk_Int 6338
+VRNDSCALESDZrribkz_Int 6339
+VRNDSCALESDZrrik_Int 6340
+VRNDSCALESDZrrikz_Int 6341
+VRNDSCALESHZrmi 6342
+VRNDSCALESHZrmi_Int 6343
+VRNDSCALESHZrmik_Int 6344
+VRNDSCALESHZrmikz_Int 6345
+VRNDSCALESHZrri 6346
+VRNDSCALESHZrri_Int 6347
+VRNDSCALESHZrrib_Int 6348
+VRNDSCALESHZrribk_Int 6349
+VRNDSCALESHZrribkz_Int 6350
+VRNDSCALESHZrrik_Int 6351
+VRNDSCALESHZrrikz_Int 6352
+VRNDSCALESSZrmi 6353
+VRNDSCALESSZrmi_Int 6354
+VRNDSCALESSZrmik_Int 6355
+VRNDSCALESSZrmikz_Int 6356
+VRNDSCALESSZrri 6357
+VRNDSCALESSZrri_Int 6358
+VRNDSCALESSZrrib_Int 6359
+VRNDSCALESSZrribk_Int 6360
+VRNDSCALESSZrribkz_Int 6361
+VRNDSCALESSZrrik_Int 6362
+VRNDSCALESSZrrikz_Int 6363
+VROUNDPDYmi 6364
+VROUNDPDYri 6365
+VROUNDPDmi 6366
+VROUNDPDri 6367
+VROUNDPSYmi 6368
+VROUNDPSYri 6369
+VROUNDPSmi 6370
+VROUNDPSri 6371
+VROUNDSDmi 6372
+VROUNDSDmi_Int 6373
+VROUNDSDri 6374
+VROUNDSDri_Int 6375
+VROUNDSSmi 6376
+VROUNDSSmi_Int 6377
+VROUNDSSri 6378
+VROUNDSSri_Int 6379
+VRSQRT 6380
+VRSQRTBF 6381
+VRSQRTPHZ 6382
+VRSQRTPHZm 6383
+VRSQRTPHZmb 6384
+VRSQRTPHZmbk 6385
+VRSQRTPHZmbkz 6386
+VRSQRTPHZmk 6387
+VRSQRTPHZmkz 6388
+VRSQRTPHZr 6389
+VRSQRTPHZrk 6390
+VRSQRTPHZrkz 6391
+VRSQRTPSYm 6392
+VRSQRTPSYr 6393
+VRSQRTPSm 6394
+VRSQRTPSr 6395
+VRSQRTSHZrm 6396
+VRSQRTSHZrmk 6397
+VRSQRTSHZrmkz 6398
+VRSQRTSHZrr 6399
+VRSQRTSHZrrk 6400
+VRSQRTSHZrrkz 6401
+VRSQRTSSm 6402
+VRSQRTSSm_Int 6403
+VRSQRTSSr 6404
+VRSQRTSSr_Int 6405
+VSCALEFBF 6406
+VSCALEFPDZ 6407
+VSCALEFPDZrm 6408
+VSCALEFPDZrmb 6409
+VSCALEFPDZrmbk 6410
+VSCALEFPDZrmbkz 6411
+VSCALEFPDZrmk 6412
+VSCALEFPDZrmkz 6413
+VSCALEFPDZrr 6414
+VSCALEFPDZrrb 6415
+VSCALEFPDZrrbk 6416
+VSCALEFPDZrrbkz 6417
+VSCALEFPDZrrk 6418
+VSCALEFPDZrrkz 6419
+VSCALEFPHZ 6420
+VSCALEFPHZrm 6421
+VSCALEFPHZrmb 6422
+VSCALEFPHZrmbk 6423
+VSCALEFPHZrmbkz 6424
+VSCALEFPHZrmk 6425
+VSCALEFPHZrmkz 6426
+VSCALEFPHZrr 6427
+VSCALEFPHZrrb 6428
+VSCALEFPHZrrbk 6429
+VSCALEFPHZrrbkz 6430
+VSCALEFPHZrrk 6431
+VSCALEFPHZrrkz 6432
+VSCALEFPSZ 6433
+VSCALEFPSZrm 6434
+VSCALEFPSZrmb 6435
+VSCALEFPSZrmbk 6436
+VSCALEFPSZrmbkz 6437
+VSCALEFPSZrmk 6438
+VSCALEFPSZrmkz 6439
+VSCALEFPSZrr 6440
+VSCALEFPSZrrb 6441
+VSCALEFPSZrrbk 6442
+VSCALEFPSZrrbkz 6443
+VSCALEFPSZrrk 6444
+VSCALEFPSZrrkz 6445
+VSCALEFSDZrm 6446
+VSCALEFSDZrmk 6447
+VSCALEFSDZrmkz 6448
+VSCALEFSDZrr 6449
+VSCALEFSDZrrb_Int 6450
+VSCALEFSDZrrbk_Int 6451
+VSCALEFSDZrrbkz_Int 6452
+VSCALEFSDZrrk 6453
+VSCALEFSDZrrkz 6454
+VSCALEFSHZrm 6455
+VSCALEFSHZrmk 6456
+VSCALEFSHZrmkz 6457
+VSCALEFSHZrr 6458
+VSCALEFSHZrrb_Int 6459
+VSCALEFSHZrrbk_Int 6460
+VSCALEFSHZrrbkz_Int 6461
+VSCALEFSHZrrk 6462
+VSCALEFSHZrrkz 6463
+VSCALEFSSZrm 6464
+VSCALEFSSZrmk 6465
+VSCALEFSSZrmkz 6466
+VSCALEFSSZrr 6467
+VSCALEFSSZrrb_Int 6468
+VSCALEFSSZrrbk_Int 6469
+VSCALEFSSZrrbkz_Int 6470
+VSCALEFSSZrrk 6471
+VSCALEFSSZrrkz 6472
+VSCATTERDPDZ 6473
+VSCATTERDPDZmr 6474
+VSCATTERDPSZ 6475
+VSCATTERDPSZmr 6476
+VSCATTERPF 6477
+VSCATTERQPDZ 6478
+VSCATTERQPDZmr 6479
+VSCATTERQPSZ 6480
+VSCATTERQPSZmr 6481
+VSHA 6482
+VSHUFF 6483
+VSHUFI 6484
+VSHUFPDYrmi 6485
+VSHUFPDYrri 6486
+VSHUFPDZ 6487
+VSHUFPDZrmbi 6488
+VSHUFPDZrmbik 6489
+VSHUFPDZrmbikz 6490
+VSHUFPDZrmi 6491
+VSHUFPDZrmik 6492
+VSHUFPDZrmikz 6493
+VSHUFPDZrri 6494
+VSHUFPDZrrik 6495
+VSHUFPDZrrikz 6496
+VSHUFPDrmi 6497
+VSHUFPDrri 6498
+VSHUFPSYrmi 6499
+VSHUFPSYrri 6500
+VSHUFPSZ 6501
+VSHUFPSZrmbi 6502
+VSHUFPSZrmbik 6503
+VSHUFPSZrmbikz 6504
+VSHUFPSZrmi 6505
+VSHUFPSZrmik 6506
+VSHUFPSZrmikz 6507
+VSHUFPSZrri 6508
+VSHUFPSZrrik 6509
+VSHUFPSZrrikz 6510
+VSHUFPSrmi 6511
+VSHUFPSrri 6512
+VSM 6513
+VSQRTBF 6514
+VSQRTPDYm 6515
+VSQRTPDYr 6516
+VSQRTPDZ 6517
+VSQRTPDZm 6518
+VSQRTPDZmb 6519
+VSQRTPDZmbk 6520
+VSQRTPDZmbkz 6521
+VSQRTPDZmk 6522
+VSQRTPDZmkz 6523
+VSQRTPDZr 6524
+VSQRTPDZrb 6525
+VSQRTPDZrbk 6526
+VSQRTPDZrbkz 6527
+VSQRTPDZrk 6528
+VSQRTPDZrkz 6529
+VSQRTPDm 6530
+VSQRTPDr 6531
+VSQRTPHZ 6532
+VSQRTPHZm 6533
+VSQRTPHZmb 6534
+VSQRTPHZmbk 6535
+VSQRTPHZmbkz 6536
+VSQRTPHZmk 6537
+VSQRTPHZmkz 6538
+VSQRTPHZr 6539
+VSQRTPHZrb 6540
+VSQRTPHZrbk 6541
+VSQRTPHZrbkz 6542
+VSQRTPHZrk 6543
+VSQRTPHZrkz 6544
+VSQRTPSYm 6545
+VSQRTPSYr 6546
+VSQRTPSZ 6547
+VSQRTPSZm 6548
+VSQRTPSZmb 6549
+VSQRTPSZmbk 6550
+VSQRTPSZmbkz 6551
+VSQRTPSZmk 6552
+VSQRTPSZmkz 6553
+VSQRTPSZr 6554
+VSQRTPSZrb 6555
+VSQRTPSZrbk 6556
+VSQRTPSZrbkz 6557
+VSQRTPSZrk 6558
+VSQRTPSZrkz 6559
+VSQRTPSm 6560
+VSQRTPSr 6561
+VSQRTSDZm 6562
+VSQRTSDZm_Int 6563
+VSQRTSDZmk_Int 6564
+VSQRTSDZmkz_Int 6565
+VSQRTSDZr 6566
+VSQRTSDZr_Int 6567
+VSQRTSDZrb_Int 6568
+VSQRTSDZrbk_Int 6569
+VSQRTSDZrbkz_Int 6570
+VSQRTSDZrk_Int 6571
+VSQRTSDZrkz_Int 6572
+VSQRTSDm 6573
+VSQRTSDm_Int 6574
+VSQRTSDr 6575
+VSQRTSDr_Int 6576
+VSQRTSHZm 6577
+VSQRTSHZm_Int 6578
+VSQRTSHZmk_Int 6579
+VSQRTSHZmkz_Int 6580
+VSQRTSHZr 6581
+VSQRTSHZr_Int 6582
+VSQRTSHZrb_Int 6583
+VSQRTSHZrbk_Int 6584
+VSQRTSHZrbkz_Int 6585
+VSQRTSHZrk_Int 6586
+VSQRTSHZrkz_Int 6587
+VSQRTSSZm 6588
+VSQRTSSZm_Int 6589
+VSQRTSSZmk_Int 6590
+VSQRTSSZmkz_Int 6591
+VSQRTSSZr 6592
+VSQRTSSZr_Int 6593
+VSQRTSSZrb_Int 6594
+VSQRTSSZrbk_Int 6595
+VSQRTSSZrbkz_Int 6596
+VSQRTSSZrk_Int 6597
+VSQRTSSZrkz_Int 6598
+VSQRTSSm 6599
+VSQRTSSm_Int 6600
+VSQRTSSr 6601
+VSQRTSSr_Int 6602
+VSTMXCSR 6603
+VSUBBF 6604
+VSUBPDYrm 6605
+VSUBPDYrr 6606
+VSUBPDZ 6607
+VSUBPDZrm 6608
+VSUBPDZrmb 6609
+VSUBPDZrmbk 6610
+VSUBPDZrmbkz 6611
+VSUBPDZrmk 6612
+VSUBPDZrmkz 6613
+VSUBPDZrr 6614
+VSUBPDZrrb 6615
+VSUBPDZrrbk 6616
+VSUBPDZrrbkz 6617
+VSUBPDZrrk 6618
+VSUBPDZrrkz 6619
+VSUBPDrm 6620
+VSUBPDrr 6621
+VSUBPHZ 6622
+VSUBPHZrm 6623
+VSUBPHZrmb 6624
+VSUBPHZrmbk 6625
+VSUBPHZrmbkz 6626
+VSUBPHZrmk 6627
+VSUBPHZrmkz 6628
+VSUBPHZrr 6629
+VSUBPHZrrb 6630
+VSUBPHZrrbk 6631
+VSUBPHZrrbkz 6632
+VSUBPHZrrk 6633
+VSUBPHZrrkz 6634
+VSUBPSYrm 6635
+VSUBPSYrr 6636
+VSUBPSZ 6637
+VSUBPSZrm 6638
+VSUBPSZrmb 6639
+VSUBPSZrmbk 6640
+VSUBPSZrmbkz 6641
+VSUBPSZrmk 6642
+VSUBPSZrmkz 6643
+VSUBPSZrr 6644
+VSUBPSZrrb 6645
+VSUBPSZrrbk 6646
+VSUBPSZrrbkz 6647
+VSUBPSZrrk 6648
+VSUBPSZrrkz 6649
+VSUBPSrm 6650
+VSUBPSrr 6651
+VSUBSDZrm 6652
+VSUBSDZrm_Int 6653
+VSUBSDZrmk_Int 6654
+VSUBSDZrmkz_Int 6655
+VSUBSDZrr 6656
+VSUBSDZrr_Int 6657
+VSUBSDZrrb_Int 6658
+VSUBSDZrrbk_Int 6659
+VSUBSDZrrbkz_Int 6660
+VSUBSDZrrk_Int 6661
+VSUBSDZrrkz_Int 6662
+VSUBSDrm 6663
+VSUBSDrm_Int 6664
+VSUBSDrr 6665
+VSUBSDrr_Int 6666
+VSUBSHZrm 6667
+VSUBSHZrm_Int 6668
+VSUBSHZrmk_Int 6669
+VSUBSHZrmkz_Int 6670
+VSUBSHZrr 6671
+VSUBSHZrr_Int 6672
+VSUBSHZrrb_Int 6673
+VSUBSHZrrbk_Int 6674
+VSUBSHZrrbkz_Int 6675
+VSUBSHZrrk_Int 6676
+VSUBSHZrrkz_Int 6677
+VSUBSSZrm 6678
+VSUBSSZrm_Int 6679
+VSUBSSZrmk_Int 6680
+VSUBSSZrmkz_Int 6681
+VSUBSSZrr 6682
+VSUBSSZrr_Int 6683
+VSUBSSZrrb_Int 6684
+VSUBSSZrrbk_Int 6685
+VSUBSSZrrbkz_Int 6686
+VSUBSSZrrk_Int 6687
+VSUBSSZrrkz_Int 6688
+VSUBSSrm 6689
+VSUBSSrm_Int 6690
+VSUBSSrr 6691
+VSUBSSrr_Int 6692
+VTESTPDYrm 6693
+VTESTPDYrr 6694
+VTESTPDrm 6695
+VTESTPDrr 6696
+VTESTPSYrm 6697
+VTESTPSYrr 6698
+VTESTPSrm 6699
+VTESTPSrr 6700
+VUCOMISDZrm 6701
+VUCOMISDZrm_Int 6702
+VUCOMISDZrr 6703
+VUCOMISDZrr_Int 6704
+VUCOMISDZrrb 6705
+VUCOMISDrm 6706
+VUCOMISDrm_Int 6707
+VUCOMISDrr 6708
+VUCOMISDrr_Int 6709
+VUCOMISHZrm 6710
+VUCOMISHZrm_Int 6711
+VUCOMISHZrr 6712
+VUCOMISHZrr_Int 6713
+VUCOMISHZrrb 6714
+VUCOMISSZrm 6715
+VUCOMISSZrm_Int 6716
+VUCOMISSZrr 6717
+VUCOMISSZrr_Int 6718
+VUCOMISSZrrb 6719
+VUCOMISSrm 6720
+VUCOMISSrm_Int 6721
+VUCOMISSrr 6722
+VUCOMISSrr_Int 6723
+VUCOMXSDZrm 6724
+VUCOMXSDZrm_Int 6725
+VUCOMXSDZrr 6726
+VUCOMXSDZrr_Int 6727
+VUCOMXSDZrrb_Int 6728
+VUCOMXSHZrm 6729
+VUCOMXSHZrm_Int 6730
+VUCOMXSHZrr 6731
+VUCOMXSHZrr_Int 6732
+VUCOMXSHZrrb_Int 6733
+VUCOMXSSZrm 6734
+VUCOMXSSZrm_Int 6735
+VUCOMXSSZrr 6736
+VUCOMXSSZrr_Int 6737
+VUCOMXSSZrrb_Int 6738
+VUNPCKHPDYrm 6739
+VUNPCKHPDYrr 6740
+VUNPCKHPDZ 6741
+VUNPCKHPDZrm 6742
+VUNPCKHPDZrmb 6743
+VUNPCKHPDZrmbk 6744
+VUNPCKHPDZrmbkz 6745
+VUNPCKHPDZrmk 6746
+VUNPCKHPDZrmkz 6747
+VUNPCKHPDZrr 6748
+VUNPCKHPDZrrk 6749
+VUNPCKHPDZrrkz 6750
+VUNPCKHPDrm 6751
+VUNPCKHPDrr 6752
+VUNPCKHPSYrm 6753
+VUNPCKHPSYrr 6754
+VUNPCKHPSZ 6755
+VUNPCKHPSZrm 6756
+VUNPCKHPSZrmb 6757
+VUNPCKHPSZrmbk 6758
+VUNPCKHPSZrmbkz 6759
+VUNPCKHPSZrmk 6760
+VUNPCKHPSZrmkz 6761
+VUNPCKHPSZrr 6762
+VUNPCKHPSZrrk 6763
+VUNPCKHPSZrrkz 6764
+VUNPCKHPSrm 6765
+VUNPCKHPSrr 6766
+VUNPCKLPDYrm 6767
+VUNPCKLPDYrr 6768
+VUNPCKLPDZ 6769
+VUNPCKLPDZrm 6770
+VUNPCKLPDZrmb 6771
+VUNPCKLPDZrmbk 6772
+VUNPCKLPDZrmbkz 6773
+VUNPCKLPDZrmk 6774
+VUNPCKLPDZrmkz 6775
+VUNPCKLPDZrr 6776
+VUNPCKLPDZrrk 6777
+VUNPCKLPDZrrkz 6778
+VUNPCKLPDrm 6779
+VUNPCKLPDrr 6780
+VUNPCKLPSYrm 6781
+VUNPCKLPSYrr 6782
+VUNPCKLPSZ 6783
+VUNPCKLPSZrm 6784
+VUNPCKLPSZrmb 6785
+VUNPCKLPSZrmbk 6786
+VUNPCKLPSZrmbkz 6787
+VUNPCKLPSZrmk 6788
+VUNPCKLPSZrmkz 6789
+VUNPCKLPSZrr 6790
+VUNPCKLPSZrrk 6791
+VUNPCKLPSZrrkz 6792
+VUNPCKLPSrm 6793
+VUNPCKLPSrr 6794
+VXORPDYrm 6795
+VXORPDYrr 6796
+VXORPDZ 6797
+VXORPDZrm 6798
+VXORPDZrmb 6799
+VXORPDZrmbk 6800
+VXORPDZrmbkz 6801
+VXORPDZrmk 6802
+VXORPDZrmkz 6803
+VXORPDZrr 6804
+VXORPDZrrk 6805
+VXORPDZrrkz 6806
+VXORPDrm 6807
+VXORPDrr 6808
+VXORPSYrm 6809
+VXORPSYrr 6810
+VXORPSZ 6811
+VXORPSZrm 6812
+VXORPSZrmb 6813
+VXORPSZrmbk 6814
+VXORPSZrmbkz 6815
+VXORPSZrmk 6816
+VXORPSZrmkz 6817
+VXORPSZrr 6818
+VXORPSZrrk 6819
+VXORPSZrrkz 6820
+VXORPSrm 6821
+VXORPSrr 6822
+VZEROALL 6823
+VZEROUPPER 6824
+V_SET 6825
+V_SETALLONES 6826
+WAIT 6827
+WBINVD 6828
+WBNOINVD 6829
+WRFLAGS 6830
+WRFSBASE 6831
+WRGSBASE 6832
+WRMSR 6833
+WRMSRLIST 6834
+WRMSRNS 6835
+WRMSRNSir 6836
+WRMSRNSir_EVEX 6837
+WRPKRUr 6838
+WRSSD 6839
+WRSSD_EVEX 6840
+WRSSQ 6841
+WRSSQ_EVEX 6842
+WRUSSD 6843
+WRUSSD_EVEX 6844
+WRUSSQ 6845
+WRUSSQ_EVEX 6846
+XABORT 6847
+XABORT_DEF 6848
+XACQUIRE_PREFIX 6849
+XADD 6850
+XAM_F 6851
+XAM_Fp 6852
+XBEGIN 6853
+XCHG 6854
+XCH_F 6855
+XCRYPTCBC 6856
+XCRYPTCFB 6857
+XCRYPTCTR 6858
+XCRYPTECB 6859
+XCRYPTOFB 6860
+XEND 6861
+XGETBV 6862
+XLAT 6863
+XOR 6864
+XORPDrm 6865
+XORPDrr 6866
+XORPSrm 6867
+XORPSrr 6868
+XRELEASE_PREFIX 6869
+XRESLDTRK 6870
+XRSTOR 6871
+XRSTORS 6872
+XSAVE 6873
+XSAVEC 6874
+XSAVEOPT 6875
+XSAVES 6876
+XSETBV 6877
+XSHA 6878
+XSTORE 6879
+XSUSLDTRK 6880
+XTEST 6881
+Immediate 6882
+CImmediate 6883
+FPImmediate 6884
+MBB 6885
+FrameIndex 6886
+ConstantPoolIndex 6887
+TargetIndex 6888
+JumpTableIndex 6889
+ExternalSymbol 6890
+GlobalAddress 6891
+BlockAddress 6892
+RegisterMask 6893
+RegisterLiveOut 6894
+Metadata 6895
+MCSymbol 6896
+CFIIndex 6897
+IntrinsicID 6898
+Predicate 6899
+ShuffleMask 6900
+PhyReg_GR8 6901
+PhyReg_GRH8 6902
+PhyReg_GR8_NOREX2 6903
+PhyReg_GR8_NOREX 6904
+PhyReg_GR8_ABCD_H 6905
+PhyReg_GR8_ABCD_L 6906
+PhyReg_GRH16 6907
+PhyReg_GR16 6908
+PhyReg_GR16_NOREX2 6909
+PhyReg_GR16_NOREX 6910
+PhyReg_VK1 6911
+PhyReg_VK16 6912
+PhyReg_VK2 6913
+PhyReg_VK4 6914
+PhyReg_VK8 6915
+PhyReg_VK16WM 6916
+PhyReg_VK1WM 6917
+PhyReg_VK2WM 6918
+PhyReg_VK4WM 6919
+PhyReg_VK8WM 6920
+PhyReg_SEGMENT_REG 6921
+PhyReg_GR16_ABCD 6922
+PhyReg_FPCCR 6923
+PhyReg_FR16X 6924
+PhyReg_FR16 6925
+PhyReg_VK16PAIR 6926
+PhyReg_VK1PAIR 6927
+PhyReg_VK2PAIR 6928
+PhyReg_VK4PAIR 6929
+PhyReg_VK8PAIR 6930
+PhyReg_VK1PAIR_with_sub_mask_0_in_VK1WM 6931
+PhyReg_LOW32_ADDR_ACCESS_RBP 6932
+PhyReg_LOW32_ADDR_ACCESS 6933
+PhyReg_LOW32_ADDR_ACCESS_RBP_with_sub_8bit 6934
+PhyReg_FR32X 6935
+PhyReg_GR32 6936
+PhyReg_GR32_NOSP 6937
+PhyReg_LOW32_ADDR_ACCESS_RBP_with_sub_16bit_in_GR16_NOREX2 6938
+PhyReg_DEBUG_REG 6939
+PhyReg_FR32 6940
+PhyReg_GR32_NOREX2 6941
+PhyReg_GR32_NOREX2_NOSP 6942
+PhyReg_LOW32_ADDR_ACCESS_RBP_with_sub_16bit_in_GR16_NOREX 6943
+PhyReg_GR32_NOREX 6944
+PhyReg_VK32 6945
+PhyReg_GR32_NOREX_NOSP 6946
+PhyReg_RFP32 6947
+PhyReg_VK32WM 6948
+PhyReg_GR32_ABCD 6949
+PhyReg_GR32_TC 6950
+PhyReg_GR32_ABCD_and_GR32_TC 6951
+PhyReg_GR32_AD 6952
+PhyReg_GR32_ArgRef 6953
+PhyReg_GR32_BPSP 6954
+PhyReg_GR32_BSI 6955
+PhyReg_GR32_CB 6956
+PhyReg_GR32_DC 6957
+PhyReg_GR32_DIBP 6958
+PhyReg_GR32_SIDI 6959
+PhyReg_LOW32_ADDR_ACCESS_RBP_with_sub_32bit 6960
+PhyReg_CCR 6961
+PhyReg_DFCCR 6962
+PhyReg_GR32_ABCD_and_GR32_BSI 6963
+PhyReg_GR32_AD_and_GR32_ArgRef 6964
+PhyReg_GR32_ArgRef_and_GR32_CB 6965
+PhyReg_GR32_BPSP_and_GR32_DIBP 6966
+PhyReg_GR32_BPSP_and_GR32_TC 6967
+PhyReg_GR32_BSI_and_GR32_SIDI 6968
+PhyReg_GR32_DIBP_and_GR32_SIDI 6969
+PhyReg_LOW32_ADDR_ACCESS_RBP_with_sub_8bit_with_sub_32bit 6970
+PhyReg_LOW32_ADDR_ACCESS_with_sub_32bit 6971
+PhyReg_RFP64 6972
+PhyReg_GR64 6973
+PhyReg_FR64X 6974
+PhyReg_GR64_with_sub_8bit 6975
+PhyReg_GR64_NOSP 6976
+PhyReg_GR64_NOREX2 6977
+PhyReg_CONTROL_REG 6978
+PhyReg_FR64 6979
+PhyReg_GR64_with_sub_16bit_in_GR16_NOREX2 6980
+PhyReg_GR64_NOREX2_NOSP 6981
+PhyReg_GR64PLTSafe 6982
+PhyReg_GR64_TC 6983
+PhyReg_GR64_NOREX 6984
+PhyReg_GR64_TCW64 6985
+PhyReg_GR64_TC_with_sub_8bit 6986
+PhyReg_GR64_NOREX2_NOSP_and_GR64_TC 6987
+PhyReg_GR64_TCW64_with_sub_8bit 6988
+PhyReg_GR64_TC_and_GR64_TCW64 6989
+PhyReg_GR64_with_sub_16bit_in_GR16_NOREX 6990
+PhyReg_VK64 6991
+PhyReg_VR64 6992
+PhyReg_GR64PLTSafe_and_GR64_TC 6993
+PhyReg_GR64_NOREX2_NOSP_and_GR64_TCW64 6994
+PhyReg_GR64_NOREX_NOSP 6995
+PhyReg_GR64_NOREX_and_GR64_TC 6996
+PhyReg_GR64_TCW64_and_GR64_TC_with_sub_8bit 6997
+PhyReg_VK64WM 6998
+PhyReg_GR64_TC_and_GR64_NOREX2_NOSP_and_GR64_TCW64 6999
+PhyReg_GR64_TC_and_GR64_with_sub_16bit_in_GR16_NOREX 7000
+PhyReg_GR64PLTSafe_and_GR64_TCW64 7001
+PhyReg_GR64_NOREX_and_GR64PLTSafe_and_GR64_TC 7002
+PhyReg_GR64_NOREX_and_GR64_TCW64 7003
+PhyReg_GR64_ABCD 7004
+PhyReg_GR64_with_sub_32bit_in_GR32_TC 7005
+PhyReg_GR64_with_sub_32bit_in_GR32_ABCD_and_GR32_TC 7006
+PhyReg_GR64_AD 7007
+PhyReg_GR64_ArgRef 7008
+PhyReg_GR64_and_LOW32_ADDR_ACCESS_RBP 7009
+PhyReg_GR64_with_sub_32bit_in_GR32_ArgRef 7010
+PhyReg_GR64_with_sub_32bit_in_GR32_BPSP 7011
+PhyReg_GR64_with_sub_32bit_in_GR32_BSI 7012
+PhyReg_GR64_with_sub_32bit_in_GR32_CB 7013
+PhyReg_GR64_with_sub_32bit_in_GR32_DIBP 7014
+PhyReg_GR64_with_sub_32bit_in_GR32_SIDI 7015
+PhyReg_GR64_A 7016
+PhyReg_GR64_ArgRef_and_GR64_TC 7017
+PhyReg_GR64_and_LOW32_ADDR_ACCESS 7018
+PhyReg_GR64_with_sub_32bit_in_GR32_ABCD_and_GR32_BSI 7019
+PhyReg_GR64_with_sub_32bit_in_GR32_AD_and_GR32_ArgRef 7020
+PhyReg_GR64_with_sub_32bit_in_GR32_ArgRef_and_GR32_CB 7021
+PhyReg_GR64_with_sub_32bit_in_GR32_BPSP_and_GR32_DIBP 7022
+PhyReg_GR64_with_sub_32bit_in_GR32_BPSP_and_GR32_TC 7023
+PhyReg_GR64_with_sub_32bit_in_GR32_BSI_and_GR32_SIDI 7024
+PhyReg_GR64_with_sub_32bit_in_GR32_DIBP_and_GR32_SIDI 7025
+PhyReg_RST 7026
+PhyReg_RFP80 7027
+PhyReg_RFP80_7 7028
+PhyReg_VR128X 7029
+PhyReg_VR128 7030
+PhyReg_VR256X 7031
+PhyReg_VR256 7032
+PhyReg_VR512 7033
+PhyReg_VR512_0_15 7034
+PhyReg_TILE 7035
+PhyReg_TILEPAIR 7036
+VirtReg_GR8 7037
+VirtReg_GRH8 7038
+VirtReg_GR8_NOREX2 7039
+VirtReg_GR8_NOREX 7040
+VirtReg_GR8_ABCD_H 7041
+VirtReg_GR8_ABCD_L 7042
+VirtReg_GRH16 7043
+VirtReg_GR16 7044
+VirtReg_GR16_NOREX2 7045
+VirtReg_GR16_NOREX 7046
+VirtReg_VK1 7047
+VirtReg_VK16 7048
+VirtReg_VK2 7049
+VirtReg_VK4 7050
+VirtReg_VK8 7051
+VirtReg_VK16WM 7052
+VirtReg_VK1WM 7053
+VirtReg_VK2WM 7054
+VirtReg_VK4WM 7055
+VirtReg_VK8WM 7056
+VirtReg_SEGMENT_REG 7057
+VirtReg_GR16_ABCD 7058
+VirtReg_FPCCR 7059
+VirtReg_FR16X 7060
+VirtReg_FR16 7061
+VirtReg_VK16PAIR 7062
+VirtReg_VK1PAIR 7063
+VirtReg_VK2PAIR 7064
+VirtReg_VK4PAIR 7065
+VirtReg_VK8PAIR 7066
+VirtReg_VK1PAIR_with_sub_mask_0_in_VK1WM 7067
+VirtReg_LOW32_ADDR_ACCESS_RBP 7068
+VirtReg_LOW32_ADDR_ACCESS 7069
+VirtReg_LOW32_ADDR_ACCESS_RBP_with_sub_8bit 7070
+VirtReg_FR32X 7071
+VirtReg_GR32 7072
+VirtReg_GR32_NOSP 7073
+VirtReg_LOW32_ADDR_ACCESS_RBP_with_sub_16bit_in_GR16_NOREX2 7074
+VirtReg_DEBUG_REG 7075
+VirtReg_FR32 7076
+VirtReg_GR32_NOREX2 7077
+VirtReg_GR32_NOREX2_NOSP 7078
+VirtReg_LOW32_ADDR_ACCESS_RBP_with_sub_16bit_in_GR16_NOREX 7079
+VirtReg_GR32_NOREX 7080
+VirtReg_VK32 7081
+VirtReg_GR32_NOREX_NOSP 7082
+VirtReg_RFP32 7083
+VirtReg_VK32WM 7084
+VirtReg_GR32_ABCD 7085
+VirtReg_GR32_TC 7086
+VirtReg_GR32_ABCD_and_GR32_TC 7087
+VirtReg_GR32_AD 7088
+VirtReg_GR32_ArgRef 7089
+VirtReg_GR32_BPSP 7090
+VirtReg_GR32_BSI 7091
+VirtReg_GR32_CB 7092
+VirtReg_GR32_DC 7093
+VirtReg_GR32_DIBP 7094
+VirtReg_GR32_SIDI 7095
+VirtReg_LOW32_ADDR_ACCESS_RBP_with_sub_32bit 7096
+VirtReg_CCR 7097
+VirtReg_DFCCR 7098
+VirtReg_GR32_ABCD_and_GR32_BSI 7099
+VirtReg_GR32_AD_and_GR32_ArgRef 7100
+VirtReg_GR32_ArgRef_and_GR32_CB 7101
+VirtReg_GR32_BPSP_and_GR32_DIBP 7102
+VirtReg_GR32_BPSP_and_GR32_TC 7103
+VirtReg_GR32_BSI_and_GR32_SIDI 7104
+VirtReg_GR32_DIBP_and_GR32_SIDI 7105
+VirtReg_LOW32_ADDR_ACCESS_RBP_with_sub_8bit_with_sub_32bit 7106
+VirtReg_LOW32_ADDR_ACCESS_with_sub_32bit 7107
+VirtReg_RFP64 7108
+VirtReg_GR64 7109
+VirtReg_FR64X 7110
+VirtReg_GR64_with_sub_8bit 7111
+VirtReg_GR64_NOSP 7112
+VirtReg_GR64_NOREX2 7113
+VirtReg_CONTROL_REG 7114
+VirtReg_FR64 7115
+VirtReg_GR64_with_sub_16bit_in_GR16_NOREX2 7116
+VirtReg_GR64_NOREX2_NOSP 7117
+VirtReg_GR64PLTSafe 7118
+VirtReg_GR64_TC 7119
+VirtReg_GR64_NOREX 7120
+VirtReg_GR64_TCW64 7121
+VirtReg_GR64_TC_with_sub_8bit 7122
+VirtReg_GR64_NOREX2_NOSP_and_GR64_TC 7123
+VirtReg_GR64_TCW64_with_sub_8bit 7124
+VirtReg_GR64_TC_and_GR64_TCW64 7125
+VirtReg_GR64_with_sub_16bit_in_GR16_NOREX 7126
+VirtReg_VK64 7127
+VirtReg_VR64 7128
+VirtReg_GR64PLTSafe_and_GR64_TC 7129
+VirtReg_GR64_NOREX2_NOSP_and_GR64_TCW64 7130
+VirtReg_GR64_NOREX_NOSP 7131
+VirtReg_GR64_NOREX_and_GR64_TC 7132
+VirtReg_GR64_TCW64_and_GR64_TC_with_sub_8bit 7133
+VirtReg_VK64WM 7134
+VirtReg_GR64_TC_and_GR64_NOREX2_NOSP_and_GR64_TCW64 7135
+VirtReg_GR64_TC_and_GR64_with_sub_16bit_in_GR16_NOREX 7136
+VirtReg_GR64PLTSafe_and_GR64_TCW64 7137
+VirtReg_GR64_NOREX_and_GR64PLTSafe_and_GR64_TC 7138
+VirtReg_GR64_NOREX_and_GR64_TCW64 7139
+VirtReg_GR64_ABCD 7140
+VirtReg_GR64_with_sub_32bit_in_GR32_TC 7141
+VirtReg_GR64_with_sub_32bit_in_GR32_ABCD_and_GR32_TC 7142
+VirtReg_GR64_AD 7143
+VirtReg_GR64_ArgRef 7144
+VirtReg_GR64_and_LOW32_ADDR_ACCESS_RBP 7145
+VirtReg_GR64_with_sub_32bit_in_GR32_ArgRef 7146
+VirtReg_GR64_with_sub_32bit_in_GR32_BPSP 7147
+VirtReg_GR64_with_sub_32bit_in_GR32_BSI 7148
+VirtReg_GR64_with_sub_32bit_in_GR32_CB 7149
+VirtReg_GR64_with_sub_32bit_in_GR32_DIBP 7150
+VirtReg_GR64_with_sub_32bit_in_GR32_SIDI 7151
+VirtReg_GR64_A 7152
+VirtReg_GR64_ArgRef_and_GR64_TC 7153
+VirtReg_GR64_and_LOW32_ADDR_ACCESS 7154
+VirtReg_GR64_with_sub_32bit_in_GR32_ABCD_and_GR32_BSI 7155
+VirtReg_GR64_with_sub_32bit_in_GR32_AD_and_GR32_ArgRef 7156
+VirtReg_GR64_with_sub_32bit_in_GR32_ArgRef_and_GR32_CB 7157
+VirtReg_GR64_with_sub_32bit_in_GR32_BPSP_and_GR32_DIBP 7158
+VirtReg_GR64_with_sub_32bit_in_GR32_BPSP_and_GR32_TC 7159
+VirtReg_GR64_with_sub_32bit_in_GR32_BSI_and_GR32_SIDI 7160
+VirtReg_GR64_with_sub_32bit_in_GR32_DIBP_and_GR32_SIDI 7161
+VirtReg_RST 7162
+VirtReg_RFP80 7163
+VirtReg_RFP80_7 7164
+VirtReg_VR128X 7165
+VirtReg_VR128 7166
+VirtReg_VR256X 7167
+VirtReg_VR256 7168
+VirtReg_VR512 7169
+VirtReg_VR512_0_15 7170
+VirtReg_TILE 7171
+VirtReg_TILEPAIR 7172
diff --git a/llvm/test/tools/llvm-ir2vec/triplets.mir b/llvm/test/tools/llvm-ir2vec/triplets.mir
new file mode 100644
index 0000000..274984a
--- /dev/null
+++ b/llvm/test/tools/llvm-ir2vec/triplets.mir
@@ -0,0 +1,61 @@
+# REQUIRES: x86_64-linux
+# RUN: llvm-ir2vec triplets --mode=mir %s -o %t1.log 2>&1
+# RUN: diff %S/output/reference_triplets.txt %t1.log
+
+--- |
+ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+ target triple = "x86_64-unknown-linux-gnu"
+
+ define dso_local noundef i32 @add_function(i32 noundef %a, i32 noundef %b) {
+ entry:
+ %sum = add nsw i32 %a, %b
+ ret i32 %sum
+ }
+
+ define dso_local noundef i32 @mul_function(i32 noundef %x, i32 noundef %y) {
+ entry:
+ %product = mul nsw i32 %x, %y
+ ret i32 %product
+ }
+...
+---
+name: add_function
+alignment: 16
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr32 }
+ - { id: 1, class: gr32 }
+ - { id: 2, class: gr32 }
+liveins:
+ - { reg: '$edi', virtual-reg: '%0' }
+ - { reg: '$esi', virtual-reg: '%1' }
+body: |
+ bb.0.entry:
+ liveins: $edi, $esi
+
+ %1:gr32 = COPY $esi
+ %0:gr32 = COPY $edi
+ %2:gr32 = nsw ADD32rr %0, %1, implicit-def dead $eflags
+ $eax = COPY %2
+ RET 0, $eax
+
+---
+name: mul_function
+alignment: 16
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr32 }
+ - { id: 1, class: gr32 }
+ - { id: 2, class: gr32 }
+liveins:
+ - { reg: '$edi', virtual-reg: '%0' }
+ - { reg: '$esi', virtual-reg: '%1' }
+body: |
+ bb.0.entry:
+ liveins: $edi, $esi
+
+ %1:gr32 = COPY $esi
+ %0:gr32 = COPY $edi
+ %2:gr32 = nsw IMUL32rr %0, %1, implicit-def dead $eflags
+ $eax = COPY %2
+ RET 0, $eax
diff --git a/llvm/test/tools/llvm-objdump/MachO/disassemble-source-dsym.test b/llvm/test/tools/llvm-objdump/MachO/disassemble-source-dsym.test
index aaaf6bf..9899dc5 100644
--- a/llvm/test/tools/llvm-objdump/MachO/disassemble-source-dsym.test
+++ b/llvm/test/tools/llvm-objdump/MachO/disassemble-source-dsym.test
@@ -13,4 +13,35 @@
# RUN: dsymutil -f -oso-prepend-path=%p/../../dsymutil/ %t3 -o %t3.dSYM
# RUN: llvm-objdump --source --prefix=%p/../../dsymutil %t3 | FileCheck --check-prefix=SOURCE %s
+## Test that --source works with --macho flag.
+
+## --macho w/ explicit .dSYM
+# RUN: llvm-objdump < %p/../../dsymutil/Inputs/basic.macho.x86_64 - --source --macho --dsym=%t1.dSYM --prefix=%p/../../dsymutil | \
+# RUN: FileCheck --check-prefix=SOURCE %s
+
+## --macho w/ auto-detected .dSYM (dir)
+# RUN: llvm-objdump --source --macho --prefix=%p/../../dsymutil %t2 | FileCheck --check-prefix=SOURCE %s
+
+## --macho w/ auto-detected .dSYM (file)
+# RUN: llvm-objdump --source --macho --prefix=%p/../../dsymutil %t3 | FileCheck --check-prefix=SOURCE %s
+
# SOURCE: ; int bar(int arg) {
+
+## Test that --line-numbers works with --macho flag.
+
+## --macho -l w/ explicit .dSYM
+# RUN: llvm-objdump -d -l --macho --dsym=%t1.dSYM %p/../../dsymutil/Inputs/basic.macho.x86_64 | FileCheck --check-prefix=LINE %s
+
+## --macho -l w/ object file (embedded debug info)
+# RUN: llvm-objdump -d -l --macho %p/../../dsymutil/Inputs/basic1.macho.x86_64.o | FileCheck --check-prefix=LINE_OBJ %s
+
+# LINE: (__TEXT,__text) section
+# LINE: _bar:
+# LINE: ; bar():
+# LINE: ; {{.*}}basic3.c:
+
+# LINE_OBJ: (__TEXT,__text) section
+# LINE_OBJ: _main:
+# LINE_OBJ: ; main():
+# LINE_OBJ: ; {{.*}}basic1.c:23
+# LINE_OBJ: pushq %rbp ## basic1.c:23:0
diff --git a/llvm/test/tools/llvm-readobj/ELF/section-types.test b/llvm/test/tools/llvm-readobj/ELF/section-types.test
index 904892a..12a9d05 100644
--- a/llvm/test/tools/llvm-readobj/ELF/section-types.test
+++ b/llvm/test/tools/llvm-readobj/ELF/section-types.test
@@ -63,6 +63,8 @@
# LLVM: Type: SHT_LLVM_PART_PHDR
# LLVM: Name: .llvm.lto
# LLVM: Type: SHT_LLVM_LTO
+# LLVM: Name: .llvm.callgraph
+# LLVM: Type: SHT_LLVM_CALL_GRAPH
# LLVM: Name: gnu_sframe
# LLVM: Type: SHT_GNU_SFRAME
# LLVM: Name: gnu_attributes
@@ -127,6 +129,7 @@
# GNU-NEXT: part1 LLVM_PART_EHDR
# GNU-NEXT: .phdrs LLVM_PART_PHDR
# GNU-NEXT: .llvm.lto LLVM_LTO
+# GNU-NEXT: .llvm.callgraph LLVM_CALL_GRAPH
# GNU-NEXT: gnu_sframe SFRAME
# GNU-NEXT: gnu_attributes ATTRIBUTES
# GNU-NEXT: gnu_hash GNU_HASH
@@ -218,6 +221,8 @@ Sections:
Type: SHT_LLVM_PART_PHDR
- Name: .llvm.lto
Type: SHT_LLVM_LTO
+ - Name: .llvm.callgraph
+ Type: SHT_LLVM_CALL_GRAPH
- Name: gnu_sframe
Type: SHT_GNU_SFRAME
- Name: gnu_attributes
diff --git a/llvm/test/tools/opt/no-target-machine.ll b/llvm/test/tools/opt/no-target-machine.ll
new file mode 100644
index 0000000..4f07c81
--- /dev/null
+++ b/llvm/test/tools/opt/no-target-machine.ll
@@ -0,0 +1,18 @@
+; Report error when pass requires TargetMachine.
+; RUN: not opt -passes=atomic-expand -disable-output %s 2>&1 | FileCheck %s
+; RUN: not opt -passes=codegenprepare -disable-output %s 2>&1 | FileCheck %s
+; RUN: not opt -passes=complex-deinterleaving -disable-output %s 2>&1 | FileCheck %s
+; RUN: not opt -passes=dwarf-eh-prepare -disable-output %s 2>&1 | FileCheck %s
+; RUN: not opt -passes=expand-large-div-rem -disable-output %s 2>&1 | FileCheck %s
+; RUN: not opt -passes=expand-memcmp -disable-output %s 2>&1 | FileCheck %s
+; RUN: not opt -passes=indirectbr-expand -disable-output %s 2>&1 | FileCheck %s
+; RUN: not opt -passes=interleaved-access -disable-output %s 2>&1 | FileCheck %s
+; RUN: not opt -passes=interleaved-load-combine -disable-output %s 2>&1 | FileCheck %s
+; RUN: not opt -passes=safe-stack -disable-output %s 2>&1 | FileCheck %s
+; RUN: not opt -passes=select-optimize -disable-output %s 2>&1 | FileCheck %s
+; RUN: not opt -passes=stack-protector -disable-output %s 2>&1 | FileCheck %s
+; RUN: not opt -passes=typepromotion -disable-output %s 2>&1 | FileCheck %s
+; RUN: not opt -passes='expand-fp<O1>' -disable-output %s 2>&1 | FileCheck %s
+define void @foo() { ret void }
+; CHECK: pass '{{.+}}' requires TargetMachine
+;requires TargetMachine