Diffstat (limited to 'llvm')
-rw-r--r--  llvm/docs/CommandGuide/llvm-ir2vec.rst | 88
-rw-r--r--  llvm/docs/Extensions.rst | 31
-rw-r--r--  llvm/docs/TableGen/index.rst | 44
-rw-r--r--  llvm/include/llvm/ADT/SmallVector.h | 12
-rw-r--r--  llvm/include/llvm/Analysis/TargetTransformInfo.h | 3
-rw-r--r--  llvm/include/llvm/CodeGen/MIR2Vec.h | 38
-rw-r--r--  llvm/include/llvm/CodeGen/MachineBlockHashInfo.h | 114
-rw-r--r--  llvm/include/llvm/CodeGen/Passes.h | 3
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/Core.h | 2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/DebugObjectManagerPlugin.h | 2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/Debugging/PerfSupportPlugin.h | 2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h | 2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h | 2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/Layer.h | 2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/LinkGraphLinkingLayer.h | 2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h | 2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/SimpleRemoteEPC.h | 2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h | 2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.h | 2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h | 2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/TaskDispatch.h | 2
-rw-r--r--  llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h | 4
-rw-r--r--  llvm/include/llvm/IR/Intrinsics.td | 12
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsNVVM.td | 100
-rw-r--r--  llvm/include/llvm/IR/ModuleSummaryIndex.h | 4
-rw-r--r--  llvm/include/llvm/InitializePasses.h | 1
-rw-r--r--  llvm/include/llvm/LTO/LTO.h | 3
-rw-r--r--  llvm/include/llvm/MC/MCContext.h | 2
-rw-r--r--  llvm/lib/Analysis/LoopCacheAnalysis.cpp | 7
-rw-r--r--  llvm/lib/Analysis/TargetTransformInfo.cpp | 20
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 21
-rw-r--r--  llvm/lib/CodeGen/CMakeLists.txt | 1
-rw-r--r--  llvm/lib/CodeGen/MachineBlockHashInfo.cpp | 115
-rw-r--r--  llvm/lib/CodeGen/TargetPassConfig.cpp | 8
-rw-r--r--  llvm/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp | 2
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupportPlugin.cpp | 2
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/LinkGraphLinkingLayer.cpp | 2
-rw-r--r--  llvm/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp | 4
-rw-r--r--  llvm/lib/LTO/LTO.cpp | 11
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.td | 24
-rw-r--r--  llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp | 26
-rw-r--r--  llvm/lib/Target/ARM/ARMAsmPrinter.cpp | 434
-rw-r--r--  llvm/lib/Target/ARM/ARMAsmPrinter.h | 11
-rw-r--r--  llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp | 2
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.cpp | 69
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.h | 6
-rw-r--r--  llvm/lib/Target/ARM/ARMInstrInfo.td | 30
-rw-r--r--  llvm/lib/Target/ARM/ARMTargetMachine.cpp | 12
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXIntrinsics.td | 20
-rw-r--r--  llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td | 2
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp | 31
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVBuiltins.td | 1
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp | 4
-rw-r--r--  llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp | 31
-rw-r--r--  llvm/lib/Transforms/Scalar/ConstraintElimination.cpp | 20
-rw-r--r--  llvm/lib/Transforms/Scalar/MergeICmps.cpp | 34
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlan.h | 13
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp | 67
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 80
-rw-r--r--  llvm/lib/Transforms/Vectorize/VPlanUtils.cpp | 2
-rw-r--r--  llvm/test/Analysis/CostModel/ARM/fparith.ll | 172
-rw-r--r--  llvm/test/Analysis/CostModel/ARM/fptoi_sat.ll | 612
-rw-r--r--  llvm/test/Analysis/CostModel/ARM/intrinsic-cost-kinds.ll | 371
-rw-r--r--  llvm/test/Analysis/CostModel/ARM/mve-intrinsic-cost-kinds.ll | 181
-rw-r--r--  llvm/test/Analysis/CostModel/ARM/mve-target-intrinsics.ll | 24
-rw-r--r--  llvm/test/Analysis/CostModel/ARM/target-intrinsics.ll | 41
-rw-r--r--  llvm/test/CodeGen/AArch64/load-zext-bitcast.ll | 51
-rw-r--r--  llvm/test/CodeGen/ARM/O3-pipeline.ll | 1
-rw-r--r--  llvm/test/CodeGen/ARM/kcfi-arm.ll | 138
-rw-r--r--  llvm/test/CodeGen/ARM/kcfi-cbz-range.ll | 81
-rw-r--r--  llvm/test/CodeGen/ARM/kcfi-patchable-function-prefix.ll | 99
-rw-r--r--  llvm/test/CodeGen/ARM/kcfi-thumb.ll | 215
-rw-r--r--  llvm/test/CodeGen/ARM/kcfi-thumb2.ll | 163
-rw-r--r--  llvm/test/CodeGen/ARM/kcfi.ll | 28
-rw-r--r--  llvm/test/CodeGen/DirectX/CBufferAccess/unused.ll | 2
-rw-r--r--  llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_shader_clock/shader_clock.ll | 9
-rw-r--r--  llvm/test/CodeGen/X86/atomic-load-store.ll | 117
-rw-r--r--  llvm/test/CodeGen/X86/basic-block-address-map-empty-function.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/basic-block-address-map-function-sections.ll | 6
-rw-r--r--  llvm/test/CodeGen/X86/basic-block-address-map-pgo-features.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/basic-block-address-map-with-basic-block-sections.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/basic-block-address-map-with-emit-bb-hash.ll | 94
-rw-r--r--  llvm/test/CodeGen/X86/basic-block-address-map-with-mfs.ll | 2
-rw-r--r--  llvm/test/CodeGen/X86/basic-block-address-map.ll | 2
-rw-r--r--  llvm/test/TableGen/intrinsic-manual-name.td | 6
-rw-r--r--  llvm/test/ThinLTO/X86/devirt_external_comdat_same_guid.ll | 10
-rw-r--r--  llvm/test/ThinLTO/X86/dtlto/json.ll | 20
-rw-r--r--  llvm/test/Transforms/ConstraintElimination/add-nsw.ll | 6
-rw-r--r--  llvm/test/Transforms/ConstraintElimination/gep-arithmetic-add.ll | 6
-rw-r--r--  llvm/test/Transforms/DeadArgElim/dbginfo.ll | 6
-rw-r--r--  llvm/test/Transforms/DeadStoreElimination/mda-with-dbg-values.ll | 3
-rw-r--r--  llvm/test/Transforms/FunctionImport/Inputs/funcimport_debug.ll | 4
-rw-r--r--  llvm/test/Transforms/FunctionImport/funcimport_debug.ll | 7
-rw-r--r--  llvm/test/Transforms/GCOVProfiling/exit-block.ll | 8
-rw-r--r--  llvm/test/Transforms/GCOVProfiling/linezero.ll | 19
-rw-r--r--  llvm/test/Transforms/GCOVProfiling/split-indirectbr-critical-edges.ll | 4
-rw-r--r--  llvm/test/Transforms/GVN/cond_br2.ll | 35
-rw-r--r--  llvm/test/Transforms/GVN/pr33549.ll | 6
-rw-r--r--  llvm/test/Transforms/GVN/pr42605.ll | 2
-rw-r--r--  llvm/test/Transforms/GVNHoist/hoist-unsafe-pr31729.ll | 10
-rw-r--r--  llvm/test/Transforms/GVNHoist/pr30499.ll | 10
-rw-r--r--  llvm/test/Transforms/IndVarSimplify/X86/widen-nsw.ll | 4
-rw-r--r--  llvm/test/Transforms/Inline/always-inline-attr.ll | 10
-rw-r--r--  llvm/test/Transforms/Inline/debug-info-duplicate-calls.ll | 7
-rw-r--r--  llvm/test/Transforms/Inline/inline-vla.ll | 10
-rw-r--r--  llvm/test/Transforms/Inline/optimization-remarks-hotness-threshold.ll | 6
-rw-r--r--  llvm/test/Transforms/Inline/optimization-remarks-passed-deleted-callee-yaml.ll | 6
-rw-r--r--  llvm/test/Transforms/Inline/optimization-remarks-passed-yaml.ll | 6
-rw-r--r--  llvm/test/Transforms/Inline/optimization-remarks.ll | 6
-rw-r--r--  llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-to-svbool-binops.ll | 33
-rw-r--r--  llvm/test/Transforms/InstCombine/bitreverse-hang.ll | 4
-rw-r--r--  llvm/test/Transforms/InstCombine/select-extractelement-inseltpoison.ll | 3
-rw-r--r--  llvm/test/Transforms/InstCombine/select-extractelement.ll | 18
-rw-r--r--  llvm/test/Transforms/JumpThreading/ddt-crash3.ll | 6
-rw-r--r--  llvm/test/Transforms/LICM/volatile-alias.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopRotate/noalias.ll | 6
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/X86/pr17473.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/pr18165.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopStrengthReduce/two-combinations-bug.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopUnroll/runtime-epilog-debuginfo.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/maxbandwidth-regpressure.ll | 3
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll | 226
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-constant-ops.ll | 80
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll | 14
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll | 108
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll | 90
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll | 751
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-interleave.ll | 26
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll | 48
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll | 48
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/pr33053.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/reg-usage.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll | 11
-rw-r--r--  llvm/test/Transforms/LoopVectorize/ARM/arm-ieee-vectorize.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll | 52
-rw-r--r--  llvm/test/Transforms/LoopVectorize/WebAssembly/partial-reduce-accumulate.ll | 126
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/int128_no_gather.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/tail_folding_and_assume_safety.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-profitable.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/diag-missing-instr-debug-loc.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/diag-with-hotness-info-2.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/diag-with-hotness-info.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll | 4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/metadata-width.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/runtime-check-address-space.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/runtime-check-readonly-address-space.ll | 2
-rw-r--r--  llvm/test/Transforms/LoopVectorize/scev-exitlim-crash.ll | 4
-rw-r--r--  llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll | 8
-rw-r--r--  llvm/test/Transforms/MergeICmps/X86/pr41917.ll | 16
-rw-r--r--  llvm/test/Transforms/NewGVN/basic-cyclic-opt.ll | 8
-rw-r--r--  llvm/test/Transforms/NewGVN/cond_br2-xfail.ll | 25
-rw-r--r--  llvm/test/Transforms/NewGVN/equivalent-phi.ll | 4
-rw-r--r--  llvm/test/Transforms/NewGVN/memory-handling.ll | 6
-rw-r--r--  llvm/test/Transforms/NewGVN/pr31483.ll | 11
-rw-r--r--  llvm/test/Transforms/NewGVN/pr31501.ll | 6
-rw-r--r--  llvm/test/Transforms/NewGVN/pr33187.ll | 9
-rw-r--r--  llvm/test/Transforms/NewGVN/pr33305.ll | 17
-rw-r--r--  llvm/test/Transforms/NewGVN/pr34430.ll | 4
-rw-r--r--  llvm/test/Transforms/NewGVN/pr34452.ll | 9
-rw-r--r--  llvm/test/Transforms/OpenMP/dead_use.ll | 18
-rw-r--r--  llvm/test/Transforms/OpenMP/icv_remarks.ll | 43
-rw-r--r--  llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll | 29
-rw-r--r--  llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll | 28
-rw-r--r--  llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll | 29
-rw-r--r--  llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll | 26
-rw-r--r--  llvm/test/Transforms/PGOProfile/misexpect-branch.ll | 28
-rw-r--r--  llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll | 47
-rw-r--r--  llvm/test/Transforms/PGOProfile/misexpect-switch.ll | 47
-rw-r--r--  llvm/test/tools/llvm-ir2vec/entities.mir | 28
-rw-r--r--  llvm/test/tools/llvm-ir2vec/output/lit.local.cfg | 3
-rw-r--r--  llvm/test/tools/llvm-ir2vec/output/reference_triplets.txt | 33
-rw-r--r--  llvm/test/tools/llvm-ir2vec/output/reference_x86_entities.txt | 7174
-rw-r--r--  llvm/test/tools/llvm-ir2vec/triplets.mir | 61
-rw-r--r--  llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp | 244
-rw-r--r--  llvm/tools/llvm-lto2/llvm-lto2.cpp | 11
-rw-r--r--  llvm/unittests/ADT/SmallVectorTest.cpp | 30
-rw-r--r--  llvm/unittests/ExecutionEngine/JITLink/JITLinkTestUtils.h | 2
-rw-r--r--  llvm/unittests/ExecutionEngine/Orc/EPCGenericMemoryAccessTest.cpp | 2
-rw-r--r--  llvm/unittests/ExecutionEngine/Orc/JITLinkRedirectionManagerTest.cpp | 2
-rw-r--r--  llvm/unittests/ExecutionEngine/Orc/ObjectLinkingLayerTest.cpp | 8
-rw-r--r--  llvm/unittests/ExecutionEngine/Orc/OrcTestCommon.h | 2
-rw-r--r--  llvm/unittests/ExecutionEngine/Orc/ReOptimizeLayerTest.cpp | 2
-rw-r--r--  llvm/unittests/ExecutionEngine/Orc/ResourceTrackerTest.cpp | 2
-rw-r--r--  llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp | 10
-rw-r--r--  llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn | 3
193 files changed, 11684 insertions, 2302 deletions
diff --git a/llvm/docs/CommandGuide/llvm-ir2vec.rst b/llvm/docs/CommandGuide/llvm-ir2vec.rst
index 55fe75d..f51da06 100644
--- a/llvm/docs/CommandGuide/llvm-ir2vec.rst
+++ b/llvm/docs/CommandGuide/llvm-ir2vec.rst
@@ -68,32 +68,52 @@ these two modes are used to generate the triplets and entity mappings.
Triplet Generation
~~~~~~~~~~~~~~~~~~
-With the `triplets` subcommand, :program:`llvm-ir2vec` analyzes LLVM IR and extracts
-numeric triplets consisting of opcode IDs, type IDs, and operand IDs. These triplets
+With the `triplets` subcommand, :program:`llvm-ir2vec` analyzes LLVM IR or Machine IR
+and extracts numeric triplets consisting of opcode IDs and operand IDs. These triplets
are generated in the standard format used for knowledge graph embedding training.
-The tool outputs numeric IDs directly using the ir2vec::Vocabulary mapping
-infrastructure, eliminating the need for string-to-ID preprocessing.
+The tool outputs numeric IDs directly using the vocabulary mapping infrastructure,
+eliminating the need for string-to-ID preprocessing.
-Usage:
+Usage for LLVM IR:
.. code-block:: bash
- llvm-ir2vec triplets input.bc -o triplets_train2id.txt
+ llvm-ir2vec triplets --mode=llvm input.bc -o triplets_train2id.txt
+
+Usage for Machine IR:
+
+.. code-block:: bash
+
+ llvm-ir2vec triplets --mode=mir input.mir -o triplets_train2id.txt
Entity Mapping Generation
~~~~~~~~~~~~~~~~~~~~~~~~~
With the `entities` subcommand, :program:`llvm-ir2vec` generates the entity mappings
-supported by IR2Vec in the standard format used for knowledge graph embedding
-training. This subcommand outputs all supported entities (opcodes, types, and
-operands) with their corresponding numeric IDs, and is not specific for an
-LLVM IR file.
+supported by IR2Vec or MIR2Vec in the standard format used for knowledge graph embedding
+training. This subcommand outputs all supported entities with their corresponding numeric IDs.
+
+For LLVM IR, entities include opcodes, types, and operands. For Machine IR, entities include
+machine opcodes, common operands, and register classes (both physical and virtual).
+
+Usage for LLVM IR:
-Usage:
+.. code-block:: bash
+
+ llvm-ir2vec entities --mode=llvm -o entity2id.txt
+
+Usage for Machine IR:
.. code-block:: bash
- llvm-ir2vec entities -o entity2id.txt
+ llvm-ir2vec entities --mode=mir input.mir -o entity2id.txt
+
+.. note::
+
+ For LLVM IR mode, the entity mapping is target-independent and does not require an input file.
+ For Machine IR mode, an input .mir file is required to determine the target architecture,
+ as entity mappings vary by target (different architectures have different instruction sets
+ and register classes).
Embedding Generation
~~~~~~~~~~~~~~~~~~~~
@@ -222,12 +242,17 @@ Subcommand-specific options:
.. option:: <input-file>
- The input LLVM IR or bitcode file to process. This positional argument is
- required for the `triplets` subcommand.
+ The input LLVM IR/bitcode file (.ll/.bc) or Machine IR file (.mir) to process.
+ This positional argument is required for the `triplets` subcommand.
**entities** subcommand:
- No subcommand-specific options.
+.. option:: <input-file>
+
+ The input Machine IR file (.mir) to process. This positional argument is required
+ for the `entities` subcommand when using ``--mode=mir``, as the entity mappings
+ are target-specific. For ``--mode=llvm``, no input file is required as IR2Vec
+ entity mappings are target-independent.
OUTPUT FORMAT
-------------
@@ -240,19 +265,37 @@ metadata headers. The format includes:
.. code-block:: text
- MAX_RELATIONS=<max_relations_count>
+ MAX_RELATION=<max_relation_count>
<head_entity_id> <tail_entity_id> <relation_id>
<head_entity_id> <tail_entity_id> <relation_id>
...
Each line after the metadata header represents one instruction relationship,
-with numeric IDs for head entity, relation, and tail entity. The metadata
-header (MAX_RELATIONS) provides counts for post-processing and training setup.
+with numeric IDs for head entity, tail entity, and relation type. The metadata
+header (MAX_RELATION) indicates the maximum relation ID used.
+
+**Relation Types:**
+
+For LLVM IR (IR2Vec):
+ * **0** = Type relationship (instruction to its type)
+ * **1** = Next relationship (sequential instructions)
+ * **2+** = Argument relationships (Arg0, Arg1, Arg2, ...)
+
+For Machine IR (MIR2Vec):
+ * **0** = Next relationship (sequential instructions)
+ * **1+** = Argument relationships (Arg0, Arg1, Arg2, ...)
+
+**Entity IDs:**
+
+For LLVM IR: Entity IDs represent opcodes, types, and operands as defined by the IR2Vec vocabulary.
+
+For Machine IR: Entity IDs represent machine opcodes, common operands (immediate, frame index, etc.),
+physical register classes, and virtual register classes as defined by the MIR2Vec vocabulary. The entity layout is target-specific.
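
For illustration, a triplet file produced in LLVM IR mode might begin as follows; the numeric IDs here are hypothetical and depend entirely on the vocabulary in use:

.. code-block:: text

   MAX_RELATION=2
   12 1 0
   12 7 1
   12 45 2

Reading the first data line with the format above: head entity ``12`` (an opcode) relates to tail entity ``1`` (its type) via relation ``0`` (Type); the second line links it to the next instruction's opcode entity ``7`` via relation ``1`` (Next); the third records its first argument, entity ``45``, via relation ``2`` (Arg0).
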
Entity Mode Output
~~~~~~~~~~~~~~~~~~
-In entity mode, the output consists of entity mapping in the format:
+In entity mode, the output consists of entity mappings in the format:
.. code-block:: text
@@ -264,6 +307,13 @@ In entity mode, the output consists of entity mapping in the format:
The first line contains the total number of entities, followed by one entity
mapping per line with tab-separated entity string and numeric ID.
+For LLVM IR, entities include instruction opcodes (e.g., "Add", "Ret"), types
+(e.g., "INT", "PTR"), and operand kinds.
+
+For Machine IR, entities include machine opcodes (e.g., "COPY", "ADD"),
+common operands (e.g., "Immediate", "FrameIndex"), physical register classes
+(e.g., "PhyReg_GR32"), and virtual register classes (e.g., "VirtReg_GR32").
+
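As a purely hypothetical illustration of this layout (the strings and IDs below are invented, and for Machine IR they would additionally be target-specific):

.. code-block:: text

   4
   Add     0
   Ret     1
   INT     2
   PTR     3

The leading ``4`` is the entity count; each following line pairs an entity string with its numeric ID, separated by a tab.
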
Embedding Mode Output
~~~~~~~~~~~~~~~~~~~~~
diff --git a/llvm/docs/Extensions.rst b/llvm/docs/Extensions.rst
index 214323e..91a3ac0 100644
--- a/llvm/docs/Extensions.rst
+++ b/llvm/docs/Extensions.rst
@@ -416,7 +416,36 @@ as offsets relative to prior addresses.
The following versioning schemes are currently supported (newer versions support
features of the older versions).
-Version 3 (newest): Capable of encoding callsite offsets. Enabled by the 6th bit
+Version 4 (newest): Capable of encoding basic block hashes. This feature is
+enabled by the 7th bit of the feature byte.
+
+Example:
+
+.. code-block:: gas
+
+ .section ".llvm_bb_addr_map","",@llvm_bb_addr_map
+ .byte 4 # version number
+ .byte 96 # feature byte
+ .quad .Lfunc_begin0 # address of the function
+ .byte 2 # number of basic blocks
+ # BB record for BB_0
+ .byte 0 # BB_0 ID
+ .uleb128 .Lfunc_begin0-.Lfunc_begin0 # BB_0 offset relative to function entry (always zero)
+ .byte 0 # number of callsites in this block
+ .uleb128 .LBB_END0_0-.Lfunc_begin0 # BB_0 size
+ .byte x # BB_0 metadata
+ .quad 9080480745856761856 # BB_0 hash
+ # BB record for BB_1
+ .byte 1 # BB_1 ID
+ .uleb128 .LBB0_1-.LBB_END0_0 # BB_1 offset relative to the end of last block (BB_0).
+ .byte 2 # number of callsites in this block
+ .uleb128 .LBB0_1_CS0-.LBB0_1 # offset of callsite end relative to the previous offset (.LBB0_1)
+ .uleb128 .LBB0_1_CS1-.LBB0_1_CS0 # offset of callsite end relative to the previous offset (.LBB0_1_CS0)
+ .uleb128 .LBB_END0_1-.LBB0_1_CS1 # BB_1 size offset (Offset of the block end relative to the previous offset).
+ .byte y # BB_1 metadata
+ .quad 2363478788702666771 # BB_1 hash
+
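An editorial note on the feature byte in the example above: ``96 = 32 + 64``, i.e. both the 6th bit (callsite offsets, introduced in version 3) and the 7th bit (basic block hashes, new in version 4) are set.
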
+Version 3: Capable of encoding callsite offsets. Enabled by the 6th bit
of the feature byte.
Example:
diff --git a/llvm/docs/TableGen/index.rst b/llvm/docs/TableGen/index.rst
index e916c15..e334b2e 100644
--- a/llvm/docs/TableGen/index.rst
+++ b/llvm/docs/TableGen/index.rst
@@ -20,7 +20,7 @@ domain-specific information. Because there may be a large number of these
records, it is specifically designed to allow writing flexible descriptions and
for common features of these records to be factored out. This reduces the
amount of duplication in the description, reduces the chance of error, and makes
-it easier to structure domain specific information.
+it easier to structure domain-specific information.
The TableGen front end parses a file, instantiates the declarations, and
hands the result off to a domain-specific `backend`_ for processing. See
@@ -44,8 +44,8 @@ distribution, respectively.
The TableGen program
====================
-TableGen files are interpreted by the TableGen program: `llvm-tblgen` available
-on your build directory under `bin`. It is not installed in the system (or where
+TableGen files are interpreted by the TableGen program: ``llvm-tblgen`` available
+in your build directory under ``bin``. It is not installed in the system (or where
your sysroot is set to), since it has no use beyond LLVM's build process.
Running TableGen
@@ -86,7 +86,7 @@ the `-dump-json` option.
If you plan to use TableGen, you will most likely have to write a `backend`_
that extracts the information specific to what you need and formats it in the
-appropriate way. You can do this by extending TableGen itself in C++, or by
+appropriate way. You can do this by extending TableGen itself in C++ or by
writing a script in any language that can consume the JSON output.
Example
@@ -152,7 +152,7 @@ of the x86 architecture. ``def ADD32rr`` defines a record named
``ADD32rr``, and the comment at the end of the line indicates the superclasses
of the definition. The body of the record contains all of the data that
TableGen assembled for the record, indicating that the instruction is part of
-the "X86" namespace, the pattern indicating how the instruction is selected by
+the ``X86`` namespace, the pattern indicating how the instruction is selected by
the code generator, that it is a two-address instruction, has a particular
encoding, etc. The contents and semantics of the information in the record are
specific to the needs of the X86 backend, and are only shown as an example.
@@ -175,13 +175,13 @@ TableGen, all of the information was derived from the following definition:
This definition makes use of the custom class ``I`` (extended from the custom
class ``X86Inst``), which is defined in the X86-specific TableGen file, to
factor out the common features that instructions of its class share. A key
-feature of TableGen is that it allows the end-user to define the abstractions
+feature of TableGen is that it allows the end user to define the abstractions
they prefer to use when describing their information.
Syntax
======
-TableGen has a syntax that is loosely based on C++ templates, with built-in
+TableGen has a syntax loosely based on C++ templates, with built-in
types and specification. In addition, TableGen's syntax introduces some
automation concepts like multiclass, foreach, let, etc.
@@ -193,41 +193,41 @@ which are considered 'records'.
**TableGen records** have a unique name, a list of values, and a list of
superclasses. The list of values is the main data that TableGen builds for each
-record; it is this that holds the domain specific information for the
+record; it is this that holds the domain-specific information for the
application. The interpretation of this data is left to a specific `backend`_,
-but the structure and format rules are taken care of and are fixed by
+but the structure and format rules are taken care of and fixed by
TableGen.
**TableGen definitions** are the concrete form of 'records'. These generally do
-not have any undefined values, and are marked with the '``def``' keyword.
+not have any undefined values and are marked with the '``def``' keyword.
.. code-block:: text
def FeatureFPARMv8 : SubtargetFeature<"fp-armv8", "HasFPARMv8", "true",
"Enable ARMv8 FP">;
-In this example, FeatureFPARMv8 is ``SubtargetFeature`` record initialised
+In this example, ``FeatureFPARMv8`` is a ``SubtargetFeature`` record initialised
with some values. The names of the classes are defined via the
-keyword `class` either on the same file or some other included. Most target
+keyword `class`, either in the same file or in another included file. Most target
TableGen files include the generic ones in ``include/llvm/Target``.
**TableGen classes** are abstract records that are used to build and describe
other records. These classes allow the end-user to build abstractions for
-either the domain they are targeting (such as "Register", "RegisterClass", and
-"Instruction" in the LLVM code generator) or for the implementor to help factor
-out common properties of records (such as "FPInst", which is used to represent
+either the domain they are targeting (such as ``Register``, ``RegisterClass``, and
+``Instruction`` in the LLVM code generator) or for the implementor to help factor
+out common properties of records (such as ``FPInst``, which is used to represent
floating point instructions in the X86 backend). TableGen keeps track of all of
the classes that are used to build up a definition, so the backend can find all
-definitions of a particular class, such as "Instruction".
+definitions of a particular class, such as ``Instruction``.
.. code-block:: text
class ProcNoItin<string Name, list<SubtargetFeature> Features>
: Processor<Name, NoItineraries, Features>;
-Here, the class ProcNoItin, receiving parameters `Name` of type `string` and
-a list of target features is specializing the class Processor by passing the
-arguments down as well as hard-coding NoItineraries.
+Here, the class ``ProcNoItin``, receiving parameters ``Name`` of type ``string`` and
+a list of target features, specializes the class ``Processor`` by passing the
+arguments down as well as hard-coding ``NoItineraries``.
**TableGen multiclasses** are groups of abstract records that are instantiated
all at once. Each instantiation can result in multiple TableGen definitions.
@@ -295,8 +295,8 @@ TableGen Deficiencies
Despite being very generic, TableGen has some deficiencies that have been
pointed out numerous times. The common theme is that, while TableGen allows
-you to build domain specific languages, the final languages that you create
-lack the power of other DSLs, which in turn increase considerably the size
+you to build domain-specific languages, the final languages that you create
+lack the power of other DSLs, which in turn considerably increases the size
and complexity of TableGen files.
At the same time, TableGen allows you to create virtually any meaning of
@@ -305,6 +305,6 @@ design and make it very hard for newcomers to understand the evil TableGen
file.
There are some in favor of extending the semantics even more, but making sure
-backends adhere to strict rules. Others are suggesting we should move to less,
+backends adhere to strict rules. Others suggest moving to fewer,
more powerful DSLs designed with specific purposes, or even reusing existing
DSLs.
diff --git a/llvm/include/llvm/ADT/SmallVector.h b/llvm/include/llvm/ADT/SmallVector.h
index ca0b918..51109d1 100644
--- a/llvm/include/llvm/ADT/SmallVector.h
+++ b/llvm/include/llvm/ADT/SmallVector.h
@@ -14,6 +14,7 @@
#ifndef LLVM_ADT_SMALLVECTOR_H
#define LLVM_ADT_SMALLVECTOR_H
+#include "llvm/ADT/ADL.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/Compiler.h"
#include <algorithm>
@@ -1295,28 +1296,27 @@ inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
template <typename RangeType>
using ValueTypeFromRangeType =
- std::remove_const_t<std::remove_reference_t<decltype(*std::begin(
- std::declval<RangeType &>()))>>;
+ std::remove_const_t<detail::ValueOfRange<RangeType>>;
/// Given a range of type R, iterate the entire range and return a
/// SmallVector with elements of the vector. This is useful, for example,
/// when you want to iterate a range and then sort the results.
template <unsigned Size, typename R>
SmallVector<ValueTypeFromRangeType<R>, Size> to_vector(R &&Range) {
- return {std::begin(Range), std::end(Range)};
+ return {adl_begin(Range), adl_end(Range)};
}
template <typename R>
SmallVector<ValueTypeFromRangeType<R>> to_vector(R &&Range) {
- return {std::begin(Range), std::end(Range)};
+ return {adl_begin(Range), adl_end(Range)};
}
template <typename Out, unsigned Size, typename R>
SmallVector<Out, Size> to_vector_of(R &&Range) {
- return {std::begin(Range), std::end(Range)};
+ return {adl_begin(Range), adl_end(Range)};
}
template <typename Out, typename R> SmallVector<Out> to_vector_of(R &&Range) {
- return {std::begin(Range), std::end(Range)};
+ return {adl_begin(Range), adl_end(Range)};
}
// Explicit instantiations
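
A minimal sketch (not part of the patch) of what switching from ``std::begin``/``std::end`` to ``adl_begin``/``adl_end`` buys: ranges whose ``begin``/``end`` are free functions found only through argument-dependent lookup now work with ``to_vector``. The ``demo::Box`` type below is hypothetical.

.. code-block:: cpp

   #include "llvm/ADT/SmallVector.h"

   namespace demo {
   struct Box {
     int Data[3] = {1, 2, 3};
   };
   // Free begin/end, discoverable only via ADL on demo::Box.
   inline const int *begin(const Box &B) { return B.Data; }
   inline const int *end(const Box &B) { return B.Data + 3; }
   } // namespace demo

   llvm::SmallVector<int, 4> copyBox(const demo::Box &B) {
     // std::begin(B) would not compile here; adl_begin finds demo::begin.
     return llvm::to_vector<4>(B);
   }
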
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index 5d3b233..7b7dc1b 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -227,6 +227,9 @@ public:
/// Get the kind of extension that an instruction represents.
LLVM_ABI static PartialReductionExtendKind
getPartialReductionExtendKind(Instruction *I);
+ /// Get the kind of extension that a cast opcode represents.
+ LLVM_ABI static PartialReductionExtendKind
+ getPartialReductionExtendKind(Instruction::CastOps CastOpc);
/// Construct a TTI object using a type implementing the \c Concept
/// API below.
diff --git a/llvm/include/llvm/CodeGen/MIR2Vec.h b/llvm/include/llvm/CodeGen/MIR2Vec.h
index 48bb0a5..44f009c 100644
--- a/llvm/include/llvm/CodeGen/MIR2Vec.h
+++ b/llvm/include/llvm/CodeGen/MIR2Vec.h
@@ -111,6 +111,11 @@ class MIRVocabulary {
size_t TotalEntries = 0;
} Layout;
+ // TODO: See if we can have only one reg classes section instead of physical
+ // and virtual separate sections in the vocabulary. This would reduce the
+ // number of vocabulary entities significantly.
+ // We can potentially distinguish physical and virtual registers by
+ // considering them as a separate feature.
enum class Section : unsigned {
Opcodes = 0,
CommonOperands = 1,
@@ -185,6 +190,25 @@ class MIRVocabulary {
return Storage[static_cast<unsigned>(SectionID)][LocalIndex];
}
+ /// Get entity ID (flat index) for a common operand type
+ /// This is used for triplet generation
+ unsigned getEntityIDForCommonOperand(
+ MachineOperand::MachineOperandType OperandType) const {
+ return Layout.CommonOperandBase + getCommonOperandIndex(OperandType);
+ }
+
+ /// Get entity ID (flat index) for a register
+ /// This is used for triplet generation
+ unsigned getEntityIDForRegister(Register Reg) const {
+ if (!Reg.isValid() || Reg.isStack())
+ return Layout
+ .VirtRegBase; // Return VirtRegBase for invalid/stack registers
+ unsigned LocalIndex = getRegisterOperandIndex(Reg);
+ size_t BaseOffset =
+ Reg.isPhysical() ? Layout.PhyRegBase : Layout.VirtRegBase;
+ return BaseOffset + LocalIndex;
+ }
+
public:
/// Static method for extracting base opcode names (public for testing)
static std::string extractBaseOpcodeName(StringRef InstrName);
@@ -201,6 +225,20 @@ public:
unsigned getDimension() const { return Storage.getDimension(); }
+ /// Get entity ID (flat index) for an opcode
+ /// This is used for triplet generation
+ unsigned getEntityIDForOpcode(unsigned Opcode) const {
+ return Layout.OpcodeBase + getCanonicalOpcodeIndex(Opcode);
+ }
+
+ /// Get entity ID (flat index) for a machine operand
+ /// This is used for triplet generation
+ unsigned getEntityIDForMachineOperand(const MachineOperand &MO) const {
+ if (MO.getType() == MachineOperand::MO_Register)
+ return getEntityIDForRegister(MO.getReg());
+ return getEntityIDForCommonOperand(MO.getType());
+ }
+
// Accessor methods
const Embedding &operator[](unsigned Opcode) const {
unsigned LocalIndex = getCanonicalOpcodeIndex(Opcode);
diff --git a/llvm/include/llvm/CodeGen/MachineBlockHashInfo.h b/llvm/include/llvm/CodeGen/MachineBlockHashInfo.h
new file mode 100644
index 0000000..d044d5f
--- /dev/null
+++ b/llvm/include/llvm/CodeGen/MachineBlockHashInfo.h
@@ -0,0 +1,114 @@
+//===- llvm/CodeGen/MachineBlockHashInfo.h ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Compute the hashes of basic blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEBLOCKHASHINFO_H
+#define LLVM_CODEGEN_MACHINEBLOCKHASHINFO_H
+
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+/// An object wrapping several components of a basic block hash. The combined
+/// (blended) hash is represented and stored as one uint64_t, while individual
+/// components are of smaller size (e.g., uint16_t or uint8_t).
+struct BlendedBlockHash {
+public:
+ explicit BlendedBlockHash(uint16_t Offset, uint16_t OpcodeHash,
+ uint16_t InstrHash, uint16_t NeighborHash)
+ : Offset(Offset), OpcodeHash(OpcodeHash), InstrHash(InstrHash),
+ NeighborHash(NeighborHash) {}
+
+ explicit BlendedBlockHash(uint64_t CombinedHash) {
+ Offset = CombinedHash & 0xffff;
+ CombinedHash >>= 16;
+ OpcodeHash = CombinedHash & 0xffff;
+ CombinedHash >>= 16;
+ InstrHash = CombinedHash & 0xffff;
+ CombinedHash >>= 16;
+ NeighborHash = CombinedHash & 0xffff;
+ }
+
+ /// Combine the blended hash into uint64_t.
+ uint64_t combine() const {
+ uint64_t Hash = 0;
+ Hash |= uint64_t(NeighborHash);
+ Hash <<= 16;
+ Hash |= uint64_t(InstrHash);
+ Hash <<= 16;
+ Hash |= uint64_t(OpcodeHash);
+ Hash <<= 16;
+ Hash |= uint64_t(Offset);
+ return Hash;
+ }
+
+ /// Compute a distance between two given blended hashes. The smaller the
+ /// distance, the more similar two blocks are. For identical basic blocks,
+ /// the distance is zero.
+ /// Since OpcodeHash is highly stable, we consider a match good only if
+ /// the OpcodeHashes are identical. Mismatched OpcodeHashes lead to low
+ /// matching accuracy, and poor matches undermine the quality of final
+ /// inference. Notably, during inference, we also consider the matching
+ /// ratio of basic blocks. For MachineFunctions with a low matching
+ /// ratio, we directly skip optimization to reduce the impact of
+ /// mismatches. This ensures even very poor profiles won’t cause negative
+ /// optimization.
+ /// In the context of matching, we consider NeighborHash to be more
+ /// important. This is especially true when accounting for inlining
+ /// scenarios, where the position of a basic block in the control
+ /// flow graph is more critical.
+ uint64_t distance(const BlendedBlockHash &BBH) const {
+ assert(OpcodeHash == BBH.OpcodeHash &&
+ "incorrect blended hash distance computation");
+ uint64_t Dist = 0;
+ // Account for NeighborHash
+ Dist += NeighborHash == BBH.NeighborHash ? 0 : 1;
+ Dist <<= 16;
+ // Account for InstrHash
+ Dist += InstrHash == BBH.InstrHash ? 0 : 1;
+ Dist <<= 16;
+ // Account for Offset
+ Dist += (Offset >= BBH.Offset ? Offset - BBH.Offset : BBH.Offset - Offset);
+ return Dist;
+ }
+
+private:
+ /// The offset of the basic block from the function start.
+ uint16_t Offset{0};
+ /// Hash of the basic block instructions, excluding operands.
+ uint16_t OpcodeHash{0};
+ /// Hash of the basic block instructions, including opcodes and
+ /// operands.
+ uint16_t InstrHash{0};
+ /// OpcodeHash of the basic block together with OpcodeHashes of its
+ /// successors and predecessors.
+ uint16_t NeighborHash{0};
+};
+
+class MachineBlockHashInfo : public MachineFunctionPass {
+ DenseMap<const MachineBasicBlock *, uint64_t> MBBHashInfo;
+
+public:
+ static char ID;
+ MachineBlockHashInfo();
+
+ StringRef getPassName() const override { return "Basic Block Hash Compute"; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ bool runOnMachineFunction(MachineFunction &F) override;
+
+ uint64_t getMBBHash(const MachineBasicBlock &MBB);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MACHINEBLOCKHASHINFO_H
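
A small worked example of the bit layout, added for illustration (the component values are arbitrary): ``combine()`` packs the four 16-bit fields into one ``uint64_t`` with ``Offset`` in the low bits and ``NeighborHash`` in the high bits, and the ``uint64_t`` constructor reverses the packing.

.. code-block:: cpp

   BlendedBlockHash H(/*Offset=*/4, /*OpcodeHash=*/0x1234,
                      /*InstrHash=*/0xBEEF, /*NeighborHash=*/0x00AB);
   uint64_t Packed = H.combine();      // 0x00ABBEEF12340004
   BlendedBlockHash Restored(Packed);  // splits back into the same four fields
   // distance() may only be called on blocks with equal OpcodeHash; it weighs
   // a NeighborHash mismatch highest, then InstrHash, then the offset delta.
   uint64_t D = H.distance(Restored);  // 0: the blocks are identical
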
diff --git a/llvm/include/llvm/CodeGen/Passes.h b/llvm/include/llvm/CodeGen/Passes.h
index 7fae550..9fddd47 100644
--- a/llvm/include/llvm/CodeGen/Passes.h
+++ b/llvm/include/llvm/CodeGen/Passes.h
@@ -69,6 +69,9 @@ LLVM_ABI MachineFunctionPass *createBasicBlockSectionsPass();
LLVM_ABI MachineFunctionPass *createBasicBlockPathCloningPass();
+/// createMachineBlockHashInfoPass - This pass computes basic block hashes.
+LLVM_ABI MachineFunctionPass *createMachineBlockHashInfoPass();
+
/// createMachineFunctionSplitterPass - This pass splits machine functions
/// using profile information.
LLVM_ABI MachineFunctionPass *createMachineFunctionSplitterPass();
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/Core.h b/llvm/include/llvm/ExecutionEngine/Orc/Core.h
index 8613ddd..f05febf 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/Core.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/Core.h
@@ -448,7 +448,7 @@ public:
FailedToMaterialize(std::shared_ptr<SymbolStringPool> SSP,
std::shared_ptr<SymbolDependenceMap> Symbols);
- ~FailedToMaterialize();
+ ~FailedToMaterialize() override;
std::error_code convertToErrorCode() const override;
void log(raw_ostream &OS) const override;
const SymbolDependenceMap &getSymbols() const { return *Symbols; }
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/DebugObjectManagerPlugin.h b/llvm/include/llvm/ExecutionEngine/Orc/DebugObjectManagerPlugin.h
index 254b897..e84eb4b 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/DebugObjectManagerPlugin.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/DebugObjectManagerPlugin.h
@@ -70,7 +70,7 @@ public:
DebugObjectManagerPlugin(ExecutionSession &ES,
std::unique_ptr<DebugObjectRegistrar> Target,
bool RequireDebugSections, bool AutoRegisterCode);
- ~DebugObjectManagerPlugin();
+ ~DebugObjectManagerPlugin() override;
void notifyMaterializing(MaterializationResponsibility &MR,
jitlink::LinkGraph &G, jitlink::JITLinkContext &Ctx,
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/Debugging/PerfSupportPlugin.h b/llvm/include/llvm/ExecutionEngine/Orc/Debugging/PerfSupportPlugin.h
index 179fedc..f9b1d2c 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/Debugging/PerfSupportPlugin.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/Debugging/PerfSupportPlugin.h
@@ -32,7 +32,7 @@ public:
ExecutorAddr RegisterPerfEndAddr,
ExecutorAddr RegisterPerfImplAddr, bool EmitDebugInfo,
bool EmitUnwindInfo);
- ~PerfSupportPlugin();
+ ~PerfSupportPlugin() override;
void modifyPassConfig(MaterializationResponsibility &MR,
jitlink::LinkGraph &G,
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h b/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h
index fa48480..031bb27 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h
@@ -52,7 +52,7 @@ public:
EPCGenericRTDyldMemoryManager(EPCGenericRTDyldMemoryManager &&) = delete;
EPCGenericRTDyldMemoryManager &
operator=(EPCGenericRTDyldMemoryManager &&) = delete;
- ~EPCGenericRTDyldMemoryManager();
+ ~EPCGenericRTDyldMemoryManager() override;
uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
unsigned SectionID,
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h b/llvm/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h
index fecffc2..fa4bb9d 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h
@@ -285,7 +285,7 @@ public:
/// Map type for initializing the manager. See init.
using StubInitsMap = StringMap<std::pair<ExecutorAddr, JITSymbolFlags>>;
- virtual ~IndirectStubsManager() = default;
+ ~IndirectStubsManager() override = default;
/// Create a single stub with the given name, target address and flags.
virtual Error createStub(StringRef StubName, ExecutorAddr StubAddr,
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/Layer.h b/llvm/include/llvm/ExecutionEngine/Orc/Layer.h
index 8dfc12e..25df380 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/Layer.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/Layer.h
@@ -136,7 +136,7 @@ public:
static char ID;
ObjectLayer(ExecutionSession &ES);
- virtual ~ObjectLayer();
+ ~ObjectLayer() override;
/// Returns the execution session for this layer.
ExecutionSession &getExecutionSession() { return ES; }
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/LinkGraphLinkingLayer.h b/llvm/include/llvm/ExecutionEngine/Orc/LinkGraphLinkingLayer.h
index d3643f9..2079a35 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/LinkGraphLinkingLayer.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/LinkGraphLinkingLayer.h
@@ -88,7 +88,7 @@ public:
std::unique_ptr<jitlink::JITLinkMemoryManager> MemMgr);
/// Destroy the LinkGraphLinkingLayer.
- ~LinkGraphLinkingLayer();
+ ~LinkGraphLinkingLayer() override;
/// Add a plugin.
LinkGraphLinkingLayer &addPlugin(std::shared_ptr<Plugin> P) {
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h b/llvm/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
index 1fb472a1..8c6a8f5 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h
@@ -58,7 +58,7 @@ public:
RTDyldObjectLinkingLayer(ExecutionSession &ES,
GetMemoryManagerFunction GetMemoryManager);
- ~RTDyldObjectLinkingLayer();
+ ~RTDyldObjectLinkingLayer() override;
/// Emit the object.
void emit(std::unique_ptr<MaterializationResponsibility> R,
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/SimpleRemoteEPC.h b/llvm/include/llvm/ExecutionEngine/Orc/SimpleRemoteEPC.h
index 7acb6a4..8176183 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/SimpleRemoteEPC.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/SimpleRemoteEPC.h
@@ -69,7 +69,7 @@ public:
SimpleRemoteEPC &operator=(const SimpleRemoteEPC &) = delete;
SimpleRemoteEPC(SimpleRemoteEPC &&) = delete;
SimpleRemoteEPC &operator=(SimpleRemoteEPC &&) = delete;
- ~SimpleRemoteEPC();
+ ~SimpleRemoteEPC() override;
Expected<int32_t> runAsMain(ExecutorAddr MainFnAddr,
ArrayRef<std::string> Args) override;
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h b/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h
index 85c2d65..2c385de 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/ExecutorSharedMemoryMapperService.h
@@ -29,7 +29,7 @@ namespace rt_bootstrap {
class LLVM_ABI ExecutorSharedMemoryMapperService final
: public ExecutorBootstrapService {
public:
- ~ExecutorSharedMemoryMapperService(){};
+ ~ExecutorSharedMemoryMapperService() override {};
Expected<std::pair<ExecutorAddr, std::string>> reserve(uint64_t Size);
Expected<ExecutorAddr> initialize(ExecutorAddr Reservation,
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.h b/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.h
index 7526a29d..7ca2ff2 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.h
@@ -37,7 +37,7 @@ namespace rt_bootstrap {
/// Simple page-based allocator.
class LLVM_ABI SimpleExecutorDylibManager : public ExecutorBootstrapService {
public:
- virtual ~SimpleExecutorDylibManager();
+ ~SimpleExecutorDylibManager() override;
Expected<tpctypes::DylibHandle> open(const std::string &Path, uint64_t Mode);
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h b/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h
index 6224e92..45256ec 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h
@@ -32,7 +32,7 @@ namespace rt_bootstrap {
/// Simple page-based allocator.
class LLVM_ABI SimpleExecutorMemoryManager : public ExecutorBootstrapService {
public:
- virtual ~SimpleExecutorMemoryManager();
+ ~SimpleExecutorMemoryManager() override;
Expected<ExecutorAddr> reserve(uint64_t Size);
Expected<ExecutorAddr> initialize(tpctypes::FinalizeRequest &FR);
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/TaskDispatch.h b/llvm/include/llvm/ExecutionEngine/Orc/TaskDispatch.h
index 9cf6e00..b73da19 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/TaskDispatch.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/TaskDispatch.h
@@ -37,7 +37,7 @@ class LLVM_ABI Task : public RTTIExtends<Task, RTTIRoot> {
public:
static char ID;
- virtual ~Task() = default;
+ ~Task() override = default;
/// Description of the task to be performed. Used for logging.
virtual void printDescription(raw_ostream &OS) = 0;
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h b/llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h
index 45834f1..d8506ce 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/WaitingOnGraph.h
@@ -186,9 +186,7 @@ private:
SmallVector<ElementId> SortedElems(ContainerElems.begin(),
ContainerElems.end());
llvm::sort(SortedElems);
- Hash = hash_combine(
- Hash, Container,
- hash_combine_range(SortedElems.begin(), SortedElems.end()));
+ Hash = hash_combine(Hash, Container, hash_combine_range(SortedElems));
}
return Hash;
}
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index e6cce9a4..4d59ee8 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1487,24 +1487,23 @@ def int_eh_sjlj_setup_dispatch : Intrinsic<[], []>;
//
def int_var_annotation : DefaultAttrsIntrinsic<
[], [llvm_anyptr_ty, llvm_anyptr_ty, LLVMMatchType<1>, llvm_i32_ty, LLVMMatchType<1>],
- [IntrInaccessibleMemOnly], "llvm.var.annotation">;
+ [IntrInaccessibleMemOnly]>;
def int_ptr_annotation : DefaultAttrsIntrinsic<
[llvm_anyptr_ty],
[LLVMMatchType<0>, llvm_anyptr_ty, LLVMMatchType<1>, llvm_i32_ty, LLVMMatchType<1>],
- [IntrInaccessibleMemOnly], "llvm.ptr.annotation">;
+ [IntrInaccessibleMemOnly]>;
def int_annotation : DefaultAttrsIntrinsic<
[llvm_anyint_ty],
[LLVMMatchType<0>, llvm_anyptr_ty, LLVMMatchType<1>, llvm_i32_ty],
- [IntrInaccessibleMemOnly], "llvm.annotation">;
+ [IntrInaccessibleMemOnly]>;
// Annotates the current program point with metadata strings which are emitted
// as CodeView debug info records. This is expensive, as it disables inlining
// and is modelled as having side effects.
def int_codeview_annotation : DefaultAttrsIntrinsic<[], [llvm_metadata_ty],
- [IntrInaccessibleMemOnly, IntrNoDuplicate],
- "llvm.codeview.annotation">;
+ [IntrInaccessibleMemOnly, IntrNoDuplicate]>;
//===------------------------ Trampoline Intrinsics -----------------------===//
//
@@ -1881,8 +1880,7 @@ def int_clear_cache : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty],
// Intrinsic to detect whether its argument is a constant.
def int_is_constant : DefaultAttrsIntrinsic<[llvm_i1_ty], [llvm_any_ty],
- [IntrNoMem, IntrConvergent],
- "llvm.is.constant">;
+ [IntrNoMem, IntrConvergent]>;
// Introduce a use of the argument without generating any code.
def int_fake_use : DefaultAttrsIntrinsic<[], [llvm_vararg_ty],
diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td
index 3af1750..c9df6c4 100644
--- a/llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -456,7 +456,7 @@ class WMMA_REGS<string Geom, string Frag, string PtxEltType, bit IsSparse = fals
}
class WMMA_NAME_LDST<string Op, WMMA_REGS Frag, string Layout, int WithStride> {
- string intr = "llvm.nvvm.wmma."
+ string intr_name = "llvm.nvvm.wmma."
# Frag.geom
# "." # Op
# "." # Frag.frag
@@ -467,7 +467,7 @@ class WMMA_NAME_LDST<string Op, WMMA_REGS Frag, string Layout, int WithStride> {
// TODO(tra): record name should ideally use the same field order as the intrinsic.
// E.g. string record = !subst("llvm", "int",
// !subst(".", "_", llvm));
- string record = "int_nvvm_wmma_"
+ string record_name = "int_nvvm_wmma_"
# Frag.geom
# "_" # Op
# "_" # Frag.frag
@@ -496,7 +496,7 @@ class MMA_SIGNATURE<WMMA_REGS A, WMMA_REGS B, WMMA_REGS C, WMMA_REGS D> {
class WMMA_NAME<string ALayout, string BLayout, int Satfinite, string Rnd, string b1op,
WMMA_REGS A, WMMA_REGS B, WMMA_REGS C, WMMA_REGS D> {
string signature = MMA_SIGNATURE<A, B, C, D>.ret;
- string record = "int_nvvm_wmma_"
+ string record_name = "int_nvvm_wmma_"
# A.geom
# "_mma"
# !subst(".", "_", b1op)
@@ -510,7 +510,7 @@ class WMMA_NAME<string ALayout, string BLayout, int Satfinite, string Rnd, strin
class MMA_NAME<string ALayout, string BLayout, int Satfinite, string b1op, string Kind,
WMMA_REGS A, WMMA_REGS B, WMMA_REGS C, WMMA_REGS D> {
string signature = MMA_SIGNATURE<A, B, C, D>.ret;
- string record = "int_nvvm_mma"
+ string record_name = "int_nvvm_mma"
# !subst(".", "_", b1op)
# "_" # A.geom
# "_" # ALayout
@@ -524,7 +524,7 @@ class MMA_SP_NAME<string Metadata, string Kind, int Satfinite,
WMMA_REGS A, WMMA_REGS B,
WMMA_REGS C, WMMA_REGS D> {
string signature = MMA_SIGNATURE<A, B, C, D>.ret;
- string record = "int_nvvm_mma"
+ string record_name = "int_nvvm_mma"
# "_" # !subst("::", "_", Metadata)
# "_" # A.geom
# "_row_col"
@@ -533,26 +533,37 @@ class MMA_SP_NAME<string Metadata, string Kind, int Satfinite,
# signature;
}
+// Helper class that takes an intrinsic name and construct a record name.
+// Additionally, sets `intr_name` to be non-empty if the default name assigned
+// to this intrinsic will not match the name given.
+class IntrinsicName<string name> {
+ string record_name = !subst(".", "_",
+ !subst("llvm.", "int_", name));
+ // Use explicit intrinsic name if it has an _ in it, else rely on LLVM
+ // assigned default name.
+ string intr_name = !if(!ne(!find(name, "_"), -1), name, "");
+}
+
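For clarity, a hypothetical expansion of ``IntrinsicName`` (this snippet is illustrative only and not part of the patch):

.. code-block:: text

   IntrinsicName<"llvm.nvvm.ldmatrix.sync.aligned.m8n8.x1.b16">
     record_name = "int_nvvm_ldmatrix_sync_aligned_m8n8_x1_b16"
     intr_name   = ""   (no '_' in the dotted name, so the LLVM-assigned
                         default intrinsic name is used)

   IntrinsicName<"llvm.nvvm.tcgen05.mma.shared.scale_d">
     record_name = "int_nvvm_tcgen05_mma_shared_scale_d"
     intr_name   = "llvm.nvvm.tcgen05.mma.shared.scale_d"
                        (kept explicitly, since "scale_d" contains an '_')
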
class LDMATRIX_NAME<WMMA_REGS Frag, int Trans> {
- string intr = "llvm.nvvm.ldmatrix.sync.aligned"
+ defvar name = "llvm.nvvm.ldmatrix.sync.aligned"
# "." # Frag.geom
# "." # Frag.frag
# !if(Trans, ".trans", "")
# "." # Frag.ptx_elt_type
;
- string record = !subst(".", "_",
- !subst("llvm.", "int_", intr));
+ string intr_name = IntrinsicName<name>.intr_name;
+ string record_name = IntrinsicName<name>.record_name;
}
class STMATRIX_NAME<WMMA_REGS Frag, int Trans> {
- string intr = "llvm.nvvm.stmatrix.sync.aligned"
+ defvar name = "llvm.nvvm.stmatrix.sync.aligned"
# "." # Frag.geom
# "." # Frag.frag
# !if(Trans, ".trans", "")
# "." # Frag.ptx_elt_type
;
- string record = !subst(".", "_",
- !subst("llvm.", "int_", intr));
+ string intr_name = IntrinsicName<name>.intr_name;
+ string record_name = IntrinsicName<name>.record_name;
}
// Generates list of 4-tuples of WMMA_REGS representing a valid MMA op.
@@ -1042,45 +1053,49 @@ class NVVM_TCGEN05_MMA_BASE<string Space, bit Sp> {
class NVVM_TCGEN05_MMA<bit Sp, string Space,
bit AShift, bit ScaleInputD>:
NVVM_TCGEN05_MMA_BASE<Space, Sp> {
- string intr = "llvm.nvvm.tcgen05.mma"
+ string name = "llvm.nvvm.tcgen05.mma"
# !if(!eq(Sp, 1), ".sp", "")
# "." # Space
# !if(!eq(ScaleInputD, 1), ".scale_d", "")
# !if(!eq(AShift, 1), ".ashift", "");
- string record = !subst(".", "_", !subst("llvm.", "int_", intr));
+ string intr_name = IntrinsicName<name>.intr_name;
+ string record_name = IntrinsicName<name>.record_name;
}
class NVVM_TCGEN05_MMA_BLOCKSCALE<bit Sp, string Space,
string Kind, string ScaleVecSize>:
NVVM_TCGEN05_MMA_BASE<Space, Sp> {
- string intr = "llvm.nvvm.tcgen05.mma"
+ string name = "llvm.nvvm.tcgen05.mma"
# !if(!eq(Sp, 1), ".sp", "")
# "." # Space
# "." # Kind
# ".block_scale" # ScaleVecSize;
- string record = !subst(".", "_", !subst("llvm.", "int_", intr));
+ string intr_name = IntrinsicName<name>.intr_name;
+ string record_name = IntrinsicName<name>.record_name;
}
class NVVM_TCGEN05_MMA_WS<bit Sp, string Space, bit ZeroColMask>:
NVVM_TCGEN05_MMA_BASE<Space, Sp> {
- string intr = "llvm.nvvm.tcgen05.mma.ws"
+ string name = "llvm.nvvm.tcgen05.mma.ws"
# !if(!eq(Sp, 1), ".sp", "")
# "." # Space
# !if(!eq(ZeroColMask, 1), ".zero_col_mask", "");
- string record = !subst(".", "_", !subst("llvm.", "int_", intr));
+ string intr_name = IntrinsicName<name>.intr_name;
+ string record_name = IntrinsicName<name>.record_name;
}
class NVVM_TCGEN05_MMA_DISABLE_OUTPUT_LANE<bit Sp, string Space,
int CtaGroup, bit AShift,
bit ScaleInputD>:
NVVM_TCGEN05_MMA_BASE<Space, Sp> {
- string intr = "llvm.nvvm.tcgen05.mma"
+ string name = "llvm.nvvm.tcgen05.mma"
# !if(!eq(Sp, 1), ".sp", "")
# "." # Space
# !if(!eq(ScaleInputD, 1), ".scale_d", "")
# ".disable_output_lane.cg" # CtaGroup
# !if(!eq(AShift, 1), ".ashift", "");
- string record = !subst(".", "_", !subst("llvm.", "int_", intr));
+ string intr_name = IntrinsicName<name>.intr_name;
+ string record_name = IntrinsicName<name>.record_name;
}
class NVVM_TCGEN05_MMA_BLOCKSCALE_SUPPORTED<string Kind, string ScaleVecSize> {
@@ -2273,7 +2288,7 @@ class NVVM_WMMA_LD<WMMA_REGS Frag, string Layout, int WithStride>
: Intrinsic<Frag.regs,
!if(WithStride, [llvm_anyptr_ty, llvm_i32_ty], [llvm_anyptr_ty]),
[IntrWillReturn, IntrReadMem, IntrArgMemOnly, IntrNoCallback, ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>],
- WMMA_NAME_LDST<"load", Frag, Layout, WithStride>.intr>;
+ WMMA_NAME_LDST<"load", Frag, Layout, WithStride>.intr_name>;
// WMMA.STORE.D
class NVVM_WMMA_ST<WMMA_REGS Frag, string Layout, int WithStride>
@@ -2283,18 +2298,18 @@ class NVVM_WMMA_ST<WMMA_REGS Frag, string Layout, int WithStride>
Frag.regs,
!if(WithStride, [llvm_i32_ty], [])),
[IntrWriteMem, IntrArgMemOnly, IntrNoCallback, WriteOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>],
- WMMA_NAME_LDST<"store", Frag, Layout, WithStride>.intr>;
+ WMMA_NAME_LDST<"store", Frag, Layout, WithStride>.intr_name>;
// Create all load/store variants
foreach layout = ["row", "col"] in {
foreach stride = [0, 1] in {
foreach frag = NVVM_MMA_OPS.all_ld_ops in
if NVVM_WMMA_LDST_SUPPORTED<frag, layout>.ret then
- def WMMA_NAME_LDST<"load", frag, layout, stride>.record
+ def WMMA_NAME_LDST<"load", frag, layout, stride>.record_name
: NVVM_WMMA_LD<frag, layout, stride>;
foreach frag = NVVM_MMA_OPS.all_st_ops in
if NVVM_WMMA_LDST_SUPPORTED<frag, layout>.ret then
- def WMMA_NAME_LDST<"store", frag, layout, stride>.record
+ def WMMA_NAME_LDST<"store", frag, layout, stride>.record_name
: NVVM_WMMA_ST<frag, layout, stride>;
}
}
@@ -2313,7 +2328,7 @@ foreach layout_a = ["row", "col"] in {
foreach b1op = NVVM_MMA_B1OPS<op>.ret in {
if NVVM_WMMA_SUPPORTED<op, layout_a, layout_b, satf, rnd>.ret then {
def WMMA_NAME<layout_a, layout_b, satf, rnd, b1op,
- op[0], op[1], op[2], op[3]>.record
+ op[0], op[1], op[2], op[3]>.record_name
: NVVM_MMA<op[0], op[1], op[2], op[3]>;
}
} // b1op
@@ -2330,7 +2345,7 @@ foreach layout_a = ["row", "col"] in {
foreach b1op = NVVM_MMA_B1OPS<op>.ret in {
foreach kind = ["", "kind::f8f6f4"] in {
if NVVM_MMA_SUPPORTED<op, layout_a, layout_b, kind, satf>.ret then {
- def MMA_NAME<layout_a, layout_b, satf, b1op, kind, op[0], op[1], op[2], op[3]>.record
+ def MMA_NAME<layout_a, layout_b, satf, b1op, kind, op[0], op[1], op[2], op[3]>.record_name
: NVVM_MMA<op[0], op[1], op[2], op[3]>;
}
} // kind
@@ -2379,7 +2394,7 @@ foreach metadata = ["sp", "sp::ordered_metadata"] in {
foreach op = NVVM_MMA_OPS.all_mma_sp_ops in {
if NVVM_MMA_SP_SUPPORTED<op, metadata, kind, satf>.ret then {
def MMA_SP_NAME<metadata, kind, satf,
- op[0], op[1], op[2], op[3]>.record
+ op[0], op[1], op[2], op[3]>.record_name
: NVVM_MMA_SP<op[0], op[1], op[2], op[3]>;
}
} // op
@@ -2392,12 +2407,12 @@ class NVVM_LDMATRIX<WMMA_REGS Frag, int Transposed>
: Intrinsic<Frag.regs, [llvm_anyptr_ty],
[IntrReadMem, IntrArgMemOnly, IntrNoCallback, ReadOnly<ArgIndex<0>>,
NoCapture<ArgIndex<0>>],
- LDMATRIX_NAME<Frag, Transposed>.intr>;
+ LDMATRIX_NAME<Frag, Transposed>.intr_name>;
foreach transposed = [0, 1] in {
foreach frag = NVVM_MMA_OPS.all_ldmatrix_ops in {
if NVVM_LDMATRIX_SUPPORTED<frag, transposed>.ret then {
- def LDMATRIX_NAME<frag, transposed>.record
+ def LDMATRIX_NAME<frag, transposed>.record_name
: NVVM_LDMATRIX<frag, transposed>;
}
}
@@ -2409,12 +2424,12 @@ class NVVM_STMATRIX<WMMA_REGS Frag, int Transposed>
!listconcat([llvm_anyptr_ty], Frag.regs),
[IntrWriteMem, IntrArgMemOnly, IntrNoCallback,
WriteOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>],
- STMATRIX_NAME<Frag, Transposed>.intr>;
+ STMATRIX_NAME<Frag, Transposed>.intr_name>;
foreach transposed = [0, 1] in {
foreach frag = NVVM_MMA_OPS.all_stmatrix_ops in {
if NVVM_STMATRIX_SUPPORTED<frag, transposed>.ret then {
- def STMATRIX_NAME<frag, transposed>.record
+ def STMATRIX_NAME<frag, transposed>.record_name
: NVVM_STMATRIX<frag, transposed>;
}
}
@@ -2767,14 +2782,15 @@ foreach cta_group = ["cg1", "cg2"] in {
"64x128b_warpx2_02_13",
"64x128b_warpx2_01_23",
"32x128b_warpx4"] in {
- defvar intr_suffix = StrJoin<"_", [shape, src_fmt, cta_group]>.ret;
- defvar name_suffix = StrJoin<".", [shape, src_fmt, cta_group]>.ret;
+ defvar name = "llvm.nvvm.tcgen05.cp." #
+ StrJoin<".", [shape, src_fmt, cta_group]>.ret;
- def int_nvvm_tcgen05_cp_ # intr_suffix : Intrinsic<[],
+ defvar intrinsic_name = IntrinsicName<name>;
+ def intrinsic_name.record_name : Intrinsic<[],
[llvm_tmem_ptr_ty, // tmem_addr
llvm_i64_ty], // smem descriptor
[IntrConvergent, IntrInaccessibleMemOrArgMemOnly, NoCapture<ArgIndex<0>>],
- "llvm.nvvm.tcgen05.cp." # name_suffix>;
+ intrinsic_name.intr_name>;
}
}
}
@@ -2881,9 +2897,9 @@ foreach sp = [0, 1] in {
]
);
- def mma.record:
+ def mma.record_name:
DefaultAttrsIntrinsicFlags<[], args, flags, intrinsic_properties,
- mma.intr>;
+ mma.intr_name>;
}
}
}
@@ -2918,8 +2934,8 @@ foreach sp = [0, 1] in {
Range<ArgIndex<!add(nargs, 1)>, 0, !if(!eq(ashift, 1), 2, 4)>]
);
- def mma.record: DefaultAttrsIntrinsicFlags<[], args, flags, intrinsic_properties,
- mma.intr>;
+ def mma.record_name : DefaultAttrsIntrinsicFlags<[], args, flags,
+ intrinsic_properties, mma.intr_name>;
} // ashift
} // scale_d
} // cta_group
@@ -2944,11 +2960,11 @@ foreach sp = [0, 1] in {
defvar collector_usage = ArgIndex<!add(nargs, 1)>;
if NVVM_TCGEN05_MMA_BLOCKSCALE_SUPPORTED<kind, scale_vec_size>.ret then {
- def mma.record: DefaultAttrsIntrinsicFlags<[], args, flags,
+ def mma.record_name : DefaultAttrsIntrinsicFlags<[], args, flags,
!listconcat(mma.common_intr_props,
[Range<cta_group, 1, 3>,
Range<collector_usage, 0, 4>]),
- mma.intr>;
+ mma.intr_name>;
}
}
}
@@ -2977,9 +2993,9 @@ foreach sp = [0, 1] in {
Range<ArgIndex<!add(nargs, 2)>, 0, 4>]
);
- def mma.record:
+ def mma.record_name:
DefaultAttrsIntrinsicFlags<[], args, flags, intrinsic_properties,
- mma.intr>;
+ mma.intr_name>;
}
}
}
diff --git a/llvm/include/llvm/IR/ModuleSummaryIndex.h b/llvm/include/llvm/IR/ModuleSummaryIndex.h
index cdfee72..eb60bee 100644
--- a/llvm/include/llvm/IR/ModuleSummaryIndex.h
+++ b/llvm/include/llvm/IR/ModuleSummaryIndex.h
@@ -178,6 +178,8 @@ struct alignas(8) GlobalValueSummaryInfo {
/// only be called prior to index-based internalization and promotion.
inline void verifyLocal() const;
+ bool hasLocal() const { return HasLocal; }
+
private:
/// List of global value summary structures for a particular value held
/// in the GlobalValueMap. Requires a vector in the case of multiple
@@ -239,6 +241,8 @@ struct ValueInfo {
void verifyLocal() const { getRef()->second.verifyLocal(); }
+ bool hasLocal() const { return getRef()->second.hasLocal(); }
+
// Even if the index is built with GVs available, we may not have one for
// summary entries synthesized for profiled indirect call targets.
bool hasName() const { return !haveGVs() || getValue(); }
diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h
index d507ba2..581b4ad 100644
--- a/llvm/include/llvm/InitializePasses.h
+++ b/llvm/include/llvm/InitializePasses.h
@@ -62,6 +62,7 @@ LLVM_ABI void initializeBasicBlockSectionsPass(PassRegistry &);
LLVM_ABI void initializeBarrierNoopPass(PassRegistry &);
LLVM_ABI void initializeBasicAAWrapperPassPass(PassRegistry &);
LLVM_ABI void initializeBlockFrequencyInfoWrapperPassPass(PassRegistry &);
+LLVM_ABI void initializeMachineBlockHashInfoPass(PassRegistry &);
LLVM_ABI void initializeBranchFolderLegacyPass(PassRegistry &);
LLVM_ABI void initializeBranchProbabilityInfoWrapperPassPass(PassRegistry &);
LLVM_ABI void initializeBranchRelaxationLegacyPass(PassRegistry &);
diff --git a/llvm/include/llvm/LTO/LTO.h b/llvm/include/llvm/LTO/LTO.h
index 000472f..a837cdd 100644
--- a/llvm/include/llvm/LTO/LTO.h
+++ b/llvm/include/llvm/LTO/LTO.h
@@ -317,6 +317,8 @@ LLVM_ABI ThinBackend createInProcessThinBackend(
/// distributor.
/// RemoteCompiler specifies the path to a Clang executable to be invoked for
/// the backend jobs.
+/// RemoteCompilerPrependArgs specifies a list of arguments to be prepended to
+/// the backend compilations.
/// RemoteCompilerArgs specifies a list of arguments to be applied to the
/// backend compilations.
/// SaveTemps is a debugging tool that prevents temporary files created by this
@@ -326,6 +328,7 @@ LLVM_ABI ThinBackend createOutOfProcessThinBackend(
bool ShouldEmitIndexFiles, bool ShouldEmitImportsFiles,
StringRef LinkerOutputFile, StringRef Distributor,
ArrayRef<StringRef> DistributorArgs, StringRef RemoteCompiler,
+ ArrayRef<StringRef> RemoteCompilerPrependArgs,
ArrayRef<StringRef> RemoteCompilerArgs, bool SaveTemps);
/// This ThinBackend writes individual module indexes to files, instead of
diff --git a/llvm/include/llvm/MC/MCContext.h b/llvm/include/llvm/MC/MCContext.h
index 4a528ee..74abe34 100644
--- a/llvm/include/llvm/MC/MCContext.h
+++ b/llvm/include/llvm/MC/MCContext.h
@@ -175,7 +175,7 @@ private:
unsigned GetInstance(unsigned LocalLabelVal);
/// SHT_LLVM_BB_ADDR_MAP version to emit.
- uint8_t BBAddrMapVersion = 3;
+ uint8_t BBAddrMapVersion = 4;
/// The file name of the log file from the environment variable
/// AS_SECURE_LOG_FILE. Which must be set before the .secure_log_unique
diff --git a/llvm/lib/Analysis/LoopCacheAnalysis.cpp b/llvm/lib/Analysis/LoopCacheAnalysis.cpp
index 050c327..424a7fe 100644
--- a/llvm/lib/Analysis/LoopCacheAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopCacheAnalysis.cpp
@@ -436,10 +436,9 @@ bool IndexedReference::delinearize(const LoopInfo &LI) {
const SCEV *StepRec = AccessFnAR ? AccessFnAR->getStepRecurrence(SE) : nullptr;
if (StepRec && SE.isKnownNegative(StepRec))
- AccessFn = SE.getAddRecExpr(AccessFnAR->getStart(),
- SE.getNegativeSCEV(StepRec),
- AccessFnAR->getLoop(),
- AccessFnAR->getNoWrapFlags());
+ AccessFn = SE.getAddRecExpr(
+ AccessFnAR->getStart(), SE.getNegativeSCEV(StepRec),
+ AccessFnAR->getLoop(), SCEV::NoWrapFlags::FlagAnyWrap);
const SCEV *Div = SE.getUDivExactExpr(AccessFn, ElemSize);
Subscripts.push_back(Div);
Sizes.push_back(ElemSize);
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index bf62623..c47a1c1 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -1001,13 +1001,25 @@ InstructionCost TargetTransformInfo::getShuffleCost(
TargetTransformInfo::PartialReductionExtendKind
TargetTransformInfo::getPartialReductionExtendKind(Instruction *I) {
- if (isa<SExtInst>(I))
- return PR_SignExtend;
- if (isa<ZExtInst>(I))
- return PR_ZeroExtend;
+ if (auto *Cast = dyn_cast<CastInst>(I))
+ return getPartialReductionExtendKind(Cast->getOpcode());
return PR_None;
}
+TargetTransformInfo::PartialReductionExtendKind
+TargetTransformInfo::getPartialReductionExtendKind(
+ Instruction::CastOps CastOpc) {
+ switch (CastOpc) {
+ case Instruction::CastOps::ZExt:
+ return PR_ZeroExtend;
+ case Instruction::CastOps::SExt:
+ return PR_SignExtend;
+ default:
+ return PR_None;
+ }
+}
+
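For illustration, a minimal standalone sketch (not LLVM code) of the opcode-to-extend-kind mapping the new overload provides, so callers can classify an extension without materializing a CastInst; the enum names below are stand-ins for the real TTI definitions:

#include <cassert>

enum class CastOp { ZExt, SExt, Trunc };
enum class PartialReductionExtendKind { None, SignExtend, ZeroExtend };

static PartialReductionExtendKind getExtendKind(CastOp Op) {
  switch (Op) {
  case CastOp::ZExt:
    return PartialReductionExtendKind::ZeroExtend;
  case CastOp::SExt:
    return PartialReductionExtendKind::SignExtend;
  default:
    return PartialReductionExtendKind::None;
  }
}

int main() {
  assert(getExtendKind(CastOp::ZExt) == PartialReductionExtendKind::ZeroExtend);
  assert(getExtendKind(CastOp::Trunc) == PartialReductionExtendKind::None);
  return 0;
}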
TTI::CastContextHint
TargetTransformInfo::getCastContextHint(const Instruction *I) {
if (!I)
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index fefde64f..8aa488f 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -41,6 +41,7 @@
#include "llvm/CodeGen/GCMetadataPrinter.h"
#include "llvm/CodeGen/LazyMachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineBlockHashInfo.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
@@ -184,6 +185,8 @@ static cl::opt<bool> PrintLatency(
cl::desc("Print instruction latencies as verbose asm comments"), cl::Hidden,
cl::init(false));
+extern cl::opt<bool> EmitBBHash;
+
STATISTIC(EmittedInsts, "Number of machine instrs printed");
char AsmPrinter::ID = 0;
@@ -474,6 +477,8 @@ void AsmPrinter::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<GCModuleInfo>();
AU.addRequired<LazyMachineBlockFrequencyInfoPass>();
AU.addRequired<MachineBranchProbabilityInfoWrapperPass>();
+ if (EmitBBHash)
+ AU.addRequired<MachineBlockHashInfo>();
}
bool AsmPrinter::doInitialization(Module &M) {
@@ -1434,14 +1439,11 @@ getBBAddrMapFeature(const MachineFunction &MF, int NumMBBSectionRanges,
"BB entries info is required for BBFreq and BrProb "
"features");
}
- return {FuncEntryCountEnabled,
- BBFreqEnabled,
- BrProbEnabled,
+ return {FuncEntryCountEnabled, BBFreqEnabled, BrProbEnabled,
MF.hasBBSections() && NumMBBSectionRanges > 1,
// Use static_cast to avoid breakage of tests on windows.
- static_cast<bool>(BBAddrMapSkipEmitBBEntries),
- HasCalls,
- false};
+ static_cast<bool>(BBAddrMapSkipEmitBBEntries), HasCalls,
+ static_cast<bool>(EmitBBHash)};
}
void AsmPrinter::emitBBAddrMapSection(const MachineFunction &MF) {
@@ -1500,6 +1502,9 @@ void AsmPrinter::emitBBAddrMapSection(const MachineFunction &MF) {
PrevMBBEndSymbol = MBBSymbol;
}
+ auto MBHI =
+ Features.BBHash ? &getAnalysis<MachineBlockHashInfo>() : nullptr;
+
if (!Features.OmitBBEntries) {
OutStreamer->AddComment("BB id");
// Emit the BB ID for this basic block.
@@ -1527,6 +1532,10 @@ void AsmPrinter::emitBBAddrMapSection(const MachineFunction &MF) {
emitLabelDifferenceAsULEB128(MBB.getEndSymbol(), CurrentLabel);
// Emit the Metadata.
OutStreamer->emitULEB128IntValue(getBBAddrMapMetadata(MBB));
+ // Emit the Hash.
+ if (MBHI) {
+ OutStreamer->emitInt64(MBHI->getMBBHash(MBB));
+ }
}
PrevMBBEndSymbol = MBB.getEndSymbol();
}
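A standalone sketch (not LLVM code) of the per-block record layout emitted above: ULEB128 ID, offset, size and metadata, followed by a fixed-width little-endian 64-bit hash when the BBHash feature is set; the surrounding BBAddrMap framing (version and feature bytes, function ranges) is omitted:

#include <cstdint>
#include <vector>

// Append V as ULEB128.
static void emitULEB128(std::vector<uint8_t> &Out, uint64_t V) {
  do {
    uint8_t B = V & 0x7f;
    V >>= 7;
    if (V)
      B |= 0x80;
    Out.push_back(B);
  } while (V);
}

static void emitBBEntry(std::vector<uint8_t> &Out, uint64_t ID, uint64_t Offset,
                        uint64_t Size, uint64_t Metadata, bool EmitHash,
                        uint64_t Hash) {
  emitULEB128(Out, ID);
  emitULEB128(Out, Offset);
  emitULEB128(Out, Size);
  emitULEB128(Out, Metadata);
  if (EmitHash) // fixed 8-byte hash emitted after the metadata, as above
    for (int I = 0; I < 8; ++I)
      Out.push_back(uint8_t(Hash >> (8 * I)));
}

int main() {
  std::vector<uint8_t> Out;
  emitBBEntry(Out, /*ID=*/0, /*Offset=*/0, /*Size=*/12, /*Metadata=*/1,
              /*EmitHash=*/true, /*Hash=*/0x0123456789abcdefULL);
  return Out.size() == 12 ? 0 : 1;
}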
diff --git a/llvm/lib/CodeGen/CMakeLists.txt b/llvm/lib/CodeGen/CMakeLists.txt
index b6872605..4373c53 100644
--- a/llvm/lib/CodeGen/CMakeLists.txt
+++ b/llvm/lib/CodeGen/CMakeLists.txt
@@ -108,6 +108,7 @@ add_llvm_component_library(LLVMCodeGen
LowerEmuTLS.cpp
MachineBasicBlock.cpp
MachineBlockFrequencyInfo.cpp
+ MachineBlockHashInfo.cpp
MachineBlockPlacement.cpp
MachineBranchProbabilityInfo.cpp
MachineCFGPrinter.cpp
diff --git a/llvm/lib/CodeGen/MachineBlockHashInfo.cpp b/llvm/lib/CodeGen/MachineBlockHashInfo.cpp
new file mode 100644
index 0000000..c4d9c0f
--- /dev/null
+++ b/llvm/lib/CodeGen/MachineBlockHashInfo.cpp
@@ -0,0 +1,115 @@
+//===- llvm/CodeGen/MachineBlockHashInfo.cpp -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Compute the hashes of basic blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/MachineBlockHashInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+static uint64_t hashBlock(const MachineBasicBlock &MBB, bool HashOperands) {
+ uint64_t Hash = 0;
+ for (const MachineInstr &MI : MBB) {
+ if (MI.isMetaInstruction() || MI.isTerminator())
+ continue;
+ Hash = hashing::detail::hash_16_bytes(Hash, MI.getOpcode());
+ if (HashOperands) {
+ for (unsigned i = 0; i < MI.getNumOperands(); i++) {
+ Hash =
+ hashing::detail::hash_16_bytes(Hash, hash_value(MI.getOperand(i)));
+ }
+ }
+ }
+ return Hash;
+}
+
+/// Fold a 64-bit integer to a 16-bit one.
+static uint16_t fold_64_to_16(const uint64_t Value) {
+ uint16_t Res = static_cast<uint16_t>(Value);
+ Res ^= static_cast<uint16_t>(Value >> 16);
+ Res ^= static_cast<uint16_t>(Value >> 32);
+ Res ^= static_cast<uint16_t>(Value >> 48);
+ return Res;
+}
+
+INITIALIZE_PASS(MachineBlockHashInfo, "machine-block-hash",
+ "Machine Block Hash Analysis", true, true)
+
+char MachineBlockHashInfo::ID = 0;
+
+MachineBlockHashInfo::MachineBlockHashInfo() : MachineFunctionPass(ID) {
+ initializeMachineBlockHashInfoPass(*PassRegistry::getPassRegistry());
+}
+
+void MachineBlockHashInfo::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ MachineFunctionPass::getAnalysisUsage(AU);
+}
+
+struct CollectHashInfo {
+ uint64_t Offset;
+ uint64_t OpcodeHash;
+ uint64_t InstrHash;
+ uint64_t NeighborHash;
+};
+
+bool MachineBlockHashInfo::runOnMachineFunction(MachineFunction &F) {
+ DenseMap<const MachineBasicBlock *, CollectHashInfo> HashInfos;
+ uint16_t Offset = 0;
+  // Initialize hash components.
+  for (const MachineBasicBlock &MBB : F) {
+    // Offset of the machine basic block within the function.
+    HashInfos[&MBB].Offset = Offset;
+    Offset += MBB.size();
+    // Hash of opcodes only.
+    HashInfos[&MBB].OpcodeHash = hashBlock(MBB, /*HashOperands=*/false);
+    // Hash of complete instructions, including operands.
+    HashInfos[&MBB].InstrHash = hashBlock(MBB, /*HashOperands=*/true);
+ }
+
+ // Initialize neighbor hash
+ for (const MachineBasicBlock &MBB : F) {
+ uint64_t Hash = HashInfos[&MBB].OpcodeHash;
+ // Append hashes of successors
+ for (const MachineBasicBlock *SuccMBB : MBB.successors()) {
+ uint64_t SuccHash = HashInfos[SuccMBB].OpcodeHash;
+ Hash = hashing::detail::hash_16_bytes(Hash, SuccHash);
+ }
+ // Append hashes of predecessors
+ for (const MachineBasicBlock *PredMBB : MBB.predecessors()) {
+ uint64_t PredHash = HashInfos[PredMBB].OpcodeHash;
+ Hash = hashing::detail::hash_16_bytes(Hash, PredHash);
+ }
+ HashInfos[&MBB].NeighborHash = Hash;
+ }
+
+ // Assign hashes
+ for (const MachineBasicBlock &MBB : F) {
+ const auto &HashInfo = HashInfos[&MBB];
+ BlendedBlockHash BlendedHash(fold_64_to_16(HashInfo.Offset),
+ fold_64_to_16(HashInfo.OpcodeHash),
+ fold_64_to_16(HashInfo.InstrHash),
+ fold_64_to_16(HashInfo.NeighborHash));
+ MBBHashInfo[&MBB] = BlendedHash.combine();
+ }
+
+ return false;
+}
+
+uint64_t MachineBlockHashInfo::getMBBHash(const MachineBasicBlock &MBB) {
+ return MBBHashInfo[&MBB];
+}
+
+MachineFunctionPass *llvm::createMachineBlockHashInfoPass() {
+ return new MachineBlockHashInfo();
+}
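A standalone sketch (not LLVM code) of the blending scheme above: each 64-bit component is folded to 16 bits and the four components are packed into a single 64-bit value. The packing order below is only an assumption for illustration; the real layout is whatever BlendedBlockHash in MachineBlockHashInfo.h defines:

#include <cstdint>
#include <cstdio>

// XOR-fold a 64-bit value down to 16 bits (same idea as fold_64_to_16 above).
static uint16_t fold64To16(uint64_t V) {
  return static_cast<uint16_t>(V) ^ static_cast<uint16_t>(V >> 16) ^
         static_cast<uint16_t>(V >> 32) ^ static_cast<uint16_t>(V >> 48);
}

// Pack the four folded components into one 64-bit blended hash.
static uint64_t blend(uint16_t Offset, uint16_t Opcode, uint16_t Instr,
                      uint16_t Neighbor) {
  return (uint64_t)Offset << 48 | (uint64_t)Opcode << 32 |
         (uint64_t)Instr << 16 | (uint64_t)Neighbor;
}

int main() {
  uint64_t H = blend(fold64To16(3), fold64To16(0xdeadbeefcafef00dULL),
                     fold64To16(0x1234567890abcdefULL), fold64To16(0x42));
  std::printf("blended hash: 0x%016llx\n", (unsigned long long)H);
  return 0;
}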
diff --git a/llvm/lib/CodeGen/TargetPassConfig.cpp b/llvm/lib/CodeGen/TargetPassConfig.cpp
index b6169e6..10b7238 100644
--- a/llvm/lib/CodeGen/TargetPassConfig.cpp
+++ b/llvm/lib/CodeGen/TargetPassConfig.cpp
@@ -272,6 +272,12 @@ static cl::opt<bool>
cl::desc("Split static data sections into hot and cold "
"sections using profile information"));
+cl::opt<bool> EmitBBHash(
+ "emit-bb-hash",
+ cl::desc(
+ "Emit the hash of basic block in the SHT_LLVM_BB_ADDR_MAP section."),
+ cl::init(false), cl::Optional);
+
/// Allow standard passes to be disabled by command line options. This supports
/// simple binary flags that either suppress the pass or do nothing.
/// i.e. -disable-mypass=false has no effect.
@@ -1281,6 +1287,8 @@ void TargetPassConfig::addMachinePasses() {
// address map (or both).
if (TM->getBBSectionsType() != llvm::BasicBlockSection::None ||
TM->Options.BBAddrMap) {
+ if (EmitBBHash)
+ addPass(llvm::createMachineBlockHashInfoPass());
if (TM->getBBSectionsType() == llvm::BasicBlockSection::List) {
addPass(llvm::createBasicBlockSectionsProfileReaderWrapperPass(
TM->getBBSectionsFuncListBuf()));
diff --git a/llvm/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp b/llvm/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp
index 6c7e27e..fa04976 100644
--- a/llvm/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp
+++ b/llvm/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp
@@ -247,7 +247,7 @@ public:
StandardSegments(std::move(StandardSegments)),
FinalizationSegments(std::move(FinalizationSegments)) {}
- ~IPInFlightAlloc() {
+ ~IPInFlightAlloc() override {
assert(!G && "InFlight alloc neither abandoned nor finalized");
}
diff --git a/llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupportPlugin.cpp b/llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupportPlugin.cpp
index 75ae80f..4ceff48 100644
--- a/llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupportPlugin.cpp
+++ b/llvm/lib/ExecutionEngine/Orc/Debugging/DebuggerSupportPlugin.cpp
@@ -38,7 +38,7 @@ public:
MachODebugObjectSynthesizerBase(LinkGraph &G, ExecutorAddr RegisterActionAddr)
: G(G), RegisterActionAddr(RegisterActionAddr) {}
- virtual ~MachODebugObjectSynthesizerBase() = default;
+ ~MachODebugObjectSynthesizerBase() override = default;
Error preserveDebugSections() {
if (G.findSectionByName(SynthDebugSectionName)) {
diff --git a/llvm/lib/ExecutionEngine/Orc/LinkGraphLinkingLayer.cpp b/llvm/lib/ExecutionEngine/Orc/LinkGraphLinkingLayer.cpp
index d1a6eaf..a2990ab 100644
--- a/llvm/lib/ExecutionEngine/Orc/LinkGraphLinkingLayer.cpp
+++ b/llvm/lib/ExecutionEngine/Orc/LinkGraphLinkingLayer.cpp
@@ -55,7 +55,7 @@ public:
Plugins = Layer.Plugins;
}
- ~JITLinkCtx() {
+ ~JITLinkCtx() override {
// If there is an object buffer return function then use it to
// return ownership of the buffer.
if (Layer.ReturnObjectBuffer && ObjBuffer)
diff --git a/llvm/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp b/llvm/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp
index fd805fbf..cdde733 100644
--- a/llvm/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp
+++ b/llvm/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp
@@ -92,7 +92,7 @@ public:
Name(std::move(Name)), Ctx(Ctx), Materialize(Materialize),
Discard(Discard), Destroy(Destroy) {}
- ~OrcCAPIMaterializationUnit() {
+ ~OrcCAPIMaterializationUnit() override {
if (Ctx)
Destroy(Ctx);
}
@@ -264,7 +264,7 @@ public:
LLVMOrcCAPIDefinitionGeneratorTryToGenerateFunction TryToGenerate)
: Dispose(Dispose), Ctx(Ctx), TryToGenerate(TryToGenerate) {}
- ~CAPIDefinitionGenerator() {
+ ~CAPIDefinitionGenerator() override {
if (Dispose)
Dispose(Ctx);
}
diff --git a/llvm/lib/LTO/LTO.cpp b/llvm/lib/LTO/LTO.cpp
index 86780e1..9d0fa11 100644
--- a/llvm/lib/LTO/LTO.cpp
+++ b/llvm/lib/LTO/LTO.cpp
@@ -2224,6 +2224,7 @@ class OutOfProcessThinBackend : public CGThinBackend {
ArrayRef<StringRef> DistributorArgs;
SString RemoteCompiler;
+ ArrayRef<StringRef> RemoteCompilerPrependArgs;
ArrayRef<StringRef> RemoteCompilerArgs;
bool SaveTemps;
@@ -2260,12 +2261,14 @@ public:
bool ShouldEmitIndexFiles, bool ShouldEmitImportsFiles,
StringRef LinkerOutputFile, StringRef Distributor,
ArrayRef<StringRef> DistributorArgs, StringRef RemoteCompiler,
+ ArrayRef<StringRef> RemoteCompilerPrependArgs,
ArrayRef<StringRef> RemoteCompilerArgs, bool SaveTemps)
: CGThinBackend(Conf, CombinedIndex, ModuleToDefinedGVSummaries,
AddStream, OnWrite, ShouldEmitIndexFiles,
ShouldEmitImportsFiles, ThinLTOParallelism),
LinkerOutputFile(LinkerOutputFile), DistributorPath(Distributor),
DistributorArgs(DistributorArgs), RemoteCompiler(RemoteCompiler),
+ RemoteCompilerPrependArgs(RemoteCompilerPrependArgs),
RemoteCompilerArgs(RemoteCompilerArgs), SaveTemps(SaveTemps) {}
virtual void setup(unsigned ThinLTONumTasks, unsigned ThinLTOTaskOffset,
@@ -2387,6 +2390,11 @@ public:
JOS.attributeArray("args", [&]() {
JOS.value(RemoteCompiler);
+      // Forward any supplied prepend options.
+      for (StringRef A : RemoteCompilerPrependArgs)
+        JOS.value(A);
+
JOS.value("-c");
JOS.value(Saver.save("--target=" + Triple.str()));
@@ -2517,6 +2525,7 @@ ThinBackend lto::createOutOfProcessThinBackend(
bool ShouldEmitIndexFiles, bool ShouldEmitImportsFiles,
StringRef LinkerOutputFile, StringRef Distributor,
ArrayRef<StringRef> DistributorArgs, StringRef RemoteCompiler,
+ ArrayRef<StringRef> RemoteCompilerPrependArgs,
ArrayRef<StringRef> RemoteCompilerArgs, bool SaveTemps) {
auto Func =
[=](const Config &Conf, ModuleSummaryIndex &CombinedIndex,
@@ -2526,7 +2535,7 @@ ThinBackend lto::createOutOfProcessThinBackend(
Conf, CombinedIndex, Parallelism, ModuleToDefinedGVSummaries,
AddStream, OnWrite, ShouldEmitIndexFiles, ShouldEmitImportsFiles,
LinkerOutputFile, Distributor, DistributorArgs, RemoteCompiler,
- RemoteCompilerArgs, SaveTemps);
+ RemoteCompilerPrependArgs, RemoteCompilerArgs, SaveTemps);
};
return ThinBackend(Func, Parallelism);
}
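A standalone sketch (not LLVM code) of the backend command line assembled in the JSON above: compiler path first, then any prepend arguments, then the fixed options; the "--driver-mode=g++" value is only an example of what a caller might pass through the new parameter:

#include <cstdio>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> Args = {"clang"};                // RemoteCompiler
  std::vector<std::string> Prepend = {"--driver-mode=g++"}; // example only
  Args.insert(Args.end(), Prepend.begin(), Prepend.end());  // prepend args
  Args.push_back("-c");
  Args.push_back("--target=x86_64-unknown-linux-gnu");      // example triple
  for (const std::string &A : Args)
    std::printf("%s ", A.c_str());
  std::printf("\n");
  return 0;
}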
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index f788c75..92f260f 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -4005,24 +4005,20 @@ def : Pat<(i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
(SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
// load zero-extended i32, bitcast to f64
-def : Pat <(f64 (bitconvert (i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
- (SUBREG_TO_REG (i64 0), (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
-
+def : Pat<(f64 (bitconvert (i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
+ (SUBREG_TO_REG (i64 0), (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
// load zero-extended i16, bitcast to f64
-def : Pat <(f64 (bitconvert (i64 (zextloadi16 (am_indexed32 GPR64sp:$Rn, uimm12s2:$offset))))),
- (SUBREG_TO_REG (i64 0), (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
-
+def : Pat<(f64 (bitconvert (i64 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
+ (SUBREG_TO_REG (i64 0), (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
// load zero-extended i8, bitcast to f64
-def : Pat <(f64 (bitconvert (i64 (zextloadi8 (am_indexed32 GPR64sp:$Rn, uimm12s1:$offset))))),
- (SUBREG_TO_REG (i64 0), (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
-
+def : Pat<(f64 (bitconvert (i64 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
+ (SUBREG_TO_REG (i64 0), (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
// load zero-extended i16, bitcast to f32
-def : Pat <(f32 (bitconvert (i32 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
- (SUBREG_TO_REG (i32 0), (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
-
+def : Pat<(f32 (bitconvert (i32 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
+ (SUBREG_TO_REG (i32 0), (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
// load zero-extended i8, bitcast to f32
-def : Pat <(f32 (bitconvert (i32 (zextloadi8 (am_indexed16 GPR64sp:$Rn, uimm12s1:$offset))))),
- (SUBREG_TO_REG (i32 0), (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
+def : Pat<(f32 (bitconvert (i32 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
+ (SUBREG_TO_REG (i32 0), (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
// Pre-fetch.
def PRFMui : PrefetchUI<0b11, 0, 0b10, "prfm",
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index e3370d3..2053fc4 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -1577,18 +1577,26 @@ static SVEIntrinsicInfo constructSVEIntrinsicInfo(IntrinsicInst &II) {
}
static bool isAllActivePredicate(Value *Pred) {
- // Look through convert.from.svbool(convert.to.svbool(...) chain.
Value *UncastedPred;
+
+ // Look through predicate casts that only remove lanes.
if (match(Pred, m_Intrinsic<Intrinsic::aarch64_sve_convert_from_svbool>(
- m_Intrinsic<Intrinsic::aarch64_sve_convert_to_svbool>(
- m_Value(UncastedPred)))))
- // If the predicate has the same or less lanes than the uncasted
- // predicate then we know the casting has no effect.
- if (cast<ScalableVectorType>(Pred->getType())->getMinNumElements() <=
- cast<ScalableVectorType>(UncastedPred->getType())->getMinNumElements())
- Pred = UncastedPred;
+ m_Value(UncastedPred)))) {
+ auto *OrigPredTy = cast<ScalableVectorType>(Pred->getType());
+ Pred = UncastedPred;
+
+ if (match(Pred, m_Intrinsic<Intrinsic::aarch64_sve_convert_to_svbool>(
+ m_Value(UncastedPred))))
+      // If the predicate has the same number of lanes as the uncasted
+      // predicate, or fewer, then we know the casting has no effect.
+ if (OrigPredTy->getMinNumElements() <=
+ cast<ScalableVectorType>(UncastedPred->getType())
+ ->getMinNumElements())
+ Pred = UncastedPred;
+ }
+
auto *C = dyn_cast<Constant>(Pred);
- return (C && C->isAllOnesValue());
+ return C && C->isAllOnesValue();
}
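A standalone sketch (not LLVM code) of the lane-count guard above: converting a narrower predicate to svbool zeroes the extra lanes, so an all-active narrow predicate does not make the wider round-tripped one all-active; the cast may only be looked through when the outer type has no more lanes than the uncasted predicate:

#include <cassert>

// Minimum element counts stand in for the scalable predicate types.
static bool castHasNoEffect(unsigned OuterMinLanes, unsigned UncastedMinLanes) {
  return OuterMinLanes <= UncastedMinLanes;
}

int main() {
  assert(castHasNoEffect(4, 16));  // e.g. nxv4i1 taken from an nxv16i1 source
  assert(!castHasNoEffect(16, 4)); // widening: the upper lanes were zeroed
  return 0;
}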
// Simplify `V` by only considering the operations that affect active lanes.
diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
index 3368a50..36b9908 100644
--- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
+++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp
@@ -1471,6 +1471,435 @@ void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
// instructions) auto-generated.
#include "ARMGenMCPseudoLowering.inc"
+// Helper function to check if a register is live (used as an implicit operand)
+// in the given call instruction.
+static bool isRegisterLiveInCall(const MachineInstr &Call, MCRegister Reg) {
+ for (const MachineOperand &MO : Call.implicit_operands()) {
+ if (MO.isReg() && MO.getReg() == Reg && MO.isUse()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void ARMAsmPrinter::EmitKCFI_CHECK_ARM32(Register AddrReg, int64_t Type,
+ const MachineInstr &Call,
+ int64_t PrefixNops) {
+ // Choose scratch register: r12 primary, r3 if target is r12.
+ unsigned ScratchReg = ARM::R12;
+ if (AddrReg == ARM::R12) {
+ ScratchReg = ARM::R3;
+ }
+
+ // Calculate ESR for ARM mode (16-bit): 0x8000 | (scratch_reg << 5) | addr_reg
+ // Note: scratch_reg is always 0x1F since the EOR sequence clobbers it.
+ const ARMBaseRegisterInfo *TRI = static_cast<const ARMBaseRegisterInfo *>(
+ MF->getSubtarget().getRegisterInfo());
+ unsigned AddrIndex = TRI->getEncodingValue(AddrReg);
+ unsigned ESR = 0x8000 | (31 << 5) | (AddrIndex & 31);
+
+ // Check if r3 is live and needs to be spilled.
+ bool NeedSpillR3 =
+ (ScratchReg == ARM::R3) && isRegisterLiveInCall(Call, ARM::R3);
+
+ // If we need to spill r3, push it first.
+ if (NeedSpillR3) {
+ // push {r3}
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::STMDB_UPD)
+ .addReg(ARM::SP)
+ .addReg(ARM::SP)
+ .addImm(ARMCC::AL)
+ .addReg(0)
+ .addReg(ARM::R3));
+ }
+
+ // Clear bit 0 of target address to handle Thumb function pointers.
+ // In 32-bit ARM, function pointers may have the low bit set to indicate
+ // Thumb state when ARM/Thumb interworking is enabled (ARMv4T and later).
+ // We need to clear it to avoid an alignment fault when loading.
+ // bic scratch, target, #1
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::BICri)
+ .addReg(ScratchReg)
+ .addReg(AddrReg)
+ .addImm(1)
+ .addImm(ARMCC::AL)
+ .addReg(0)
+ .addReg(0));
+
+ // ldr scratch, [scratch, #-(PrefixNops * 4 + 4)]
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::LDRi12)
+ .addReg(ScratchReg)
+ .addReg(ScratchReg)
+ .addImm(-(PrefixNops * 4 + 4))
+ .addImm(ARMCC::AL)
+ .addReg(0));
+
+ // Each EOR instruction XORs one byte of the type, shifted to its position.
+ for (int i = 0; i < 4; i++) {
+ uint8_t byte = (Type >> (i * 8)) & 0xFF;
+ uint32_t imm = byte << (i * 8);
+ bool isLast = (i == 3);
+
+ // Encode as ARM modified immediate.
+ int SOImmVal = ARM_AM::getSOImmVal(imm);
+ assert(SOImmVal != -1 &&
+ "Cannot encode immediate as ARM modified immediate");
+
+ // eor[s] scratch, scratch, #imm (last one sets flags with CPSR)
+ EmitToStreamer(*OutStreamer,
+ MCInstBuilder(ARM::EORri)
+ .addReg(ScratchReg)
+ .addReg(ScratchReg)
+ .addImm(SOImmVal)
+ .addImm(ARMCC::AL)
+ .addReg(0)
+ .addReg(isLast ? ARM::CPSR : ARM::NoRegister));
+ }
+
+ // If we spilled r3, restore it immediately after the comparison.
+ // This must happen before the branch so r3 is valid on both paths.
+ if (NeedSpillR3) {
+ // pop {r3}
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::LDMIA_UPD)
+ .addReg(ARM::SP)
+ .addReg(ARM::SP)
+ .addImm(ARMCC::AL)
+ .addReg(0)
+ .addReg(ARM::R3));
+ }
+
+ // beq .Lpass (branch if types match, i.e., scratch is zero)
+ MCSymbol *Pass = OutContext.createTempSymbol();
+ EmitToStreamer(*OutStreamer,
+ MCInstBuilder(ARM::Bcc)
+ .addExpr(MCSymbolRefExpr::create(Pass, OutContext))
+ .addImm(ARMCC::EQ)
+ .addReg(ARM::CPSR));
+
+ // udf #ESR (trap with encoded diagnostic)
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::UDF).addImm(ESR));
+
+ OutStreamer->emitLabel(Pass);
+}
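A standalone sketch (not LLVM code) of the ARM-mode encoding used above: the ESR value placed in the UDF immediate and the four per-byte EOR immediates derived from an example type hash (each is an 8-bit value shifted into its byte lane, so it is always encodable as an ARM modified immediate); the target-register encoding is illustrative:

#include <cstdint>
#include <cstdio>

int main() {
  unsigned AddrIndex = 0; // e.g. call target in r0
  unsigned ESR = 0x8000 | (31u << 5) | (AddrIndex & 31u);
  uint32_t Type = 0xDEADBEEF; // example KCFI type hash
  for (int I = 0; I < 4; ++I) {
    uint32_t Imm = ((Type >> (I * 8)) & 0xFF) << (I * 8);
    std::printf("eor%s r12, r12, #0x%08x\n", I == 3 ? "s" : "", Imm);
  }
  std::printf("udf #0x%x\n", ESR);
  return 0;
}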
+
+void ARMAsmPrinter::EmitKCFI_CHECK_Thumb2(Register AddrReg, int64_t Type,
+ const MachineInstr &Call,
+ int64_t PrefixNops) {
+ // Choose scratch register: r12 primary, r3 if target is r12.
+ unsigned ScratchReg = ARM::R12;
+ if (AddrReg == ARM::R12) {
+ ScratchReg = ARM::R3;
+ }
+
+ // Calculate ESR for Thumb mode (8-bit): 0x80 | addr_reg
+ // Bit 7: KCFI trap indicator
+ // Bits 6-5: Reserved
+ // Bits 4-0: Address register encoding
+ const ARMBaseRegisterInfo *TRI = static_cast<const ARMBaseRegisterInfo *>(
+ MF->getSubtarget().getRegisterInfo());
+ unsigned AddrIndex = TRI->getEncodingValue(AddrReg);
+ unsigned ESR = 0x80 | (AddrIndex & 0x1F);
+
+ // Check if r3 is live and needs to be spilled.
+ bool NeedSpillR3 =
+ (ScratchReg == ARM::R3) && isRegisterLiveInCall(Call, ARM::R3);
+
+ // If we need to spill r3, push it first.
+ if (NeedSpillR3) {
+ // push {r3}
+ EmitToStreamer(
+ *OutStreamer,
+ MCInstBuilder(ARM::tPUSH).addImm(ARMCC::AL).addReg(0).addReg(ARM::R3));
+ }
+
+ // Clear bit 0 of target address to handle Thumb function pointers.
+ // In 32-bit ARM, function pointers may have the low bit set to indicate
+ // Thumb state when ARM/Thumb interworking is enabled (ARMv4T and later).
+ // We need to clear it to avoid an alignment fault when loading.
+ // bic scratch, target, #1
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::t2BICri)
+ .addReg(ScratchReg)
+ .addReg(AddrReg)
+ .addImm(1)
+ .addImm(ARMCC::AL)
+ .addReg(0)
+ .addReg(0));
+
+ // ldr scratch, [scratch, #-(PrefixNops * 4 + 4)]
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::t2LDRi8)
+ .addReg(ScratchReg)
+ .addReg(ScratchReg)
+ .addImm(-(PrefixNops * 4 + 4))
+ .addImm(ARMCC::AL)
+ .addReg(0));
+
+ // Each EOR instruction XORs one byte of the type, shifted to its position.
+ for (int i = 0; i < 4; i++) {
+ uint8_t byte = (Type >> (i * 8)) & 0xFF;
+ uint32_t imm = byte << (i * 8);
+ bool isLast = (i == 3);
+
+ // Verify the immediate can be encoded as Thumb2 modified immediate.
+ assert(ARM_AM::getT2SOImmVal(imm) != -1 &&
+ "Cannot encode immediate as Thumb2 modified immediate");
+
+ // eor[s] scratch, scratch, #imm (last one sets flags with CPSR)
+ EmitToStreamer(*OutStreamer,
+ MCInstBuilder(ARM::t2EORri)
+ .addReg(ScratchReg)
+ .addReg(ScratchReg)
+ .addImm(imm)
+ .addImm(ARMCC::AL)
+ .addReg(0)
+ .addReg(isLast ? ARM::CPSR : ARM::NoRegister));
+ }
+
+ // If we spilled r3, restore it immediately after the comparison.
+ // This must happen before the branch so r3 is valid on both paths.
+ if (NeedSpillR3) {
+ // pop {r3}
+ EmitToStreamer(
+ *OutStreamer,
+ MCInstBuilder(ARM::tPOP).addImm(ARMCC::AL).addReg(0).addReg(ARM::R3));
+ }
+
+ // beq .Lpass (branch if types match, i.e., scratch is zero)
+ MCSymbol *Pass = OutContext.createTempSymbol();
+ EmitToStreamer(*OutStreamer,
+ MCInstBuilder(ARM::t2Bcc)
+ .addExpr(MCSymbolRefExpr::create(Pass, OutContext))
+ .addImm(ARMCC::EQ)
+ .addReg(ARM::CPSR));
+
+ // udf #ESR (trap with encoded diagnostic)
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tUDF).addImm(ESR));
+
+ OutStreamer->emitLabel(Pass);
+}
+
+void ARMAsmPrinter::EmitKCFI_CHECK_Thumb1(Register AddrReg, int64_t Type,
+ const MachineInstr &Call,
+ int64_t PrefixNops) {
+ // For Thumb1, use R2 unconditionally as scratch register (a low register
+ // required for tLDRi). R3 is used for building the type hash.
+ unsigned ScratchReg = ARM::R2;
+ unsigned TempReg = ARM::R3;
+
+ // Check if r3 is live and needs to be spilled.
+ bool NeedSpillR3 = isRegisterLiveInCall(Call, ARM::R3);
+
+ // Spill r3 if needed
+ if (NeedSpillR3) {
+ EmitToStreamer(
+ *OutStreamer,
+ MCInstBuilder(ARM::tPUSH).addImm(ARMCC::AL).addReg(0).addReg(ARM::R3));
+ }
+
+ // Check if r2 is live and needs to be spilled.
+ bool NeedSpillR2 = isRegisterLiveInCall(Call, ARM::R2);
+
+ // Push R2 if it's live
+ if (NeedSpillR2) {
+ EmitToStreamer(
+ *OutStreamer,
+ MCInstBuilder(ARM::tPUSH).addImm(ARMCC::AL).addReg(0).addReg(ARM::R2));
+ }
+
+ // Clear bit 0 from target address
+ // TempReg (R3) is used first as helper for BIC, then later for building type
+ // hash.
+
+ // movs temp, #1
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tMOVi8)
+ .addReg(TempReg)
+ .addReg(ARM::CPSR)
+ .addImm(1)
+ .addImm(ARMCC::AL)
+ .addReg(0));
+
+ // mov scratch, target
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tMOVr)
+ .addReg(ScratchReg)
+ .addReg(AddrReg)
+ .addImm(ARMCC::AL));
+
+ // bics scratch, temp (scratch = scratch & ~temp)
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tBIC)
+ .addReg(ScratchReg)
+ .addReg(ARM::CPSR)
+ .addReg(ScratchReg)
+ .addReg(TempReg)
+ .addImm(ARMCC::AL)
+ .addReg(0));
+
+ // Load type hash. Thumb1 doesn't support negative offsets, so subtract.
+ int offset = PrefixNops * 4 + 4;
+
+ // subs scratch, #offset
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tSUBi8)
+ .addReg(ScratchReg)
+ .addReg(ARM::CPSR)
+ .addReg(ScratchReg)
+ .addImm(offset)
+ .addImm(ARMCC::AL)
+ .addReg(0));
+
+ // ldr scratch, [scratch, #0]
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tLDRi)
+ .addReg(ScratchReg)
+ .addReg(ScratchReg)
+ .addImm(0)
+ .addImm(ARMCC::AL)
+ .addReg(0));
+
+ // Load expected type inline (instead of EOR sequence)
+ //
+ // This creates the 32-bit value byte-by-byte in the temp register:
+ // movs temp, #byte3 (high byte)
+ // lsls temp, temp, #8
+ // adds temp, #byte2
+ // lsls temp, temp, #8
+ // adds temp, #byte1
+ // lsls temp, temp, #8
+ // adds temp, #byte0 (low byte)
+
+ uint8_t byte0 = (Type >> 0) & 0xFF;
+ uint8_t byte1 = (Type >> 8) & 0xFF;
+ uint8_t byte2 = (Type >> 16) & 0xFF;
+ uint8_t byte3 = (Type >> 24) & 0xFF;
+
+ // movs temp, #byte3 (start with high byte)
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tMOVi8)
+ .addReg(TempReg)
+ .addReg(ARM::CPSR)
+ .addImm(byte3)
+ .addImm(ARMCC::AL)
+ .addReg(0));
+
+ // lsls temp, temp, #8
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tLSLri)
+ .addReg(TempReg)
+ .addReg(ARM::CPSR)
+ .addReg(TempReg)
+ .addImm(8)
+ .addImm(ARMCC::AL)
+ .addReg(0));
+
+ // adds temp, #byte2
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tADDi8)
+ .addReg(TempReg)
+ .addReg(ARM::CPSR)
+ .addReg(TempReg)
+ .addImm(byte2)
+ .addImm(ARMCC::AL)
+ .addReg(0));
+
+ // lsls temp, temp, #8
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tLSLri)
+ .addReg(TempReg)
+ .addReg(ARM::CPSR)
+ .addReg(TempReg)
+ .addImm(8)
+ .addImm(ARMCC::AL)
+ .addReg(0));
+
+ // adds temp, #byte1
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tADDi8)
+ .addReg(TempReg)
+ .addReg(ARM::CPSR)
+ .addReg(TempReg)
+ .addImm(byte1)
+ .addImm(ARMCC::AL)
+ .addReg(0));
+
+ // lsls temp, temp, #8
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tLSLri)
+ .addReg(TempReg)
+ .addReg(ARM::CPSR)
+ .addReg(TempReg)
+ .addImm(8)
+ .addImm(ARMCC::AL)
+ .addReg(0));
+
+ // adds temp, #byte0 (low byte)
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tADDi8)
+ .addReg(TempReg)
+ .addReg(ARM::CPSR)
+ .addReg(TempReg)
+ .addImm(byte0)
+ .addImm(ARMCC::AL)
+ .addReg(0));
+
+ // cmp scratch, temp
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tCMPr)
+ .addReg(ScratchReg)
+ .addReg(TempReg)
+ .addImm(ARMCC::AL)
+ .addReg(0));
+
+ // Restore registers if spilled (pop in reverse order of push: R2, then R3)
+ if (NeedSpillR2) {
+ // pop {r2}
+ EmitToStreamer(
+ *OutStreamer,
+ MCInstBuilder(ARM::tPOP).addImm(ARMCC::AL).addReg(0).addReg(ARM::R2));
+ }
+
+ // Restore r3 if spilled
+ if (NeedSpillR3) {
+ // pop {r3}
+ EmitToStreamer(
+ *OutStreamer,
+ MCInstBuilder(ARM::tPOP).addImm(ARMCC::AL).addReg(0).addReg(ARM::R3));
+ }
+
+ // beq .Lpass (branch if types match, i.e., scratch == temp)
+ MCSymbol *Pass = OutContext.createTempSymbol();
+ EmitToStreamer(*OutStreamer,
+ MCInstBuilder(ARM::tBcc)
+ .addExpr(MCSymbolRefExpr::create(Pass, OutContext))
+ .addImm(ARMCC::EQ)
+ .addReg(ARM::CPSR));
+
+  // bkpt #0 (trap; Thumb1 does not encode a diagnostic in the immediate)
+ EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tBKPT).addImm(0));
+
+ OutStreamer->emitLabel(Pass);
+}
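A standalone sketch (not LLVM code) confirming that the Thumb1 movs/lsls/adds sequence above rebuilds the 32-bit type hash one byte at a time, high byte first:

#include <cassert>
#include <cstdint>

static uint32_t materialize(uint32_t Type) {
  uint32_t Temp = (Type >> 24) & 0xFF;        // movs temp, #byte3
  Temp = (Temp << 8) + ((Type >> 16) & 0xFF); // lsls #8; adds #byte2
  Temp = (Temp << 8) + ((Type >> 8) & 0xFF);  // lsls #8; adds #byte1
  Temp = (Temp << 8) + (Type & 0xFF);         // lsls #8; adds #byte0
  return Temp;
}

int main() {
  assert(materialize(0xDEADBEEFu) == 0xDEADBEEFu);
  return 0;
}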
+
+void ARMAsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
+ Register AddrReg = MI.getOperand(0).getReg();
+ const int64_t Type = MI.getOperand(1).getImm();
+
+ // Get the call instruction that follows this KCFI_CHECK.
+ assert(std::next(MI.getIterator())->isCall() &&
+ "KCFI_CHECK not followed by a call instruction");
+ const MachineInstr &Call = *std::next(MI.getIterator());
+
+ // Adjust the offset for patchable-function-prefix.
+ int64_t PrefixNops = 0;
+ MI.getMF()
+ ->getFunction()
+ .getFnAttribute("patchable-function-prefix")
+ .getValueAsString()
+ .getAsInteger(10, PrefixNops);
+
+ // Emit the appropriate instruction sequence based on the opcode variant.
+ switch (MI.getOpcode()) {
+ case ARM::KCFI_CHECK_ARM:
+ EmitKCFI_CHECK_ARM32(AddrReg, Type, Call, PrefixNops);
+ break;
+ case ARM::KCFI_CHECK_Thumb2:
+ EmitKCFI_CHECK_Thumb2(AddrReg, Type, Call, PrefixNops);
+ break;
+ case ARM::KCFI_CHECK_Thumb1:
+ EmitKCFI_CHECK_Thumb1(AddrReg, Type, Call, PrefixNops);
+ break;
+ default:
+ llvm_unreachable("Unexpected KCFI_CHECK opcode");
+ }
+}
+
void ARMAsmPrinter::emitInstruction(const MachineInstr *MI) {
ARM_MC::verifyInstructionPredicates(MI->getOpcode(),
getSubtargetInfo().getFeatureBits());
@@ -1504,6 +1933,11 @@ void ARMAsmPrinter::emitInstruction(const MachineInstr *MI) {
switch (Opc) {
case ARM::t2MOVi32imm: llvm_unreachable("Should be lowered by thumb2it pass");
case ARM::DBG_VALUE: llvm_unreachable("Should be handled by generic printing");
+ case ARM::KCFI_CHECK_ARM:
+ case ARM::KCFI_CHECK_Thumb2:
+ case ARM::KCFI_CHECK_Thumb1:
+ LowerKCFI_CHECK(*MI);
+ return;
case ARM::LEApcrel:
case ARM::tLEApcrel:
case ARM::t2LEApcrel: {
diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.h b/llvm/lib/Target/ARM/ARMAsmPrinter.h
index 2b067c7..9e92b5a 100644
--- a/llvm/lib/Target/ARM/ARMAsmPrinter.h
+++ b/llvm/lib/Target/ARM/ARMAsmPrinter.h
@@ -123,9 +123,20 @@ public:
void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);
+ // KCFI check lowering
+ void LowerKCFI_CHECK(const MachineInstr &MI);
+
private:
void EmitSled(const MachineInstr &MI, SledKind Kind);
+ // KCFI check emission helpers
+ void EmitKCFI_CHECK_ARM32(Register AddrReg, int64_t Type,
+ const MachineInstr &Call, int64_t PrefixNops);
+ void EmitKCFI_CHECK_Thumb2(Register AddrReg, int64_t Type,
+ const MachineInstr &Call, int64_t PrefixNops);
+ void EmitKCFI_CHECK_Thumb1(Register AddrReg, int64_t Type,
+ const MachineInstr &Call, int64_t PrefixNops);
+
// Helpers for emitStartOfAsmFile() and emitEndOfAsmFile()
void emitAttributes();
diff --git a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index 0d7b6d1..fffb6373 100644
--- a/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -2301,6 +2301,8 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
for (unsigned i = 2, e = MBBI->getNumOperands(); i != e; ++i)
NewMI->addOperand(MBBI->getOperand(i));
+ NewMI->setCFIType(*MBB.getParent(), MI.getCFIType());
+
// Update call info and delete the pseudo instruction TCRETURN.
if (MI.isCandidateForAdditionalCallInfo())
MI.getMF()->moveAdditionalCallInfo(&MI, &*NewMI);
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index b1a668e..8122db2 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -2849,6 +2849,8 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
if (isTailCall) {
MF.getFrameInfo().setHasTailCall();
SDValue Ret = DAG.getNode(ARMISD::TC_RETURN, dl, MVT::Other, Ops);
+ if (CLI.CFIType)
+ Ret.getNode()->setCFIType(CLI.CFIType->getZExtValue());
DAG.addNoMergeSiteInfo(Ret.getNode(), CLI.NoMerge);
DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
return Ret;
@@ -2856,6 +2858,8 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// Returns a chain and a flag for retval copy to use.
Chain = DAG.getNode(CallOpc, dl, {MVT::Other, MVT::Glue}, Ops);
+ if (CLI.CFIType)
+ Chain.getNode()->setCFIType(CLI.CFIType->getZExtValue());
DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
InGlue = Chain.getValue(1);
DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
@@ -12008,6 +12012,71 @@ static void genTPLoopBody(MachineBasicBlock *TpLoopBody,
.add(predOps(ARMCC::AL));
}
+bool ARMTargetLowering::supportKCFIBundles() const {
+ // KCFI is supported in all ARM/Thumb modes
+ return true;
+}
+
+MachineInstr *
+ARMTargetLowering::EmitKCFICheck(MachineBasicBlock &MBB,
+ MachineBasicBlock::instr_iterator &MBBI,
+ const TargetInstrInfo *TII) const {
+ assert(MBBI->isCall() && MBBI->getCFIType() &&
+ "Invalid call instruction for a KCFI check");
+
+ MachineOperand *TargetOp = nullptr;
+ switch (MBBI->getOpcode()) {
+ // ARM mode opcodes
+ case ARM::BLX:
+ case ARM::BLX_pred:
+ case ARM::BLX_noip:
+ case ARM::BLX_pred_noip:
+ case ARM::BX_CALL:
+ TargetOp = &MBBI->getOperand(0);
+ break;
+ case ARM::TCRETURNri:
+ case ARM::TCRETURNrinotr12:
+ case ARM::TAILJMPr:
+ case ARM::TAILJMPr4:
+ TargetOp = &MBBI->getOperand(0);
+ break;
+ // Thumb mode opcodes (Thumb1 and Thumb2)
+ // Note: Most Thumb call instructions have predicate operands before the
+  // target register. Format: tBLXr pred, predreg, target_register, ...
+ case ARM::tBLXr: // Thumb1/Thumb2: BLX register (requires V5T)
+ case ARM::tBLXr_noip: // Thumb1/Thumb2: BLX register, no IP clobber
+ case ARM::tBX_CALL: // Thumb1 only: BX call (push LR, BX)
+ TargetOp = &MBBI->getOperand(2);
+ break;
+ // Tail call instructions don't have predicates, target is operand 0
+ case ARM::tTAILJMPr: // Thumb1/Thumb2: Tail call via register
+ TargetOp = &MBBI->getOperand(0);
+ break;
+ default:
+ llvm_unreachable("Unexpected CFI call opcode");
+ }
+
+ assert(TargetOp && TargetOp->isReg() && "Invalid target operand");
+ TargetOp->setIsRenamable(false);
+
+ // Select the appropriate KCFI_CHECK variant based on the instruction set
+ unsigned KCFICheckOpcode;
+ if (Subtarget->isThumb()) {
+ if (Subtarget->isThumb2()) {
+ KCFICheckOpcode = ARM::KCFI_CHECK_Thumb2;
+ } else {
+ KCFICheckOpcode = ARM::KCFI_CHECK_Thumb1;
+ }
+ } else {
+ KCFICheckOpcode = ARM::KCFI_CHECK_ARM;
+ }
+
+ return BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(KCFICheckOpcode))
+ .addReg(TargetOp->getReg())
+ .addImm(MBBI->getCFIType())
+ .getInstr();
+}
+
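A toy standalone sketch (not LLVM code) of the operand-index selection above: Thumb BLX-style calls carry their predicate operands before the callee register, so the target lives at operand 2, while the ARM calls and the register tail calls keep it at operand 0; opcode names are plain strings here purely for illustration:

#include <cassert>
#include <string>

static unsigned calleeOperandIndex(const std::string &Opcode) {
  if (Opcode == "tBLXr" || Opcode == "tBLXr_noip" || Opcode == "tBX_CALL")
    return 2; // pred imm, pred reg, then the target register
  return 0;   // BLX, BLX_pred, BX_CALL, TCRETURNri, tTAILJMPr, ...
}

int main() {
  assert(calleeOperandIndex("tBLXr") == 2);
  assert(calleeOperandIndex("BLX") == 0);
  return 0;
}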
MachineBasicBlock *
ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const {
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 70aa001..8c5e0cf 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -447,6 +447,12 @@ class VectorType;
void AdjustInstrPostInstrSelection(MachineInstr &MI,
SDNode *Node) const override;
+ bool supportKCFIBundles() const override;
+
+ MachineInstr *EmitKCFICheck(MachineBasicBlock &MBB,
+ MachineBasicBlock::instr_iterator &MBBI,
+ const TargetInstrInfo *TII) const override;
+
SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const;
SDValue PerformCMOVToBFICombine(SDNode *N, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/ARM/ARMInstrInfo.td b/llvm/lib/Target/ARM/ARMInstrInfo.td
index 282ff53..53be167 100644
--- a/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -6536,6 +6536,36 @@ def CMP_SWAP_64 : PseudoInst<(outs GPRPair:$Rd, GPRPair:$addr_temp_out),
def : Pat<(atomic_fence (timm), 0), (MEMBARRIER)>;
//===----------------------------------------------------------------------===//
+// KCFI check pseudo-instruction.
+//===----------------------------------------------------------------------===//
+// KCFI_CHECK pseudo-instruction for Kernel Control-Flow Integrity.
+// Expands to a sequence that verifies the function pointer's type hash.
+// Different sizes for different architectures due to different expansions.
+
+def KCFI_CHECK_ARM
+ : PseudoInst<(outs), (ins GPR:$ptr, i32imm:$type), NoItinerary, []>,
+ Sched<[]>,
+ Requires<[IsARM]> {
+ let Size = 28; // 7 instructions (bic, ldr, 4x eor, beq, udf)
+}
+
+def KCFI_CHECK_Thumb2
+ : PseudoInst<(outs), (ins GPR:$ptr, i32imm:$type), NoItinerary, []>,
+ Sched<[]>,
+ Requires<[IsThumb2]> {
+ let Size =
+ 32; // worst-case 9 instructions (push, bic, ldr, 4x eor, pop, beq.w, udf)
+}
+
+def KCFI_CHECK_Thumb1
+ : PseudoInst<(outs), (ins GPR:$ptr, i32imm:$type), NoItinerary, []>,
+ Sched<[]>,
+ Requires<[IsThumb1Only]> {
+ let Size = 50; // worst-case 25 instructions (pushes, bic helper, type
+ // building, cmp, pops)
+}
+
+//===----------------------------------------------------------------------===//
// Instructions used for emitting unwind opcodes on Windows.
//===----------------------------------------------------------------------===//
let isPseudo = 1 in {
diff --git a/llvm/lib/Target/ARM/ARMTargetMachine.cpp b/llvm/lib/Target/ARM/ARMTargetMachine.cpp
index 86740a9..590d4c7 100644
--- a/llvm/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetMachine.cpp
@@ -111,6 +111,7 @@ extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void LLVMInitializeARMTarget() {
initializeMVELaneInterleavingPass(Registry);
initializeARMFixCortexA57AES1742098Pass(Registry);
initializeARMDAGToDAGISelLegacyPass(Registry);
+ initializeKCFIPass(Registry);
}
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
@@ -487,6 +488,9 @@ void ARMPassConfig::addPreSched2() {
// proper scheduling.
addPass(createARMExpandPseudoPass());
+ // Emit KCFI checks for indirect calls.
+ addPass(createKCFIPass());
+
if (getOptLevel() != CodeGenOptLevel::None) {
// When optimising for size, always run the Thumb2SizeReduction pass before
// IfConversion. Otherwise, check whether IT blocks are restricted
@@ -517,9 +521,12 @@ void ARMPassConfig::addPreSched2() {
void ARMPassConfig::addPreEmitPass() {
addPass(createThumb2SizeReductionPass());
- // Constant island pass work on unbundled instructions.
+ // Unpack bundles for:
+ // - Thumb2: Constant island pass requires unbundled instructions
+ // - KCFI: KCFI_CHECK pseudo instructions need to be unbundled for AsmPrinter
addPass(createUnpackMachineBundles([](const MachineFunction &MF) {
- return MF.getSubtarget<ARMSubtarget>().isThumb2();
+ return MF.getSubtarget<ARMSubtarget>().isThumb2() ||
+ MF.getFunction().getParent()->getModuleFlag("kcfi");
}));
// Don't optimize barriers or block placement at -O0.
@@ -530,6 +537,7 @@ void ARMPassConfig::addPreEmitPass() {
}
void ARMPassConfig::addPreEmitPass2() {
// Inserts fixup instructions before unsafe AES operations. Instructions may
// be inserted at the start of blocks and at within blocks so this pass has to
// come before those below.
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index 22cf3a7..598735f 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -4675,7 +4675,7 @@ class WMMA_INSTR<string _Intr, list<dag> _Args>
//
class WMMA_LOAD<WMMA_REGINFO Frag, string Layout, string Space, bit WithStride>
- : WMMA_INSTR<WMMA_NAME_LDST<"load", Frag, Layout, WithStride>.record,
+ : WMMA_INSTR<WMMA_NAME_LDST<"load", Frag, Layout, WithStride>.record_name,
[!con((ins ADDR:$src),
!if(WithStride, (ins B32:$ldm), (ins)))]>,
Requires<Frag.Predicates> {
@@ -4714,7 +4714,7 @@ class WMMA_LOAD<WMMA_REGINFO Frag, string Layout, string Space, bit WithStride>
//
class WMMA_STORE_D<WMMA_REGINFO Frag, string Layout, string Space,
bit WithStride>
- : WMMA_INSTR<WMMA_NAME_LDST<"store", Frag, Layout, WithStride>.record,
+ : WMMA_INSTR<WMMA_NAME_LDST<"store", Frag, Layout, WithStride>.record_name,
[!con((ins ADDR:$dst),
Frag.Ins,
!if(WithStride, (ins B32:$ldm), (ins)))]>,
@@ -4778,7 +4778,7 @@ class MMA_OP_PREDICATES<WMMA_REGINFO FragA, string b1op> {
class WMMA_MMA<WMMA_REGINFO FragA, WMMA_REGINFO FragB,
WMMA_REGINFO FragC, WMMA_REGINFO FragD,
string ALayout, string BLayout, int Satfinite, string rnd, string b1op>
- : WMMA_INSTR<WMMA_NAME<ALayout, BLayout, Satfinite, rnd, b1op, FragA, FragB, FragC, FragD>.record,
+ : WMMA_INSTR<WMMA_NAME<ALayout, BLayout, Satfinite, rnd, b1op, FragA, FragB, FragC, FragD>.record_name,
[FragA.Ins, FragB.Ins, FragC.Ins]>,
// Requires does not seem to have effect on Instruction w/o Patterns.
// We set it here anyways and propagate to the Pat<> we construct below.
@@ -4837,7 +4837,7 @@ defset list<WMMA_INSTR> WMMAs = {
class MMA<WMMA_REGINFO FragA, WMMA_REGINFO FragB,
WMMA_REGINFO FragC, WMMA_REGINFO FragD,
string ALayout, string BLayout, int Satfinite, string b1op, string Kind>
- : WMMA_INSTR<MMA_NAME<ALayout, BLayout, Satfinite, b1op, Kind, FragA, FragB, FragC, FragD>.record,
+ : WMMA_INSTR<MMA_NAME<ALayout, BLayout, Satfinite, b1op, Kind, FragA, FragB, FragC, FragD>.record_name,
[FragA.Ins, FragB.Ins, FragC.Ins]>,
// Requires does not seem to have effect on Instruction w/o Patterns.
// We set it here anyways and propagate to the Pat<> we construct below.
@@ -4891,7 +4891,7 @@ class MMA_SP<WMMA_REGINFO FragA, WMMA_REGINFO FragB,
WMMA_REGINFO FragC, WMMA_REGINFO FragD,
string Metadata, string Kind, int Satfinite>
: WMMA_INSTR<MMA_SP_NAME<Metadata, Kind, Satfinite,
- FragA, FragB, FragC, FragD>.record,
+ FragA, FragB, FragC, FragD>.record_name,
[FragA.Ins, FragB.Ins, FragC.Ins,
(ins B32:$metadata, i32imm:$selector)]>,
// Requires does not seem to have effect on Instruction w/o Patterns.
@@ -4946,7 +4946,7 @@ defset list<WMMA_INSTR> MMA_SPs = {
// ldmatrix.sync.aligned.m8n8[|.trans][|.shared].b16
//
class LDMATRIX<WMMA_REGINFO Frag, bit Transposed, string Space>
- : WMMA_INSTR<LDMATRIX_NAME<Frag, Transposed>.record, [(ins ADDR:$src)]>,
+ : WMMA_INSTR<LDMATRIX_NAME<Frag, Transposed>.record_name, [(ins ADDR:$src)]>,
Requires<Frag.Predicates> {
// Build PatFrag that only matches particular address space.
PatFrag IntrFrag = PatFrag<(ops node:$src), (Intr node:$src),
@@ -4981,7 +4981,7 @@ defset list<WMMA_INSTR> LDMATRIXs = {
// stmatrix.sync.aligned.m8n8[|.trans][|.shared].b16
//
class STMATRIX<WMMA_REGINFO Frag, bit Transposed, string Space>
- : WMMA_INSTR<STMATRIX_NAME<Frag, Transposed>.record, [!con((ins ADDR:$dst), Frag.Ins)]>,
+ : WMMA_INSTR<STMATRIX_NAME<Frag, Transposed>.record_name, [!con((ins ADDR:$dst), Frag.Ins)]>,
Requires<Frag.Predicates> {
// Build PatFrag that only matches particular address space.
dag PFOperands = !con((ops node:$dst),
@@ -5376,7 +5376,7 @@ class Tcgen05MMAInst<bit Sp, string KindStr, string ASpace,
Requires<PTXPredicates> {
Intrinsic Intrin = !cast<Intrinsic>(
- NVVM_TCGEN05_MMA<Sp, ASpace, AShift, ScaleInputD>.record
+ NVVM_TCGEN05_MMA<Sp, ASpace, AShift, ScaleInputD>.record_name
);
dag ScaleInpIns = !if(!eq(ScaleInputD, 1), (ins i64imm:$scale_input_d), (ins));
@@ -5618,7 +5618,7 @@ class Tcgen05MMABlockScaleInst<bit Sp, string ASpace, string KindStr,
Requires<[hasTcgen05Instructions, PTXPredicate]> {
Intrinsic Intrin = !cast<Intrinsic>(
- NVVM_TCGEN05_MMA_BLOCKSCALE<Sp, ASpace, KindStr, ScaleVecSize>.record);
+ NVVM_TCGEN05_MMA_BLOCKSCALE<Sp, ASpace, KindStr, ScaleVecSize>.record_name);
dag SparseMetadataIns = !if(!eq(Sp, 1), (ins B32:$spmetadata), (ins));
dag SparseMetadataIntr = !if(!eq(Sp, 1), (Intrin i32:$spmetadata), (Intrin));
@@ -5702,7 +5702,7 @@ class Tcgen05MMAWSInst<bit Sp, string ASpace, string KindStr,
Requires<[hasTcgen05Instructions]> {
Intrinsic Intrin = !cast<Intrinsic>(
- NVVM_TCGEN05_MMA_WS<Sp, ASpace, HasZeroColMask>.record);
+ NVVM_TCGEN05_MMA_WS<Sp, ASpace, HasZeroColMask>.record_name);
dag ZeroColMaskIns = !if(!eq(HasZeroColMask, 1),
(ins B64:$zero_col_mask), (ins));
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
index 4104abd..4c2f7f6 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
@@ -482,7 +482,7 @@ let Predicates = [HasVendorXSfvfwmaccqqq] in {
defm SF_VFWMACC_4x4x4 : VPseudoSiFiveVFWMACC;
}
-let Predicates = [HasVendorXSfvfnrclipxfqf] in {
+let Predicates = [HasVendorXSfvfnrclipxfqf], AltFmtType = IS_NOT_ALTFMT in {
defm SF_VFNRCLIP_XU_F_QF : VPseudoSiFiveVFNRCLIP;
defm SF_VFNRCLIP_X_F_QF : VPseudoSiFiveVFNRCLIP;
}
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
index d91923b..56a38bb 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp
@@ -1499,18 +1499,25 @@ static bool generateKernelClockInst(const SPIRV::IncomingCall *Call,
Register ResultReg = Call->ReturnRegister;
- // Deduce the `Scope` operand from the builtin function name.
- SPIRV::Scope::Scope ScopeArg =
- StringSwitch<SPIRV::Scope::Scope>(Builtin->Name)
- .EndsWith("device", SPIRV::Scope::Scope::Device)
- .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
- .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
- Register ScopeReg = buildConstantIntReg32(ScopeArg, MIRBuilder, GR);
-
- MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
- .addDef(ResultReg)
- .addUse(GR->getSPIRVTypeID(Call->ReturnType))
- .addUse(ScopeReg);
+ if (Builtin->Name == "__spirv_ReadClockKHR") {
+ MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
+ .addDef(ResultReg)
+ .addUse(GR->getSPIRVTypeID(Call->ReturnType))
+ .addUse(Call->Arguments[0]);
+ } else {
+ // Deduce the `Scope` operand from the builtin function name.
+ SPIRV::Scope::Scope ScopeArg =
+ StringSwitch<SPIRV::Scope::Scope>(Builtin->Name)
+ .EndsWith("device", SPIRV::Scope::Scope::Device)
+ .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
+ .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
+ Register ScopeReg = buildConstantIntReg32(ScopeArg, MIRBuilder, GR);
+
+ MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
+ .addDef(ResultReg)
+ .addUse(GR->getSPIRVTypeID(Call->ReturnType))
+ .addUse(ScopeReg);
+ }
return true;
}
diff --git a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
index 3b8764a..c259cce 100644
--- a/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
+++ b/llvm/lib/Target/SPIRV/SPIRVBuiltins.td
@@ -1174,6 +1174,7 @@ defm : DemangledNativeBuiltin<"clock_read_sub_group", OpenCL_std, KernelClock, 0
defm : DemangledNativeBuiltin<"clock_read_hilo_device", OpenCL_std, KernelClock, 0, 0, OpReadClockKHR>;
defm : DemangledNativeBuiltin<"clock_read_hilo_work_group", OpenCL_std, KernelClock, 0, 0, OpReadClockKHR>;
defm : DemangledNativeBuiltin<"clock_read_hilo_sub_group", OpenCL_std, KernelClock, 0, 0, OpReadClockKHR>;
+defm : DemangledNativeBuiltin<"__spirv_ReadClockKHR", OpenCL_std, KernelClock, 1, 1, OpReadClockKHR>;
//===----------------------------------------------------------------------===//
// Class defining an atomic instruction on floating-point numbers.
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index d49f25a..4dfc400 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -2632,6 +2632,10 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(Op, MVT::f32, Promote);
}
+ setOperationPromotedToType(ISD::ATOMIC_LOAD, MVT::f16, MVT::i16);
+ setOperationPromotedToType(ISD::ATOMIC_LOAD, MVT::f32, MVT::i32);
+ setOperationPromotedToType(ISD::ATOMIC_LOAD, MVT::f64, MVT::i64);
+
// We have target-specific dag combine patterns for the following nodes:
setTargetDAGCombine({ISD::VECTOR_SHUFFLE,
ISD::SCALAR_TO_VECTOR,
diff --git a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
index a0f7ec6..2dd0fde 100644
--- a/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
+++ b/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
@@ -948,17 +948,17 @@ void llvm::updateVCallVisibilityInIndex(
// linker, as we have no information on their eventual use.
if (DynamicExportSymbols.count(P.first))
continue;
+ // With validation enabled, we want to exclude symbols visible to regular
+ // objects. Local symbols will be in this group due to the current
+ // implementation but those with VCallVisibilityTranslationUnit will have
+ // already been marked in clang so are unaffected.
+ if (VisibleToRegularObjSymbols.count(P.first))
+ continue;
for (auto &S : P.second.getSummaryList()) {
auto *GVar = dyn_cast<GlobalVarSummary>(S.get());
if (!GVar ||
GVar->getVCallVisibility() != GlobalObject::VCallVisibilityPublic)
continue;
- // With validation enabled, we want to exclude symbols visible to regular
- // objects. Local symbols will be in this group due to the current
- // implementation but those with VCallVisibilityTranslationUnit will have
- // already been marked in clang so are unaffected.
- if (VisibleToRegularObjSymbols.count(P.first))
- continue;
GVar->setVCallVisibility(GlobalObject::VCallVisibilityLinkageUnit);
}
}
@@ -1161,14 +1161,10 @@ bool DevirtIndex::tryFindVirtualCallTargets(
// and therefore the same GUID. This can happen if there isn't enough
// distinguishing path when compiling the source file. In that case we
// conservatively return false early.
+ if (P.VTableVI.hasLocal() && P.VTableVI.getSummaryList().size() > 1)
+ return false;
const GlobalVarSummary *VS = nullptr;
- bool LocalFound = false;
for (const auto &S : P.VTableVI.getSummaryList()) {
- if (GlobalValue::isLocalLinkage(S->linkage())) {
- if (LocalFound)
- return false;
- LocalFound = true;
- }
auto *CurVS = cast<GlobalVarSummary>(S->getBaseObject());
if (!CurVS->vTableFuncs().empty() ||
// Previously clang did not attach the necessary type metadata to
@@ -1184,6 +1180,7 @@ bool DevirtIndex::tryFindVirtualCallTargets(
// with public LTO visibility.
if (VS->getVCallVisibility() == GlobalObject::VCallVisibilityPublic)
return false;
+ break;
}
}
// There will be no VS if all copies are available_externally having no
@@ -1411,9 +1408,8 @@ bool DevirtIndex::trySingleImplDevirt(MutableArrayRef<ValueInfo> TargetsForSlot,
// If the summary list contains multiple summaries where at least one is
// a local, give up, as we won't know which (possibly promoted) name to use.
- for (const auto &S : TheFn.getSummaryList())
- if (GlobalValue::isLocalLinkage(S->linkage()) && Size > 1)
- return false;
+ if (TheFn.hasLocal() && Size > 1)
+ return false;
// Collect functions devirtualized at least for one call site for stats.
if (PrintSummaryDevirt || AreStatisticsEnabled())
@@ -2591,6 +2587,11 @@ void DevirtIndex::run() {
if (ExportSummary.typeIdCompatibleVtableMap().empty())
return;
+ // Assert that we haven't made any changes that would affect the hasLocal()
+ // flag on the GUID summary info.
+ assert(!ExportSummary.withInternalizeAndPromote() &&
+ "Expect index-based WPD to run before internalization and promotion");
+
DenseMap<GlobalValue::GUID, std::vector<StringRef>> NameByGUID;
for (const auto &P : ExportSummary.typeIdCompatibleVtableMap()) {
NameByGUID[GlobalValue::getGUIDAssumingExternalLinkage(P.first)].push_back(
diff --git a/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp b/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
index 4acc3f2..d347ced 100644
--- a/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
@@ -614,6 +614,16 @@ static Decomposition decompose(Value *V,
return {V, IsKnownNonNegative};
}
+ if (match(V, m_Add(m_Value(Op0), m_ConstantInt(CI))) && CI->isNegative() &&
+ canUseSExt(CI)) {
+ Preconditions.emplace_back(
+ CmpInst::ICMP_UGE, Op0,
+ ConstantInt::get(Op0->getType(), CI->getSExtValue() * -1));
+ if (auto Decomp = MergeResults(Op0, CI, true))
+ return *Decomp;
+ return {V, IsKnownNonNegative};
+ }
+
if (match(V, m_NSWAdd(m_Value(Op0), m_Value(Op1)))) {
if (!isKnownNonNegative(Op0, DL))
Preconditions.emplace_back(CmpInst::ICMP_SGE, Op0,
@@ -627,16 +637,6 @@ static Decomposition decompose(Value *V,
return {V, IsKnownNonNegative};
}
- if (match(V, m_Add(m_Value(Op0), m_ConstantInt(CI))) && CI->isNegative() &&
- canUseSExt(CI)) {
- Preconditions.emplace_back(
- CmpInst::ICMP_UGE, Op0,
- ConstantInt::get(Op0->getType(), CI->getSExtValue() * -1));
- if (auto Decomp = MergeResults(Op0, CI, true))
- return *Decomp;
- return {V, IsKnownNonNegative};
- }
-
// Decompose or as an add if there are no common bits between the operands.
if (match(V, m_DisjointOr(m_Value(Op0), m_ConstantInt(CI)))) {
if (auto Decomp = MergeResults(Op0, CI, IsSigned))
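For illustration only: moving the negative-constant add case ahead of the NSW-add case means a pattern like the hypothetical one below is now decomposed via the negative-constant rule, recording %i as %x + (-5) with the precondition %x uge 5, rather than falling through to the generic NSW handling first.
; Sketch: nsw add of a negative constant matched by the reordered decomposition.
define i1 @decompose_example(i32 %x) {
  %i = add nsw i32 %x, -5
  %c = icmp ult i32 %i, %x
  ret i1 %c
}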
diff --git a/llvm/lib/Transforms/Scalar/MergeICmps.cpp b/llvm/lib/Transforms/Scalar/MergeICmps.cpp
index a83cbd17a7..f273e9d 100644
--- a/llvm/lib/Transforms/Scalar/MergeICmps.cpp
+++ b/llvm/lib/Transforms/Scalar/MergeICmps.cpp
@@ -64,10 +64,10 @@
using namespace llvm;
-namespace {
-
#define DEBUG_TYPE "mergeicmps"
+namespace {
+
// A BCE atom "Binary Compare Expression Atom" represents an integer load
// that is a constant offset from a base value, e.g. `a` or `o.c` in the example
// at the top.
@@ -128,11 +128,12 @@ private:
unsigned Order = 1;
DenseMap<const Value*, int> BaseToIndex;
};
+} // namespace
// If this value is a load from a constant offset w.r.t. a base address, and
// there are no other users of the load or address, returns the base address and
// the offset.
-BCEAtom visitICmpLoadOperand(Value *const Val, BaseIdentifier &BaseId) {
+static BCEAtom visitICmpLoadOperand(Value *const Val, BaseIdentifier &BaseId) {
auto *const LoadI = dyn_cast<LoadInst>(Val);
if (!LoadI)
return {};
@@ -175,6 +176,7 @@ BCEAtom visitICmpLoadOperand(Value *const Val, BaseIdentifier &BaseId) {
return BCEAtom(GEP, LoadI, BaseId.getBaseId(Base), Offset);
}
+namespace {
// A comparison between two BCE atoms, e.g. `a == o.a` in the example at the
// top.
// Note: the terminology is misleading: the comparison is symmetric, so there
@@ -239,6 +241,7 @@ class BCECmpBlock {
private:
BCECmp Cmp;
};
+} // namespace
bool BCECmpBlock::canSinkBCECmpInst(const Instruction *Inst,
AliasAnalysis &AA) const {
@@ -302,9 +305,9 @@ bool BCECmpBlock::doesOtherWork() const {
// Visit the given comparison. If this is a comparison between two valid
// BCE atoms, returns the comparison.
-std::optional<BCECmp> visitICmp(const ICmpInst *const CmpI,
- const ICmpInst::Predicate ExpectedPredicate,
- BaseIdentifier &BaseId) {
+static std::optional<BCECmp>
+visitICmp(const ICmpInst *const CmpI,
+ const ICmpInst::Predicate ExpectedPredicate, BaseIdentifier &BaseId) {
// The comparison can only be used once:
// - For intermediate blocks, as a branch condition.
// - For the final block, as an incoming value for the Phi.
@@ -332,10 +335,9 @@ std::optional<BCECmp> visitICmp(const ICmpInst *const CmpI,
// Visit the given comparison block. If this is a comparison between two valid
// BCE atoms, returns the comparison.
-std::optional<BCECmpBlock> visitCmpBlock(Value *const Val,
- BasicBlock *const Block,
- const BasicBlock *const PhiBlock,
- BaseIdentifier &BaseId) {
+static std::optional<BCECmpBlock>
+visitCmpBlock(Value *const Val, BasicBlock *const Block,
+ const BasicBlock *const PhiBlock, BaseIdentifier &BaseId) {
if (Block->empty())
return std::nullopt;
auto *const BranchI = dyn_cast<BranchInst>(Block->getTerminator());
@@ -397,6 +399,7 @@ static inline void enqueueBlock(std::vector<BCECmpBlock> &Comparisons,
Comparisons.push_back(std::move(Comparison));
}
+namespace {
// A chain of comparisons.
class BCECmpChain {
public:
@@ -420,6 +423,7 @@ private:
// The original entry block (before sorting);
BasicBlock *EntryBlock_;
};
+} // namespace
static bool areContiguous(const BCECmpBlock &First, const BCECmpBlock &Second) {
return First.Lhs().BaseId == Second.Lhs().BaseId &&
@@ -742,9 +746,8 @@ bool BCECmpChain::simplify(const TargetLibraryInfo &TLI, AliasAnalysis &AA,
return true;
}
-std::vector<BasicBlock *> getOrderedBlocks(PHINode &Phi,
- BasicBlock *const LastBlock,
- int NumBlocks) {
+static std::vector<BasicBlock *>
+getOrderedBlocks(PHINode &Phi, BasicBlock *const LastBlock, int NumBlocks) {
// Walk up from the last block to find other blocks.
std::vector<BasicBlock *> Blocks(NumBlocks);
assert(LastBlock && "invalid last block");
@@ -777,8 +780,8 @@ std::vector<BasicBlock *> getOrderedBlocks(PHINode &Phi,
return Blocks;
}
-bool processPhi(PHINode &Phi, const TargetLibraryInfo &TLI, AliasAnalysis &AA,
- DomTreeUpdater &DTU) {
+static bool processPhi(PHINode &Phi, const TargetLibraryInfo &TLI,
+ AliasAnalysis &AA, DomTreeUpdater &DTU) {
LLVM_DEBUG(dbgs() << "processPhi()\n");
if (Phi.getNumIncomingValues() <= 1) {
LLVM_DEBUG(dbgs() << "skip: only one incoming value in phi\n");
@@ -874,6 +877,7 @@ static bool runImpl(Function &F, const TargetLibraryInfo &TLI,
return MadeChange;
}
+namespace {
class MergeICmpsLegacyPass : public FunctionPass {
public:
static char ID;
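For illustration only: the MergeICmps changes above are refactoring (making helpers static and tightening the anonymous namespaces) and do not alter what the pass matches. As a reminder of the input shape, here is a hypothetical chain of per-field equality compares feeding a phi, the kind of sequence the BCE-atom machinery analyzes; a sketch only, not a test from this patch.
; Sketch: field-by-field equality chain over contiguous memory.
%S = type { i32, i32 }

define i1 @struct_eq(ptr %a, ptr %b) {
entry:
  %a0 = load i32, ptr %a
  %b0 = load i32, ptr %b
  %c0 = icmp eq i32 %a0, %b0
  br i1 %c0, label %next, label %exit
next:
  %pa1 = getelementptr inbounds %S, ptr %a, i64 0, i32 1
  %pb1 = getelementptr inbounds %S, ptr %b, i64 0, i32 1
  %a1 = load i32, ptr %pa1
  %b1 = load i32, ptr %pb1
  %c1 = icmp eq i32 %a1, %b1
  br label %exit
exit:
  %r = phi i1 [ false, %entry ], [ %c1, %next ]
  ret i1 %r
}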
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index 06bea2f..a1ad2db 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -2712,7 +2712,8 @@ public:
static inline bool classof(const VPRecipeBase *R) {
return R->getVPDefID() == VPRecipeBase::VPReductionSC ||
- R->getVPDefID() == VPRecipeBase::VPReductionEVLSC;
+ R->getVPDefID() == VPRecipeBase::VPReductionEVLSC ||
+ R->getVPDefID() == VPRecipeBase::VPPartialReductionSC;
}
static inline bool classof(const VPUser *U) {
@@ -2783,7 +2784,10 @@ public:
Opcode(Opcode), VFScaleFactor(ScaleFactor) {
[[maybe_unused]] auto *AccumulatorRecipe =
getChainOp()->getDefiningRecipe();
- assert((isa<VPReductionPHIRecipe>(AccumulatorRecipe) ||
+ // When cloning as part of a VPExpressionRecipe, the chain op could have been
+ // replaced by a temporary VPValue that has no defining recipe.
+ assert((!AccumulatorRecipe ||
+ isa<VPReductionPHIRecipe>(AccumulatorRecipe) ||
isa<VPPartialReductionRecipe>(AccumulatorRecipe)) &&
"Unexpected operand order for partial reduction recipe");
}
@@ -3093,6 +3097,11 @@ public:
/// removed before codegen.
void decompose();
+ unsigned getVFScaleFactor() const {
+ auto *PR = dyn_cast<VPPartialReductionRecipe>(ExpressionRecipes.back());
+ return PR ? PR->getVFScaleFactor() : 1;
+ }
+
/// Method for generating code, must not be called as this recipe is abstract.
void execute(VPTransformState &State) override {
llvm_unreachable("recipe must be removed before execute");
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index 1f1b42b..931a5b7 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -168,6 +168,7 @@ bool VPRecipeBase::mayHaveSideEffects() const {
return cast<VPWidenIntrinsicRecipe>(this)->mayHaveSideEffects();
case VPBlendSC:
case VPReductionEVLSC:
+ case VPPartialReductionSC:
case VPReductionSC:
case VPScalarIVStepsSC:
case VPVectorPointerSC:
@@ -300,14 +301,23 @@ InstructionCost
VPPartialReductionRecipe::computeCost(ElementCount VF,
VPCostContext &Ctx) const {
std::optional<unsigned> Opcode;
- VPValue *Op = getOperand(0);
- VPRecipeBase *OpR = Op->getDefiningRecipe();
-
- // If the partial reduction is predicated, a select will be operand 0
- if (match(getOperand(1), m_Select(m_VPValue(), m_VPValue(Op), m_VPValue()))) {
- OpR = Op->getDefiningRecipe();
+ VPValue *Op = getVecOp();
+ uint64_t MulConst;
+ // If the partial reduction is predicated, a select will be operand 1.
+ // If it isn't predicated and the mul isn't operating on a constant, then it
+ // should have been turned into a VPExpressionRecipe.
+ // FIXME: Replace the entire function with this once all partial reduction
+ // variants are bundled into VPExpressionRecipe.
+ if (!match(Op, m_Select(m_VPValue(), m_VPValue(Op), m_VPValue())) &&
+ !match(Op, m_Mul(m_VPValue(), m_ConstantInt(MulConst)))) {
+ auto *PhiType = Ctx.Types.inferScalarType(getChainOp());
+ auto *InputType = Ctx.Types.inferScalarType(getVecOp());
+ return Ctx.TTI.getPartialReductionCost(getOpcode(), InputType, InputType,
+ PhiType, VF, TTI::PR_None,
+ TTI::PR_None, {}, Ctx.CostKind);
}
+ VPRecipeBase *OpR = Op->getDefiningRecipe();
Type *InputTypeA = nullptr, *InputTypeB = nullptr;
TTI::PartialReductionExtendKind ExtAType = TTI::PR_None,
ExtBType = TTI::PR_None;
@@ -2856,11 +2866,19 @@ InstructionCost VPExpressionRecipe::computeCost(ElementCount VF,
cast<VPReductionRecipe>(ExpressionRecipes.back())->getRecurrenceKind());
switch (ExpressionType) {
case ExpressionTypes::ExtendedReduction: {
- return Ctx.TTI.getExtendedReductionCost(
- Opcode,
- cast<VPWidenCastRecipe>(ExpressionRecipes.front())->getOpcode() ==
- Instruction::ZExt,
- RedTy, SrcVecTy, std::nullopt, Ctx.CostKind);
+ unsigned Opcode = RecurrenceDescriptor::getOpcode(
+ cast<VPReductionRecipe>(ExpressionRecipes[1])->getRecurrenceKind());
+ auto *ExtR = cast<VPWidenCastRecipe>(ExpressionRecipes[0]);
+ return isa<VPPartialReductionRecipe>(ExpressionRecipes.back())
+ ? Ctx.TTI.getPartialReductionCost(
+ Opcode, Ctx.Types.inferScalarType(getOperand(0)), nullptr,
+ RedTy, VF,
+ TargetTransformInfo::getPartialReductionExtendKind(
+ ExtR->getOpcode()),
+ TargetTransformInfo::PR_None, std::nullopt, Ctx.CostKind)
+ : Ctx.TTI.getExtendedReductionCost(
+ Opcode, ExtR->getOpcode() == Instruction::ZExt, RedTy,
+ SrcVecTy, std::nullopt, Ctx.CostKind);
}
case ExpressionTypes::MulAccReduction:
return Ctx.TTI.getMulAccReductionCost(false, Opcode, RedTy, SrcVecTy,
@@ -2871,6 +2889,19 @@ InstructionCost VPExpressionRecipe::computeCost(ElementCount VF,
Opcode = Instruction::Sub;
[[fallthrough]];
case ExpressionTypes::ExtMulAccReduction: {
+ if (isa<VPPartialReductionRecipe>(ExpressionRecipes.back())) {
+ auto *Ext0R = cast<VPWidenCastRecipe>(ExpressionRecipes[0]);
+ auto *Ext1R = cast<VPWidenCastRecipe>(ExpressionRecipes[1]);
+ auto *Mul = cast<VPWidenRecipe>(ExpressionRecipes[2]);
+ return Ctx.TTI.getPartialReductionCost(
+ Opcode, Ctx.Types.inferScalarType(getOperand(0)),
+ Ctx.Types.inferScalarType(getOperand(1)), RedTy, VF,
+ TargetTransformInfo::getPartialReductionExtendKind(
+ Ext0R->getOpcode()),
+ TargetTransformInfo::getPartialReductionExtendKind(
+ Ext1R->getOpcode()),
+ Mul->getOpcode(), Ctx.CostKind);
+ }
return Ctx.TTI.getMulAccReductionCost(
cast<VPWidenCastRecipe>(ExpressionRecipes.front())->getOpcode() ==
Instruction::ZExt,
@@ -2910,12 +2941,13 @@ void VPExpressionRecipe::print(raw_ostream &O, const Twine &Indent,
O << " = ";
auto *Red = cast<VPReductionRecipe>(ExpressionRecipes.back());
unsigned Opcode = RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind());
+ bool IsPartialReduction = isa<VPPartialReductionRecipe>(Red);
switch (ExpressionType) {
case ExpressionTypes::ExtendedReduction: {
getOperand(1)->printAsOperand(O, SlotTracker);
- O << " +";
- O << " reduce." << Instruction::getOpcodeName(Opcode) << " (";
+ O << " + " << (IsPartialReduction ? "partial." : "") << "reduce.";
+ O << Instruction::getOpcodeName(Opcode) << " (";
getOperand(0)->printAsOperand(O, SlotTracker);
Red->printFlags(O);
@@ -2931,8 +2963,8 @@ void VPExpressionRecipe::print(raw_ostream &O, const Twine &Indent,
}
case ExpressionTypes::ExtNegatedMulAccReduction: {
getOperand(getNumOperands() - 1)->printAsOperand(O, SlotTracker);
- O << " + reduce."
- << Instruction::getOpcodeName(
+ O << " + " << (IsPartialReduction ? "partial." : "") << "reduce.";
+ O << Instruction::getOpcodeName(
RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind()))
<< " (sub (0, mul";
auto *Mul = cast<VPWidenRecipe>(ExpressionRecipes[2]);
@@ -2956,9 +2988,8 @@ void VPExpressionRecipe::print(raw_ostream &O, const Twine &Indent,
case ExpressionTypes::MulAccReduction:
case ExpressionTypes::ExtMulAccReduction: {
getOperand(getNumOperands() - 1)->printAsOperand(O, SlotTracker);
- O << " + ";
- O << "reduce."
- << Instruction::getOpcodeName(
+ O << " + " << (IsPartialReduction ? "partial." : "") << "reduce.";
+ O << Instruction::getOpcodeName(
RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind()))
<< " (";
O << "mul";
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index f5a3af4..3e85e6f 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -3519,18 +3519,31 @@ tryToMatchAndCreateExtendedReduction(VPReductionRecipe *Red, VPCostContext &Ctx,
VPValue *VecOp = Red->getVecOp();
// Clamp the range if using extended-reduction is profitable.
- auto IsExtendedRedValidAndClampRange = [&](unsigned Opcode, bool isZExt,
- Type *SrcTy) -> bool {
+ auto IsExtendedRedValidAndClampRange =
+ [&](unsigned Opcode, Instruction::CastOps ExtOpc, Type *SrcTy) -> bool {
return LoopVectorizationPlanner::getDecisionAndClampRange(
[&](ElementCount VF) {
auto *SrcVecTy = cast<VectorType>(toVectorTy(SrcTy, VF));
TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
- InstructionCost ExtRedCost = Ctx.TTI.getExtendedReductionCost(
- Opcode, isZExt, RedTy, SrcVecTy, Red->getFastMathFlags(),
- CostKind);
+
+ InstructionCost ExtRedCost;
InstructionCost ExtCost =
cast<VPWidenCastRecipe>(VecOp)->computeCost(VF, Ctx);
InstructionCost RedCost = Red->computeCost(VF, Ctx);
+
+ if (isa<VPPartialReductionRecipe>(Red)) {
+ TargetTransformInfo::PartialReductionExtendKind ExtKind =
+ TargetTransformInfo::getPartialReductionExtendKind(ExtOpc);
+ // FIXME: Move partial reduction creation, costing and clamping
+ // here from LoopVectorize.cpp.
+ ExtRedCost = Ctx.TTI.getPartialReductionCost(
+ Opcode, SrcTy, nullptr, RedTy, VF, ExtKind,
+ llvm::TargetTransformInfo::PR_None, std::nullopt, Ctx.CostKind);
+ } else {
+ ExtRedCost = Ctx.TTI.getExtendedReductionCost(
+ Opcode, ExtOpc == Instruction::CastOps::ZExt, RedTy, SrcVecTy,
+ Red->getFastMathFlags(), CostKind);
+ }
return ExtRedCost.isValid() && ExtRedCost < ExtCost + RedCost;
},
Range);
@@ -3541,8 +3554,7 @@ tryToMatchAndCreateExtendedReduction(VPReductionRecipe *Red, VPCostContext &Ctx,
if (match(VecOp, m_ZExtOrSExt(m_VPValue(A))) &&
IsExtendedRedValidAndClampRange(
RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind()),
- cast<VPWidenCastRecipe>(VecOp)->getOpcode() ==
- Instruction::CastOps::ZExt,
+ cast<VPWidenCastRecipe>(VecOp)->getOpcode(),
Ctx.Types.inferScalarType(A)))
return new VPExpressionRecipe(cast<VPWidenCastRecipe>(VecOp), Red);
@@ -3560,6 +3572,8 @@ tryToMatchAndCreateExtendedReduction(VPReductionRecipe *Red, VPCostContext &Ctx,
static VPExpressionRecipe *
tryToMatchAndCreateMulAccumulateReduction(VPReductionRecipe *Red,
VPCostContext &Ctx, VFRange &Range) {
+ bool IsPartialReduction = isa<VPPartialReductionRecipe>(Red);
+
unsigned Opcode = RecurrenceDescriptor::getOpcode(Red->getRecurrenceKind());
if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
return nullptr;
@@ -3568,16 +3582,41 @@ tryToMatchAndCreateMulAccumulateReduction(VPReductionRecipe *Red,
// Clamp the range if using multiply-accumulate-reduction is profitable.
auto IsMulAccValidAndClampRange =
- [&](bool isZExt, VPWidenRecipe *Mul, VPWidenCastRecipe *Ext0,
- VPWidenCastRecipe *Ext1, VPWidenCastRecipe *OuterExt) -> bool {
+ [&](VPWidenRecipe *Mul, VPWidenCastRecipe *Ext0, VPWidenCastRecipe *Ext1,
+ VPWidenCastRecipe *OuterExt) -> bool {
return LoopVectorizationPlanner::getDecisionAndClampRange(
[&](ElementCount VF) {
TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
Type *SrcTy =
Ext0 ? Ctx.Types.inferScalarType(Ext0->getOperand(0)) : RedTy;
- auto *SrcVecTy = cast<VectorType>(toVectorTy(SrcTy, VF));
- InstructionCost MulAccCost = Ctx.TTI.getMulAccReductionCost(
- isZExt, Opcode, RedTy, SrcVecTy, CostKind);
+ InstructionCost MulAccCost;
+
+ if (IsPartialReduction) {
+ Type *SrcTy2 =
+ Ext1 ? Ctx.Types.inferScalarType(Ext1->getOperand(0)) : nullptr;
+ // FIXME: Move partial reduction creation, costing and clamping
+ // here from LoopVectorize.cpp.
+ MulAccCost = Ctx.TTI.getPartialReductionCost(
+ Opcode, SrcTy, SrcTy2, RedTy, VF,
+ Ext0 ? TargetTransformInfo::getPartialReductionExtendKind(
+ Ext0->getOpcode())
+ : TargetTransformInfo::PR_None,
+ Ext1 ? TargetTransformInfo::getPartialReductionExtendKind(
+ Ext1->getOpcode())
+ : TargetTransformInfo::PR_None,
+ Mul->getOpcode(), CostKind);
+ } else {
+ // Only partial reductions support mixed extends at the moment.
+ if (Ext0 && Ext1 && Ext0->getOpcode() != Ext1->getOpcode())
+ return false;
+
+ bool IsZExt =
+ !Ext0 || Ext0->getOpcode() == Instruction::CastOps::ZExt;
+ auto *SrcVecTy = cast<VectorType>(toVectorTy(SrcTy, VF));
+ MulAccCost = Ctx.TTI.getMulAccReductionCost(IsZExt, Opcode, RedTy,
+ SrcVecTy, CostKind);
+ }
+
InstructionCost MulCost = Mul->computeCost(VF, Ctx);
InstructionCost RedCost = Red->computeCost(VF, Ctx);
InstructionCost ExtCost = 0;
@@ -3611,14 +3650,10 @@ tryToMatchAndCreateMulAccumulateReduction(VPReductionRecipe *Red,
dyn_cast_if_present<VPWidenCastRecipe>(B->getDefiningRecipe());
auto *Mul = cast<VPWidenRecipe>(VecOp->getDefiningRecipe());
- // Match reduce.add(mul(ext, ext)).
- if (RecipeA && RecipeB &&
- (RecipeA->getOpcode() == RecipeB->getOpcode() || A == B) &&
- match(RecipeA, m_ZExtOrSExt(m_VPValue())) &&
+ // Match reduce.add/sub(mul(ext, ext)).
+ if (RecipeA && RecipeB && match(RecipeA, m_ZExtOrSExt(m_VPValue())) &&
match(RecipeB, m_ZExtOrSExt(m_VPValue())) &&
- IsMulAccValidAndClampRange(RecipeA->getOpcode() ==
- Instruction::CastOps::ZExt,
- Mul, RecipeA, RecipeB, nullptr)) {
+ IsMulAccValidAndClampRange(Mul, RecipeA, RecipeB, nullptr)) {
if (Sub)
return new VPExpressionRecipe(RecipeA, RecipeB, Mul,
cast<VPWidenRecipe>(Sub), Red);
@@ -3626,8 +3661,7 @@ tryToMatchAndCreateMulAccumulateReduction(VPReductionRecipe *Red,
}
// Match reduce.add(mul).
// TODO: Add an expression type for this variant with a negated mul
- if (!Sub &&
- IsMulAccValidAndClampRange(true, Mul, nullptr, nullptr, nullptr))
+ if (!Sub && IsMulAccValidAndClampRange(Mul, nullptr, nullptr, nullptr))
return new VPExpressionRecipe(Mul, Red);
}
// TODO: Add an expression type for negated versions of other expression
@@ -3647,9 +3681,7 @@ tryToMatchAndCreateMulAccumulateReduction(VPReductionRecipe *Red,
cast<VPWidenCastRecipe>(Mul->getOperand(1)->getDefiningRecipe());
if ((Ext->getOpcode() == Ext0->getOpcode() || Ext0 == Ext1) &&
Ext0->getOpcode() == Ext1->getOpcode() &&
- IsMulAccValidAndClampRange(Ext0->getOpcode() ==
- Instruction::CastOps::ZExt,
- Mul, Ext0, Ext1, Ext)) {
+ IsMulAccValidAndClampRange(Mul, Ext0, Ext1, Ext) && Mul->hasOneUse()) {
auto *NewExt0 = new VPWidenCastRecipe(
Ext0->getOpcode(), Ext0->getOperand(0), Ext->getResultType(), *Ext0,
*Ext0, Ext0->getDebugLoc());
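For illustration only: the mul-accumulate matching above now also costs partial reductions through getPartialReductionCost when the reduction recipe is a VPPartialReductionRecipe. The canonical source shape this targets is a widening dot-product style loop such as the hypothetical one below (i8 inputs sign-extended, multiplied, and accumulated into i32); a sketch only, not taken from the patch's tests.
; Sketch: extend-multiply-accumulate reduction of the partial-reduction shape.
define i32 @dot(ptr %a, ptr %b, i64 %n) {
entry:
  br label %loop
loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %acc = phi i32 [ 0, %entry ], [ %acc.next, %loop ]
  %pa = getelementptr inbounds i8, ptr %a, i64 %iv
  %pb = getelementptr inbounds i8, ptr %b, i64 %iv
  %la = load i8, ptr %pa
  %lb = load i8, ptr %pb
  %ea = sext i8 %la to i32
  %eb = sext i8 %lb to i32
  %mul = mul i32 %ea, %eb
  %acc.next = add i32 %acc, %mul
  %iv.next = add i64 %iv, 1
  %done = icmp eq i64 %iv.next, %n
  br i1 %done, label %exit, label %loop
exit:
  ret i32 %acc.next
}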
diff --git a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp
index 32e4b88..06c3d75 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp
@@ -151,6 +151,8 @@ unsigned vputils::getVFScaleFactor(VPRecipeBase *R) {
return RR->getVFScaleFactor();
if (auto *RR = dyn_cast<VPPartialReductionRecipe>(R))
return RR->getVFScaleFactor();
+ if (auto *ER = dyn_cast<VPExpressionRecipe>(R))
+ return ER->getVFScaleFactor();
assert(
(!isa<VPInstruction>(R) || cast<VPInstruction>(R)->getOpcode() !=
VPInstruction::ReductionStartVector) &&
diff --git a/llvm/test/Analysis/CostModel/ARM/fparith.ll b/llvm/test/Analysis/CostModel/ARM/fparith.ll
index f9424e8..6f2626c 100644
--- a/llvm/test/Analysis/CostModel/ARM/fparith.ll
+++ b/llvm/test/Analysis/CostModel/ARM/fparith.ll
@@ -1,21 +1,21 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve < %s | FileCheck %s --check-prefix=CHECK-MVE
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve.fp < %s | FileCheck %s --check-prefix=CHECK-MVEFP
+; RUN: opt -passes="print<cost-model>" -cost-kind=all 2>&1 -disable-output -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve < %s | FileCheck %s --check-prefix=CHECK-MVE
+; RUN: opt -passes="print<cost-model>" -cost-kind=all 2>&1 -disable-output -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve.fp < %s | FileCheck %s --check-prefix=CHECK-MVEFP
target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
define void @f32() {
; CHECK-MVE-LABEL: 'f32'
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %c = fadd float undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %d = fsub float undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = fmul float undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVE-NEXT: Cost Model: Found costs of 1 for: %c = fadd float undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 1 for: %d = fsub float undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 1 for: %e = fmul float undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-MVEFP-LABEL: 'f32'
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %c = fadd float undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %d = fsub float undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = fmul float undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %c = fadd float undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %d = fsub float undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %e = fmul float undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%c = fadd float undef, undef
%d = fsub float undef, undef
@@ -25,16 +25,16 @@ define void @f32() {
define void @f16() {
; CHECK-MVE-LABEL: 'f16'
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %c = fadd half undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %d = fsub half undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = fmul half undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVE-NEXT: Cost Model: Found costs of 1 for: %c = fadd half undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 1 for: %d = fsub half undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 1 for: %e = fmul half undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-MVEFP-LABEL: 'f16'
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %c = fadd half undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %d = fsub half undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = fmul half undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %c = fadd half undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %d = fsub half undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %e = fmul half undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%c = fadd half undef, undef
%d = fsub half undef, undef
@@ -44,16 +44,16 @@ define void @f16() {
define void @f64() {
; CHECK-MVE-LABEL: 'f64'
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %c = fadd double undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %d = fsub double undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = fmul double undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVE-NEXT: Cost Model: Found costs of 1 for: %c = fadd double undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 1 for: %d = fsub double undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 1 for: %e = fmul double undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-MVEFP-LABEL: 'f64'
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %c = fadd double undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %d = fsub double undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %e = fmul double undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %c = fadd double undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %d = fsub double undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %e = fmul double undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%c = fadd double undef, undef
%d = fsub double undef, undef
@@ -63,28 +63,28 @@ define void @f64() {
define void @vf32() {
; CHECK-MVE-LABEL: 'vf32'
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %c2 = fadd <2 x float> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %d2 = fsub <2 x float> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %e2 = fmul <2 x float> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %c4 = fadd <4 x float> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %d4 = fsub <4 x float> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %e4 = fmul <4 x float> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %c8 = fadd <8 x float> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %d8 = fsub <8 x float> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %e8 = fmul <8 x float> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVE-NEXT: Cost Model: Found costs of 4 for: %c2 = fadd <2 x float> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 4 for: %d2 = fsub <2 x float> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 4 for: %e2 = fmul <2 x float> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 8 for: %c4 = fadd <4 x float> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 8 for: %d4 = fsub <4 x float> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 8 for: %e4 = fmul <4 x float> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 16 for: %c8 = fadd <8 x float> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 16 for: %d8 = fsub <8 x float> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 16 for: %e8 = fmul <8 x float> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-MVEFP-LABEL: 'vf32'
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %c2 = fadd <2 x float> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %d2 = fsub <2 x float> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e2 = fmul <2 x float> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %c4 = fadd <4 x float> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %d4 = fsub <4 x float> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e4 = fmul <4 x float> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %c8 = fadd <8 x float> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %d8 = fsub <8 x float> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %e8 = fmul <8 x float> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %c2 = fadd <2 x float> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %d2 = fsub <2 x float> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %e2 = fmul <2 x float> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %c4 = fadd <4 x float> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %d4 = fsub <4 x float> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %e4 = fmul <4 x float> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %c8 = fadd <8 x float> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %d8 = fsub <8 x float> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %e8 = fmul <8 x float> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%c2 = fadd <2 x float> undef, undef
%d2 = fsub <2 x float> undef, undef
@@ -100,28 +100,28 @@ define void @vf32() {
define void @vf16() {
; CHECK-MVE-LABEL: 'vf16'
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %c2 = fadd <2 x half> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %d2 = fsub <2 x half> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %e2 = fmul <2 x half> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %c4 = fadd <4 x half> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %d4 = fsub <4 x half> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %e4 = fmul <4 x half> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %c8 = fadd <8 x half> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %d8 = fsub <8 x half> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %e8 = fmul <8 x half> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVE-NEXT: Cost Model: Found costs of 4 for: %c2 = fadd <2 x half> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 4 for: %d2 = fsub <2 x half> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 4 for: %e2 = fmul <2 x half> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 8 for: %c4 = fadd <4 x half> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 8 for: %d4 = fsub <4 x half> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 8 for: %e4 = fmul <4 x half> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 16 for: %c8 = fadd <8 x half> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 16 for: %d8 = fsub <8 x half> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 16 for: %e8 = fmul <8 x half> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-MVEFP-LABEL: 'vf16'
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %c2 = fadd <2 x half> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %d2 = fsub <2 x half> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e2 = fmul <2 x half> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %c4 = fadd <4 x half> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %d4 = fsub <4 x half> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e4 = fmul <4 x half> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %c8 = fadd <8 x half> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %d8 = fsub <8 x half> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %e8 = fmul <8 x half> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %c2 = fadd <2 x half> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %d2 = fsub <2 x half> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %e2 = fmul <2 x half> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %c4 = fadd <4 x half> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %d4 = fsub <4 x half> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %e4 = fmul <4 x half> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %c8 = fadd <8 x half> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %d8 = fsub <8 x half> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %e8 = fmul <8 x half> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%c2 = fadd <2 x half> undef, undef
%d2 = fsub <2 x half> undef, undef
@@ -137,28 +137,28 @@ define void @vf16() {
define void @vf64() {
; CHECK-MVE-LABEL: 'vf64'
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %c2 = fadd <2 x double> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %d2 = fsub <2 x double> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %e2 = fmul <2 x double> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %c4 = fadd <4 x double> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %d4 = fsub <4 x double> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %e4 = fmul <4 x double> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %c8 = fadd <8 x double> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %d8 = fsub <8 x double> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %e8 = fmul <8 x double> undef, undef
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVE-NEXT: Cost Model: Found costs of 4 for: %c2 = fadd <2 x double> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 4 for: %d2 = fsub <2 x double> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 4 for: %e2 = fmul <2 x double> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 8 for: %c4 = fadd <4 x double> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 8 for: %d4 = fsub <4 x double> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 8 for: %e4 = fmul <4 x double> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 16 for: %c8 = fadd <8 x double> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 16 for: %d8 = fsub <8 x double> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of 16 for: %e8 = fmul <8 x double> undef, undef
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-MVEFP-LABEL: 'vf64'
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %c2 = fadd <2 x double> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %d2 = fsub <2 x double> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %e2 = fmul <2 x double> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %c4 = fadd <4 x double> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %d4 = fsub <4 x double> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %e4 = fmul <4 x double> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %c8 = fadd <8 x double> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %d8 = fsub <8 x double> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %e8 = fmul <8 x double> undef, undef
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 4 for: %c2 = fadd <2 x double> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 4 for: %d2 = fsub <2 x double> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 4 for: %e2 = fmul <2 x double> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 8 for: %c4 = fadd <4 x double> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 8 for: %d4 = fsub <4 x double> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 8 for: %e4 = fmul <4 x double> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 16 for: %c8 = fadd <8 x double> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 16 for: %d8 = fsub <8 x double> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 16 for: %e8 = fmul <8 x double> undef, undef
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%c2 = fadd <2 x double> undef, undef
%d2 = fsub <2 x double> undef, undef
diff --git a/llvm/test/Analysis/CostModel/ARM/fptoi_sat.ll b/llvm/test/Analysis/CostModel/ARM/fptoi_sat.ll
index aff7b19..af548f6 100644
--- a/llvm/test/Analysis/CostModel/ARM/fptoi_sat.ll
+++ b/llvm/test/Analysis/CostModel/ARM/fptoi_sat.ll
@@ -1,213 +1,213 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve < %s | FileCheck %s --check-prefix=CHECK-MVE
-; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve.fp,+fp64 < %s | FileCheck %s --check-prefix=CHECK-MVEFP
+; RUN: opt -passes="print<cost-model>" -cost-kind=all 2>&1 -disable-output -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve < %s | FileCheck %s --check-prefix=CHECK-MVE
+; RUN: opt -passes="print<cost-model>" -cost-kind=all 2>&1 -disable-output -mtriple=thumbv8.1m.main-none-eabi -mattr=+mve.fp,+fp64 < %s | FileCheck %s --check-prefix=CHECK-MVEFP
define void @casts() {
; CHECK-MVE-LABEL: 'casts'
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f32s1 = call i1 @llvm.fptosi.sat.i1.f32(float undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f32u1 = call i1 @llvm.fptoui.sat.i1.f32(float undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f32s8 = call i8 @llvm.fptosi.sat.i8.f32(float undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f32u8 = call i8 @llvm.fptoui.sat.i8.f32(float undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f32s16 = call i16 @llvm.fptosi.sat.i16.f32(float undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f32u16 = call i16 @llvm.fptoui.sat.i16.f32(float undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f32s32 = call i32 @llvm.fptosi.sat.i32.f32(float undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f32u32 = call i32 @llvm.fptoui.sat.i32.f32(float undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %f32s64 = call i64 @llvm.fptosi.sat.i64.f32(float undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f32u64 = call i64 @llvm.fptoui.sat.i64.f32(float undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f64s1 = call i1 @llvm.fptosi.sat.i1.f64(double undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f64u1 = call i1 @llvm.fptoui.sat.i1.f64(double undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f64s8 = call i8 @llvm.fptosi.sat.i8.f64(double undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f64u8 = call i8 @llvm.fptoui.sat.i8.f64(double undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f64s16 = call i16 @llvm.fptosi.sat.i16.f64(double undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f64u16 = call i16 @llvm.fptoui.sat.i16.f64(double undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f64s32 = call i32 @llvm.fptosi.sat.i32.f64(double undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f64u32 = call i32 @llvm.fptoui.sat.i32.f64(double undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %f64s64 = call i64 @llvm.fptosi.sat.i64.f64(double undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f64u64 = call i64 @llvm.fptoui.sat.i64.f64(double undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 120 for instruction: %v2f32s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f32(<2 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f32u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f32(<2 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 102 for instruction: %v2f32s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f32(<2 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f32u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f32(<2 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 102 for instruction: %v2f32s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f32(<2 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f32u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f32(<2 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 102 for instruction: %v2f32s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f32(<2 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f32u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f32(<2 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %v2f32s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f32(<2 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 120 for instruction: %v2f32u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f32(<2 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 120 for instruction: %v2f64s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f64(<2 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f64u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f64(<2 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 102 for instruction: %v2f64s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f64(<2 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f64u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f64(<2 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 102 for instruction: %v2f64s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f64(<2 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f64u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f64(<2 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 102 for instruction: %v2f64s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f64(<2 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f64u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f64(<2 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %v2f64s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f64(<2 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 120 for instruction: %v2f64u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f64(<2 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 240 for instruction: %v4f32s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f32(<4 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v4f32u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f32(<4 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 202 for instruction: %v4f32s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f32(<4 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v4f32u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f32(<4 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 202 for instruction: %v4f32s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f32(<4 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v4f32u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f32(<4 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 202 for instruction: %v4f32s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f32(<4 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v4f32u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f32(<4 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 406 for instruction: %v4f32s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f32(<4 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 378 for instruction: %v4f32u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f32(<4 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 314 for instruction: %v4f64s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f64(<4 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 250 for instruction: %v4f64u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f64(<4 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 276 for instruction: %v4f64s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f64(<4 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 250 for instruction: %v4f64u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f64(<4 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 276 for instruction: %v4f64s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f64(<4 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 250 for instruction: %v4f64u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f64(<4 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 276 for instruction: %v4f64s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f64(<4 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 250 for instruction: %v4f64u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f64(<4 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 404 for instruction: %v4f64s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f64(<4 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 376 for instruction: %v4f64u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f64(<4 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 626 for instruction: %v8f32s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f32(<8 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 498 for instruction: %v8f32u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f32(<8 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 548 for instruction: %v8f32s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f32(<8 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 498 for instruction: %v8f32u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f32(<8 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 548 for instruction: %v8f32s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f32(<8 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 498 for instruction: %v8f32u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f32(<8 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 548 for instruction: %v8f32s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f32(<8 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 496 for instruction: %v8f32u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f32(<8 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1360 for instruction: %v8f32s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f32(<8 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1304 for instruction: %v8f32u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f32(<8 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 922 for instruction: %v8f64s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f64(<8 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 794 for instruction: %v8f64u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f64(<8 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 844 for instruction: %v8f64s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f64(<8 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 794 for instruction: %v8f64u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f64(<8 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 844 for instruction: %v8f64s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f64(<8 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 794 for instruction: %v8f64u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f64(<8 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 844 for instruction: %v8f64s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f64(<8 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 792 for instruction: %v8f64u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f64(<8 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1352 for instruction: %v8f64s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f64(<8 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1296 for instruction: %v8f64u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f64(<8 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1834 for instruction: %v16f32s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f32(<16 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1578 for instruction: %v16f32u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f32(<16 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1676 for instruction: %v16f32s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f32(<16 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1578 for instruction: %v16f32u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f32(<16 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1676 for instruction: %v16f32s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f32(<16 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1576 for instruction: %v16f32u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f32(<16 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1672 for instruction: %v16f32s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f32(<16 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1568 for instruction: %v16f32u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f32(<16 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4912 for instruction: %v16f32s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f32(<16 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4800 for instruction: %v16f32u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f32(<16 x float> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 3018 for instruction: %v16f64s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f64(<16 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 2762 for instruction: %v16f64u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f64(<16 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 2860 for instruction: %v16f64s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f64(<16 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 2762 for instruction: %v16f64u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f64(<16 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 2860 for instruction: %v16f64s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f64(<16 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 2760 for instruction: %v16f64u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f64(<16 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 2856 for instruction: %v16f64s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f64(<16 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 2752 for instruction: %v16f64u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f64(<16 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4880 for instruction: %v16f64s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f64(<16 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4768 for instruction: %v16f64u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f64(<16 x double> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:7 Lat:23 SizeLat:23 for: %f32s1 = call i1 @llvm.fptosi.sat.i1.f32(float undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f32u1 = call i1 @llvm.fptoui.sat.i1.f32(float undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:6 Lat:23 SizeLat:23 for: %f32s8 = call i8 @llvm.fptosi.sat.i8.f32(float undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f32u8 = call i8 @llvm.fptoui.sat.i8.f32(float undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:6 Lat:23 SizeLat:23 for: %f32s16 = call i16 @llvm.fptosi.sat.i16.f32(float undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f32u16 = call i16 @llvm.fptoui.sat.i16.f32(float undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:6 Lat:23 SizeLat:23 for: %f32s32 = call i32 @llvm.fptosi.sat.i32.f32(float undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f32u32 = call i32 @llvm.fptoui.sat.i32.f32(float undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:24 CodeSize:7 Lat:24 SizeLat:24 for: %f32s64 = call i64 @llvm.fptosi.sat.i64.f32(float undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f32u64 = call i64 @llvm.fptoui.sat.i64.f32(float undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:7 Lat:23 SizeLat:23 for: %f64s1 = call i1 @llvm.fptosi.sat.i1.f64(double undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f64u1 = call i1 @llvm.fptoui.sat.i1.f64(double undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:6 Lat:23 SizeLat:23 for: %f64s8 = call i8 @llvm.fptosi.sat.i8.f64(double undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f64u8 = call i8 @llvm.fptoui.sat.i8.f64(double undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:6 Lat:23 SizeLat:23 for: %f64s16 = call i16 @llvm.fptosi.sat.i16.f64(double undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f64u16 = call i16 @llvm.fptoui.sat.i16.f64(double undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:6 Lat:23 SizeLat:23 for: %f64s32 = call i32 @llvm.fptosi.sat.i32.f64(double undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f64u32 = call i32 @llvm.fptoui.sat.i32.f64(double undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:24 CodeSize:7 Lat:24 SizeLat:24 for: %f64s64 = call i64 @llvm.fptosi.sat.i64.f64(double undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f64u64 = call i64 @llvm.fptoui.sat.i64.f64(double undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:120 CodeSize:43 Lat:85 SizeLat:85 for: %v2f32s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f32(<2 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f32u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f32(<2 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:102 CodeSize:30 Lat:67 SizeLat:67 for: %v2f32s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f32(<2 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f32u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f32(<2 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:102 CodeSize:30 Lat:67 SizeLat:67 for: %v2f32s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f32(<2 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f32u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f32(<2 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:102 CodeSize:30 Lat:67 SizeLat:67 for: %v2f32s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f32(<2 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f32u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f32(<2 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:134 CodeSize:30 Lat:67 SizeLat:67 for: %v2f32s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f32(<2 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:120 CodeSize:17 Lat:53 SizeLat:53 for: %v2f32u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f32(<2 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:120 CodeSize:43 Lat:85 SizeLat:85 for: %v2f64s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f64(<2 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f64u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f64(<2 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:102 CodeSize:30 Lat:67 SizeLat:67 for: %v2f64s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f64(<2 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f64u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f64(<2 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:102 CodeSize:30 Lat:67 SizeLat:67 for: %v2f64s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f64(<2 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f64u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f64(<2 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:102 CodeSize:30 Lat:67 SizeLat:67 for: %v2f64s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f64(<2 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f64u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f64(<2 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:134 CodeSize:30 Lat:67 SizeLat:67 for: %v2f64s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f64(<2 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:120 CodeSize:17 Lat:53 SizeLat:53 for: %v2f64u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f64(<2 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:240 CodeSize:85 Lat:169 SizeLat:169 for: %v4f32s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f32(<4 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:176 CodeSize:33 Lat:105 SizeLat:105 for: %v4f32u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f32(<4 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:202 CodeSize:58 Lat:131 SizeLat:131 for: %v4f32s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f32(<4 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:176 CodeSize:33 Lat:105 SizeLat:105 for: %v4f32u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f32(<4 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:202 CodeSize:58 Lat:131 SizeLat:131 for: %v4f32s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f32(<4 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:176 CodeSize:33 Lat:105 SizeLat:105 for: %v4f32u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f32(<4 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:202 CodeSize:58 Lat:131 SizeLat:131 for: %v4f32s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f32(<4 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:176 CodeSize:33 Lat:105 SizeLat:105 for: %v4f32u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f32(<4 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:406 CodeSize:59 Lat:133 SizeLat:133 for: %v4f32s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f32(<4 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:378 CodeSize:33 Lat:105 SizeLat:105 for: %v4f32u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f32(<4 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:314 CodeSize:85 Lat:169 SizeLat:169 for: %v4f64s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f64(<4 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:250 CodeSize:33 Lat:105 SizeLat:105 for: %v4f64u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f64(<4 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:276 CodeSize:58 Lat:131 SizeLat:131 for: %v4f64s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f64(<4 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:250 CodeSize:33 Lat:105 SizeLat:105 for: %v4f64u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f64(<4 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:276 CodeSize:58 Lat:131 SizeLat:131 for: %v4f64s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f64(<4 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:250 CodeSize:33 Lat:105 SizeLat:105 for: %v4f64u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f64(<4 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:276 CodeSize:58 Lat:131 SizeLat:131 for: %v4f64s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f64(<4 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:250 CodeSize:33 Lat:105 SizeLat:105 for: %v4f64u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f64(<4 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:404 CodeSize:59 Lat:133 SizeLat:133 for: %v4f64s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f64(<4 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:376 CodeSize:33 Lat:105 SizeLat:105 for: %v4f64u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f64(<4 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:626 CodeSize:169 Lat:337 SizeLat:337 for: %v8f32s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f32(<8 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:498 CodeSize:65 Lat:209 SizeLat:209 for: %v8f32u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f32(<8 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:548 CodeSize:114 Lat:259 SizeLat:259 for: %v8f32s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f32(<8 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:498 CodeSize:65 Lat:209 SizeLat:209 for: %v8f32u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f32(<8 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:548 CodeSize:114 Lat:259 SizeLat:259 for: %v8f32s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f32(<8 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:498 CodeSize:65 Lat:209 SizeLat:209 for: %v8f32u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f32(<8 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:548 CodeSize:115 Lat:261 SizeLat:261 for: %v8f32s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f32(<8 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:496 CodeSize:65 Lat:209 SizeLat:209 for: %v8f32u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f32(<8 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1360 CodeSize:117 Lat:265 SizeLat:265 for: %v8f32s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f32(<8 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1304 CodeSize:65 Lat:209 SizeLat:209 for: %v8f32u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f32(<8 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:922 CodeSize:169 Lat:337 SizeLat:337 for: %v8f64s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f64(<8 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:794 CodeSize:65 Lat:209 SizeLat:209 for: %v8f64u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f64(<8 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:844 CodeSize:114 Lat:259 SizeLat:259 for: %v8f64s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f64(<8 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:794 CodeSize:65 Lat:209 SizeLat:209 for: %v8f64u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f64(<8 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:844 CodeSize:114 Lat:259 SizeLat:259 for: %v8f64s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f64(<8 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:794 CodeSize:65 Lat:209 SizeLat:209 for: %v8f64u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f64(<8 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:844 CodeSize:115 Lat:261 SizeLat:261 for: %v8f64s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f64(<8 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:792 CodeSize:65 Lat:209 SizeLat:209 for: %v8f64u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f64(<8 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1352 CodeSize:117 Lat:265 SizeLat:265 for: %v8f64s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f64(<8 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1296 CodeSize:65 Lat:209 SizeLat:209 for: %v8f64u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f64(<8 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1834 CodeSize:337 Lat:673 SizeLat:673 for: %v16f32s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f32(<16 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1578 CodeSize:129 Lat:417 SizeLat:417 for: %v16f32u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f32(<16 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1676 CodeSize:226 Lat:515 SizeLat:515 for: %v16f32s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f32(<16 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1578 CodeSize:129 Lat:417 SizeLat:417 for: %v16f32u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f32(<16 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1676 CodeSize:227 Lat:517 SizeLat:517 for: %v16f32s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f32(<16 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1576 CodeSize:129 Lat:417 SizeLat:417 for: %v16f32u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f32(<16 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1672 CodeSize:229 Lat:521 SizeLat:521 for: %v16f32s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f32(<16 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1568 CodeSize:129 Lat:417 SizeLat:417 for: %v16f32u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f32(<16 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:4912 CodeSize:233 Lat:529 SizeLat:529 for: %v16f32s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f32(<16 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:4800 CodeSize:129 Lat:417 SizeLat:417 for: %v16f32u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f32(<16 x float> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:3018 CodeSize:337 Lat:673 SizeLat:673 for: %v16f64s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f64(<16 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:2762 CodeSize:129 Lat:417 SizeLat:417 for: %v16f64u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f64(<16 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:2860 CodeSize:226 Lat:515 SizeLat:515 for: %v16f64s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f64(<16 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:2762 CodeSize:129 Lat:417 SizeLat:417 for: %v16f64u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f64(<16 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:2860 CodeSize:227 Lat:517 SizeLat:517 for: %v16f64s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f64(<16 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:2760 CodeSize:129 Lat:417 SizeLat:417 for: %v16f64u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f64(<16 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:2856 CodeSize:229 Lat:521 SizeLat:521 for: %v16f64s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f64(<16 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:2752 CodeSize:129 Lat:417 SizeLat:417 for: %v16f64u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f64(<16 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:4880 CodeSize:233 Lat:529 SizeLat:529 for: %v16f64s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f64(<16 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:4768 CodeSize:129 Lat:417 SizeLat:417 for: %v16f64u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f64(<16 x double> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-MVEFP-LABEL: 'casts'
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f32s1 = call i1 @llvm.fptosi.sat.i1.f32(float undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f32u1 = call i1 @llvm.fptoui.sat.i1.f32(float undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f32s8 = call i8 @llvm.fptosi.sat.i8.f32(float undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f32u8 = call i8 @llvm.fptoui.sat.i8.f32(float undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f32s16 = call i16 @llvm.fptosi.sat.i16.f32(float undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f32u16 = call i16 @llvm.fptoui.sat.i16.f32(float undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32s32 = call i32 @llvm.fptosi.sat.i32.f32(float undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f32u32 = call i32 @llvm.fptoui.sat.i32.f32(float undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %f32s64 = call i64 @llvm.fptosi.sat.i64.f32(float undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %f32u64 = call i64 @llvm.fptoui.sat.i64.f32(float undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %f64s1 = call i1 @llvm.fptosi.sat.i1.f64(double undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %f64u1 = call i1 @llvm.fptoui.sat.i1.f64(double undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %f64s8 = call i8 @llvm.fptosi.sat.i8.f64(double undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %f64u8 = call i8 @llvm.fptoui.sat.i8.f64(double undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %f64s16 = call i16 @llvm.fptosi.sat.i16.f64(double undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %f64u16 = call i16 @llvm.fptoui.sat.i16.f64(double undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f64s32 = call i32 @llvm.fptosi.sat.i32.f64(double undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f64u32 = call i32 @llvm.fptoui.sat.i32.f64(double undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %f64s64 = call i64 @llvm.fptosi.sat.i64.f64(double undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %f64u64 = call i64 @llvm.fptoui.sat.i64.f64(double undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2f32s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f32(<2 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2f32u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f32(<2 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2f32s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f32(<2 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2f32u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f32(<2 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2f32s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f32(<2 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2f32u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f32(<2 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f32(<2 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f32(<2 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 76 for instruction: %v2f32s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f32(<2 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 72 for instruction: %v2f32u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f32(<2 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %v2f64s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f64(<2 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 52 for instruction: %v2f64u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f64(<2 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 62 for instruction: %v2f64s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f64(<2 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 52 for instruction: %v2f64u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f64(<2 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 62 for instruction: %v2f64s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f64(<2 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 52 for instruction: %v2f64u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f64(<2 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 62 for instruction: %v2f64s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f64(<2 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 52 for instruction: %v2f64u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f64(<2 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 94 for instruction: %v2f64s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f64(<2 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 84 for instruction: %v2f64u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f64(<2 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f32s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f32(<4 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f32u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f32(<4 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f32s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f32(<4 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f32u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f32(<4 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f32s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f32(<4 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f32u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f32(<4 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f32(<4 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f32(<4 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 284 for instruction: %v4f32s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f32(<4 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 278 for instruction: %v4f32u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f32(<4 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 234 for instruction: %v4f64s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f64(<4 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 178 for instruction: %v4f64u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f64(<4 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 196 for instruction: %v4f64s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f64(<4 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 178 for instruction: %v4f64u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f64(<4 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 196 for instruction: %v4f64s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f64(<4 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 178 for instruction: %v4f64u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f64(<4 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 196 for instruction: %v4f64s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f64(<4 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 178 for instruction: %v4f64u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f64(<4 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 324 for instruction: %v4f64s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f64(<4 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 304 for instruction: %v4f64u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f64(<4 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v8f32s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f32(<8 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v8f32u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f32(<8 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v8f32s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f32(<8 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v8f32u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f32(<8 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v8f32s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f32(<8 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v8f32u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f32(<8 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8f32s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f32(<8 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8f32u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f32(<8 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1148 for instruction: %v8f32s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f32(<8 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1104 for instruction: %v8f32u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f32(<8 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 762 for instruction: %v8f64s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f64(<8 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 650 for instruction: %v8f64u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f64(<8 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 684 for instruction: %v8f64s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f64(<8 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 650 for instruction: %v8f64u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f64(<8 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 684 for instruction: %v8f64s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f64(<8 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 650 for instruction: %v8f64u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f64(<8 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 684 for instruction: %v8f64s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f64(<8 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 648 for instruction: %v8f64u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f64(<8 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1192 for instruction: %v8f64s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f64(<8 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1152 for instruction: %v8f64u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f64(<8 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v16f32s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f32(<16 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v16f32u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f32(<16 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v16f32s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f32(<16 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v16f32u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f32(<16 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v16f32s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f32(<16 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %v16f32u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f32(<16 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f32s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f32(<16 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16f32u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f32(<16 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4488 for instruction: %v16f32s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f32(<16 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4400 for instruction: %v16f32u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f32(<16 x float> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2698 for instruction: %v16f64s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f64(<16 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2474 for instruction: %v16f64u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f64(<16 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2540 for instruction: %v16f64s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f64(<16 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2474 for instruction: %v16f64u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f64(<16 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2540 for instruction: %v16f64s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f64(<16 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2472 for instruction: %v16f64u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f64(<16 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2536 for instruction: %v16f64s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f64(<16 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2464 for instruction: %v16f64u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f64(<16 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4560 for instruction: %v16f64s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f64(<16 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4480 for instruction: %v16f64u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f64(<16 x double> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f32s1 = call i1 @llvm.fptosi.sat.i1.f32(float undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f32u1 = call i1 @llvm.fptoui.sat.i1.f32(float undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f32s8 = call i8 @llvm.fptosi.sat.i8.f32(float undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f32u8 = call i8 @llvm.fptoui.sat.i8.f32(float undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f32s16 = call i16 @llvm.fptosi.sat.i16.f32(float undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f32u16 = call i16 @llvm.fptoui.sat.i16.f32(float undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %f32s32 = call i32 @llvm.fptosi.sat.i32.f32(float undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %f32u32 = call i32 @llvm.fptoui.sat.i32.f32(float undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %f32s64 = call i64 @llvm.fptosi.sat.i64.f32(float undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 3 for: %f32u64 = call i64 @llvm.fptoui.sat.i64.f32(float undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:7 CodeSize:9 Lat:7 SizeLat:7 for: %f64s1 = call i1 @llvm.fptosi.sat.i1.f64(double undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:7 CodeSize:9 Lat:7 SizeLat:7 for: %f64u1 = call i1 @llvm.fptoui.sat.i1.f64(double undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:7 CodeSize:9 Lat:7 SizeLat:7 for: %f64s8 = call i8 @llvm.fptosi.sat.i8.f64(double undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:7 CodeSize:9 Lat:7 SizeLat:7 for: %f64u8 = call i8 @llvm.fptoui.sat.i8.f64(double undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:7 CodeSize:9 Lat:7 SizeLat:7 for: %f64s16 = call i16 @llvm.fptosi.sat.i16.f64(double undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:7 CodeSize:9 Lat:7 SizeLat:7 for: %f64u16 = call i16 @llvm.fptoui.sat.i16.f64(double undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %f64s32 = call i32 @llvm.fptosi.sat.i32.f64(double undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %f64u32 = call i32 @llvm.fptoui.sat.i32.f64(double undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:7 CodeSize:9 Lat:7 SizeLat:7 for: %f64s64 = call i64 @llvm.fptosi.sat.i64.f64(double undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:7 CodeSize:9 Lat:7 SizeLat:7 for: %f64u64 = call i64 @llvm.fptoui.sat.i64.f64(double undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v2f32s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f32(<2 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v2f32u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f32(<2 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v2f32s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f32(<2 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v2f32u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f32(<2 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v2f32s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f32(<2 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v2f32u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f32(<2 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %v2f32s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f32(<2 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %v2f32u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f32(<2 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:76 CodeSize:5 Lat:9 SizeLat:9 for: %v2f32s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f32(<2 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:72 CodeSize:3 Lat:5 SizeLat:5 for: %v2f32u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f32(<2 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:80 CodeSize:35 Lat:45 SizeLat:45 for: %v2f64s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f64(<2 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:52 CodeSize:17 Lat:17 SizeLat:17 for: %v2f64u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f64(<2 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:62 CodeSize:22 Lat:27 SizeLat:27 for: %v2f64s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f64(<2 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:52 CodeSize:17 Lat:17 SizeLat:17 for: %v2f64u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f64(<2 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:62 CodeSize:22 Lat:27 SizeLat:27 for: %v2f64s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f64(<2 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:52 CodeSize:17 Lat:17 SizeLat:17 for: %v2f64u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f64(<2 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:62 CodeSize:22 Lat:27 SizeLat:27 for: %v2f64s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f64(<2 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:52 CodeSize:17 Lat:17 SizeLat:17 for: %v2f64u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f64(<2 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:94 CodeSize:22 Lat:27 SizeLat:27 for: %v2f64s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f64(<2 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:84 CodeSize:17 Lat:17 SizeLat:17 for: %v2f64u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f64(<2 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v4f32s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f32(<4 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v4f32u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f32(<4 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v4f32s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f32(<4 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v4f32u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f32(<4 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v4f32s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f32(<4 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v4f32u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f32(<4 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %v4f32s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f32(<4 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %v4f32u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f32(<4 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:284 CodeSize:6 Lat:11 SizeLat:11 for: %v4f32s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f32(<4 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:278 CodeSize:3 Lat:5 SizeLat:5 for: %v4f32u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f32(<4 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:234 CodeSize:69 Lat:89 SizeLat:89 for: %v4f64s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f64(<4 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:178 CodeSize:33 Lat:33 SizeLat:33 for: %v4f64u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f64(<4 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:196 CodeSize:42 Lat:51 SizeLat:51 for: %v4f64s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f64(<4 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:178 CodeSize:33 Lat:33 SizeLat:33 for: %v4f64u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f64(<4 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:196 CodeSize:42 Lat:51 SizeLat:51 for: %v4f64s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f64(<4 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:178 CodeSize:33 Lat:33 SizeLat:33 for: %v4f64u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f64(<4 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:196 CodeSize:42 Lat:51 SizeLat:51 for: %v4f64s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f64(<4 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:178 CodeSize:33 Lat:33 SizeLat:33 for: %v4f64u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f64(<4 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:324 CodeSize:43 Lat:53 SizeLat:53 for: %v4f64s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f64(<4 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:304 CodeSize:33 Lat:33 SizeLat:33 for: %v4f64u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f64(<4 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:12 CodeSize:14 Lat:12 SizeLat:12 for: %v8f32s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f32(<8 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:12 CodeSize:14 Lat:12 SizeLat:12 for: %v8f32u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f32(<8 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:12 CodeSize:14 Lat:12 SizeLat:12 for: %v8f32s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f32(<8 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:12 CodeSize:14 Lat:12 SizeLat:12 for: %v8f32u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f32(<8 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:12 CodeSize:14 Lat:12 SizeLat:12 for: %v8f32s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f32(<8 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:12 CodeSize:14 Lat:12 SizeLat:12 for: %v8f32u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f32(<8 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %v8f32s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f32(<8 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %v8f32u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f32(<8 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:1148 CodeSize:43 Lat:53 SizeLat:53 for: %v8f32s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f32(<8 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:1104 CodeSize:5 Lat:9 SizeLat:9 for: %v8f32u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f32(<8 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:762 CodeSize:137 Lat:177 SizeLat:177 for: %v8f64s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f64(<8 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:650 CodeSize:65 Lat:65 SizeLat:65 for: %v8f64u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f64(<8 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:684 CodeSize:82 Lat:99 SizeLat:99 for: %v8f64s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f64(<8 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:650 CodeSize:65 Lat:65 SizeLat:65 for: %v8f64u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f64(<8 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:684 CodeSize:82 Lat:99 SizeLat:99 for: %v8f64s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f64(<8 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:650 CodeSize:65 Lat:65 SizeLat:65 for: %v8f64u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f64(<8 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:684 CodeSize:83 Lat:101 SizeLat:101 for: %v8f64s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f64(<8 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:648 CodeSize:65 Lat:65 SizeLat:65 for: %v8f64u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f64(<8 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:1192 CodeSize:85 Lat:105 SizeLat:105 for: %v8f64s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f64(<8 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:1152 CodeSize:65 Lat:65 SizeLat:65 for: %v8f64u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f64(<8 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:24 CodeSize:28 Lat:24 SizeLat:24 for: %v16f32s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f32(<16 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:24 CodeSize:28 Lat:24 SizeLat:24 for: %v16f32u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f32(<16 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:24 CodeSize:28 Lat:24 SizeLat:24 for: %v16f32s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f32(<16 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:24 CodeSize:28 Lat:24 SizeLat:24 for: %v16f32u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f32(<16 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:24 CodeSize:28 Lat:24 SizeLat:24 for: %v16f32s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f32(<16 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:24 CodeSize:28 Lat:24 SizeLat:24 for: %v16f32u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f32(<16 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %v16f32s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f32(<16 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %v16f32u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f32(<16 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4488 CodeSize:85 Lat:105 SizeLat:105 for: %v16f32s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f32(<16 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4400 CodeSize:9 Lat:17 SizeLat:17 for: %v16f32u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f32(<16 x float> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2698 CodeSize:273 Lat:353 SizeLat:353 for: %v16f64s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f64(<16 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2474 CodeSize:129 Lat:129 SizeLat:129 for: %v16f64u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f64(<16 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2540 CodeSize:162 Lat:195 SizeLat:195 for: %v16f64s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f64(<16 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2474 CodeSize:129 Lat:129 SizeLat:129 for: %v16f64u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f64(<16 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2540 CodeSize:163 Lat:197 SizeLat:197 for: %v16f64s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f64(<16 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2472 CodeSize:129 Lat:129 SizeLat:129 for: %v16f64u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f64(<16 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2536 CodeSize:165 Lat:201 SizeLat:201 for: %v16f64s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f64(<16 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2464 CodeSize:129 Lat:129 SizeLat:129 for: %v16f64u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f64(<16 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4560 CodeSize:169 Lat:209 SizeLat:209 for: %v16f64s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f64(<16 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4480 CodeSize:129 Lat:129 SizeLat:129 for: %v16f64u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f64(<16 x double> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%f32s1 = call i1 @llvm.fptosi.sat.i1.f32(float undef)
%f32u1 = call i1 @llvm.fptoui.sat.i1.f32(float undef)
@@ -324,110 +324,110 @@ define void @casts() {
define void @fp16() {
; CHECK-MVE-LABEL: 'fp16'
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f16s1 = call i1 @llvm.fptosi.sat.i1.f16(half undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f16u1 = call i1 @llvm.fptoui.sat.i1.f16(half undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f16s8 = call i8 @llvm.fptosi.sat.i8.f16(half undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f16u8 = call i8 @llvm.fptoui.sat.i8.f16(half undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f16s16 = call i16 @llvm.fptosi.sat.i16.f16(half undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f16u16 = call i16 @llvm.fptoui.sat.i16.f16(half undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 23 for instruction: %f16s32 = call i32 @llvm.fptosi.sat.i32.f16(half undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f16u32 = call i32 @llvm.fptoui.sat.i32.f16(half undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %f16s64 = call i64 @llvm.fptosi.sat.i64.f16(half undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %f16u64 = call i64 @llvm.fptoui.sat.i64.f16(half undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 120 for instruction: %v2f16s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f16(<2 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f16u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f16(<2 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 102 for instruction: %v2f16s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f16(<2 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f16u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f16(<2 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 102 for instruction: %v2f16s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f16(<2 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f16u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f16(<2 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 102 for instruction: %v2f16s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f16(<2 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %v2f16u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f16(<2 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 134 for instruction: %v2f16s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f16(<2 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 120 for instruction: %v2f16u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f16(<2 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 240 for instruction: %v4f16s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f16(<4 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v4f16u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f16(<4 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 202 for instruction: %v4f16s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f16(<4 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v4f16u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f16(<4 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 202 for instruction: %v4f16s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f16(<4 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v4f16u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f16(<4 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 202 for instruction: %v4f16s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f16(<4 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v4f16u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f16(<4 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 406 for instruction: %v4f16s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f16(<4 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 378 for instruction: %v4f16u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f16(<4 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 480 for instruction: %v8f16s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f16(<8 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 352 for instruction: %v8f16u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f16(<8 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 402 for instruction: %v8f16s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f16(<8 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 352 for instruction: %v8f16u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f16(<8 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 402 for instruction: %v8f16s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f16(<8 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 352 for instruction: %v8f16u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f16(<8 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 550 for instruction: %v8f16s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f16(<8 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 498 for instruction: %v8f16u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f16(<8 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1362 for instruction: %v8f16s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f16(<8 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1306 for instruction: %v8f16u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f16(<8 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1250 for instruction: %v16f16s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f16(<16 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 994 for instruction: %v16f16u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f16(<16 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1092 for instruction: %v16f16s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f16(<16 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 994 for instruction: %v16f16u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f16(<16 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1092 for instruction: %v16f16s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f16(<16 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 992 for instruction: %v16f16u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f16(<16 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1680 for instruction: %v16f16s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f16(<16 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 1576 for instruction: %v16f16u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f16(<16 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4920 for instruction: %v16f16s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f16(<16 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 4808 for instruction: %v16f16u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f16(<16 x half> undef)
-; CHECK-MVE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:7 Lat:23 SizeLat:23 for: %f16s1 = call i1 @llvm.fptosi.sat.i1.f16(half undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f16u1 = call i1 @llvm.fptoui.sat.i1.f16(half undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:6 Lat:23 SizeLat:23 for: %f16s8 = call i8 @llvm.fptosi.sat.i8.f16(half undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f16u8 = call i8 @llvm.fptoui.sat.i8.f16(half undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:6 Lat:23 SizeLat:23 for: %f16s16 = call i16 @llvm.fptosi.sat.i16.f16(half undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f16u16 = call i16 @llvm.fptoui.sat.i16.f16(half undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:23 CodeSize:6 Lat:23 SizeLat:23 for: %f16s32 = call i32 @llvm.fptosi.sat.i32.f16(half undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f16u32 = call i32 @llvm.fptoui.sat.i32.f16(half undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:24 CodeSize:7 Lat:24 SizeLat:24 for: %f16s64 = call i64 @llvm.fptosi.sat.i64.f16(half undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:21 CodeSize:3 Lat:21 SizeLat:21 for: %f16u64 = call i64 @llvm.fptoui.sat.i64.f16(half undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:120 CodeSize:43 Lat:85 SizeLat:85 for: %v2f16s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f16(<2 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f16u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f16(<2 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:102 CodeSize:30 Lat:67 SizeLat:67 for: %v2f16s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f16(<2 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f16u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f16(<2 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:102 CodeSize:30 Lat:67 SizeLat:67 for: %v2f16s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f16(<2 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f16u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f16(<2 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:102 CodeSize:30 Lat:67 SizeLat:67 for: %v2f16s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f16(<2 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:88 CodeSize:17 Lat:53 SizeLat:53 for: %v2f16u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f16(<2 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:134 CodeSize:30 Lat:67 SizeLat:67 for: %v2f16s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f16(<2 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:120 CodeSize:17 Lat:53 SizeLat:53 for: %v2f16u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f16(<2 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:240 CodeSize:85 Lat:169 SizeLat:169 for: %v4f16s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f16(<4 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:176 CodeSize:33 Lat:105 SizeLat:105 for: %v4f16u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f16(<4 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:202 CodeSize:58 Lat:131 SizeLat:131 for: %v4f16s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f16(<4 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:176 CodeSize:33 Lat:105 SizeLat:105 for: %v4f16u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f16(<4 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:202 CodeSize:58 Lat:131 SizeLat:131 for: %v4f16s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f16(<4 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:176 CodeSize:33 Lat:105 SizeLat:105 for: %v4f16u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f16(<4 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:202 CodeSize:58 Lat:131 SizeLat:131 for: %v4f16s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f16(<4 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:176 CodeSize:33 Lat:105 SizeLat:105 for: %v4f16u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f16(<4 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:406 CodeSize:59 Lat:133 SizeLat:133 for: %v4f16s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f16(<4 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:378 CodeSize:33 Lat:105 SizeLat:105 for: %v4f16u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f16(<4 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:480 CodeSize:169 Lat:337 SizeLat:337 for: %v8f16s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f16(<8 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:352 CodeSize:65 Lat:209 SizeLat:209 for: %v8f16u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f16(<8 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:402 CodeSize:114 Lat:259 SizeLat:259 for: %v8f16s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f16(<8 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:352 CodeSize:65 Lat:209 SizeLat:209 for: %v8f16u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f16(<8 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:402 CodeSize:114 Lat:259 SizeLat:259 for: %v8f16s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f16(<8 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:352 CodeSize:65 Lat:209 SizeLat:209 for: %v8f16u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f16(<8 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:550 CodeSize:115 Lat:261 SizeLat:261 for: %v8f16s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f16(<8 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:498 CodeSize:65 Lat:209 SizeLat:209 for: %v8f16u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f16(<8 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1362 CodeSize:117 Lat:265 SizeLat:265 for: %v8f16s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f16(<8 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1306 CodeSize:65 Lat:209 SizeLat:209 for: %v8f16u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f16(<8 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1250 CodeSize:337 Lat:673 SizeLat:673 for: %v16f16s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f16(<16 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:994 CodeSize:129 Lat:417 SizeLat:417 for: %v16f16u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f16(<16 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1092 CodeSize:226 Lat:515 SizeLat:515 for: %v16f16s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f16(<16 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:994 CodeSize:129 Lat:417 SizeLat:417 for: %v16f16u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f16(<16 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1092 CodeSize:227 Lat:517 SizeLat:517 for: %v16f16s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f16(<16 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:992 CodeSize:129 Lat:417 SizeLat:417 for: %v16f16u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f16(<16 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1680 CodeSize:229 Lat:521 SizeLat:521 for: %v16f16s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f16(<16 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:1576 CodeSize:129 Lat:417 SizeLat:417 for: %v16f16u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f16(<16 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:4920 CodeSize:233 Lat:529 SizeLat:529 for: %v16f16s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f16(<16 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:4808 CodeSize:129 Lat:417 SizeLat:417 for: %v16f16u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f16(<16 x half> undef)
+; CHECK-MVE-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
; CHECK-MVEFP-LABEL: 'fp16'
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f16s1 = call i1 @llvm.fptosi.sat.i1.f16(half undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f16u1 = call i1 @llvm.fptoui.sat.i1.f16(half undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f16s8 = call i8 @llvm.fptosi.sat.i8.f16(half undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f16u8 = call i8 @llvm.fptoui.sat.i8.f16(half undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f16s16 = call i16 @llvm.fptosi.sat.i16.f16(half undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %f16u16 = call i16 @llvm.fptoui.sat.i16.f16(half undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16s32 = call i32 @llvm.fptosi.sat.i32.f16(half undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %f16u32 = call i32 @llvm.fptoui.sat.i32.f16(half undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %f16s64 = call i64 @llvm.fptosi.sat.i64.f16(half undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %f16u64 = call i64 @llvm.fptoui.sat.i64.f16(half undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2f16s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f16(<2 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2f16u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f16(<2 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2f16s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f16(<2 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v2f16u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f16(<2 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f16s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f16(<2 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f16u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f16(<2 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 44 for instruction: %v2f16s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f16(<2 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %v2f16u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f16(<2 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 76 for instruction: %v2f16s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f16(<2 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 72 for instruction: %v2f16u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f16(<2 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f16s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f16(<4 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f16u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f16(<4 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f16s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f16(<4 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f16u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f16(<4 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f16s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f16(<4 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f16u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f16(<4 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %v4f16s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f16(<4 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4f16u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f16(<4 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 284 for instruction: %v4f16s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f16(<4 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 278 for instruction: %v4f16u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f16(<4 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8f16s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f16(<8 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8f16u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f16(<8 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8f16s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f16(<8 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8f16u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f16(<8 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8f16s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f16(<8 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8f16u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f16(<8 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %v8f16s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f16(<8 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v8f16u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f16(<8 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1112 for instruction: %v8f16s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f16(<8 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 1102 for instruction: %v8f16u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f16(<8 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v16f16s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f16(<16 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v16f16u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f16(<16 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v16f16s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f16(<16 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v16f16u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f16(<16 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16f16s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f16(<16 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v16f16u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f16(<16 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 124 for instruction: %v16f16s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f16(<16 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v16f16u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f16(<16 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4484 for instruction: %v16f16s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f16(<16 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 4400 for instruction: %v16f16u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f16(<16 x half> undef)
-; CHECK-MVEFP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f16s1 = call i1 @llvm.fptosi.sat.i1.f16(half undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f16u1 = call i1 @llvm.fptoui.sat.i1.f16(half undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f16s8 = call i8 @llvm.fptosi.sat.i8.f16(half undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f16u8 = call i8 @llvm.fptoui.sat.i8.f16(half undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f16s16 = call i16 @llvm.fptosi.sat.i16.f16(half undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:5 CodeSize:7 Lat:5 SizeLat:5 for: %f16u16 = call i16 @llvm.fptoui.sat.i16.f16(half undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %f16s32 = call i32 @llvm.fptosi.sat.i32.f16(half undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 1 for: %f16u32 = call i32 @llvm.fptoui.sat.i32.f16(half undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %f16s64 = call i64 @llvm.fptosi.sat.i64.f16(half undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of 3 for: %f16u64 = call i64 @llvm.fptoui.sat.i64.f16(half undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v2f16s1 = call <2 x i1> @llvm.fptosi.sat.v2i1.v2f16(<2 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v2f16u1 = call <2 x i1> @llvm.fptoui.sat.v2i1.v2f16(<2 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v2f16s8 = call <2 x i8> @llvm.fptosi.sat.v2i8.v2f16(<2 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v2f16u8 = call <2 x i8> @llvm.fptoui.sat.v2i8.v2f16(<2 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %v2f16s16 = call <2 x i16> @llvm.fptosi.sat.v2i16.v2f16(<2 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %v2f16u16 = call <2 x i16> @llvm.fptoui.sat.v2i16.v2f16(<2 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:44 CodeSize:5 Lat:9 SizeLat:9 for: %v2f16s32 = call <2 x i32> @llvm.fptosi.sat.v2i32.v2f16(<2 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:40 CodeSize:3 Lat:5 SizeLat:5 for: %v2f16u32 = call <2 x i32> @llvm.fptoui.sat.v2i32.v2f16(<2 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:76 CodeSize:5 Lat:9 SizeLat:9 for: %v2f16s64 = call <2 x i64> @llvm.fptosi.sat.v2i64.v2f16(<2 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:72 CodeSize:3 Lat:5 SizeLat:5 for: %v2f16u64 = call <2 x i64> @llvm.fptoui.sat.v2i64.v2f16(<2 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v4f16s1 = call <4 x i1> @llvm.fptosi.sat.v4i1.v4f16(<4 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v4f16u1 = call <4 x i1> @llvm.fptoui.sat.v4i1.v4f16(<4 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v4f16s8 = call <4 x i8> @llvm.fptosi.sat.v4i8.v4f16(<4 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v4f16u8 = call <4 x i8> @llvm.fptoui.sat.v4i8.v4f16(<4 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %v4f16s16 = call <4 x i16> @llvm.fptosi.sat.v4i16.v4f16(<4 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %v4f16u16 = call <4 x i16> @llvm.fptoui.sat.v4i16.v4f16(<4 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:10 CodeSize:5 Lat:9 SizeLat:9 for: %v4f16s32 = call <4 x i32> @llvm.fptosi.sat.v4i32.v4f16(<4 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:3 Lat:5 SizeLat:5 for: %v4f16u32 = call <4 x i32> @llvm.fptoui.sat.v4i32.v4f16(<4 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:284 CodeSize:6 Lat:11 SizeLat:11 for: %v4f16s64 = call <4 x i64> @llvm.fptosi.sat.v4i64.v4f16(<4 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:278 CodeSize:3 Lat:5 SizeLat:5 for: %v4f16u64 = call <4 x i64> @llvm.fptoui.sat.v4i64.v4f16(<4 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v8f16s1 = call <8 x i1> @llvm.fptosi.sat.v8i1.v8f16(<8 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v8f16u1 = call <8 x i1> @llvm.fptoui.sat.v8i1.v8f16(<8 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v8f16s8 = call <8 x i8> @llvm.fptosi.sat.v8i8.v8f16(<8 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:6 CodeSize:7 Lat:6 SizeLat:6 for: %v8f16u8 = call <8 x i8> @llvm.fptoui.sat.v8i8.v8f16(<8 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %v8f16s16 = call <8 x i16> @llvm.fptosi.sat.v8i16.v8f16(<8 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:2 CodeSize:1 Lat:2 SizeLat:2 for: %v8f16u16 = call <8 x i16> @llvm.fptoui.sat.v8i16.v8f16(<8 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:20 CodeSize:6 Lat:11 SizeLat:11 for: %v8f16s32 = call <8 x i32> @llvm.fptosi.sat.v8i32.v8f16(<8 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:14 CodeSize:3 Lat:5 SizeLat:5 for: %v8f16u32 = call <8 x i32> @llvm.fptoui.sat.v8i32.v8f16(<8 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:1112 CodeSize:8 Lat:15 SizeLat:15 for: %v8f16s64 = call <8 x i64> @llvm.fptosi.sat.v8i64.v8f16(<8 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:1102 CodeSize:3 Lat:5 SizeLat:5 for: %v8f16u64 = call <8 x i64> @llvm.fptoui.sat.v8i64.v8f16(<8 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:12 CodeSize:14 Lat:12 SizeLat:12 for: %v16f16s1 = call <16 x i1> @llvm.fptosi.sat.v16i1.v16f16(<16 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:12 CodeSize:14 Lat:12 SizeLat:12 for: %v16f16u1 = call <16 x i1> @llvm.fptoui.sat.v16i1.v16f16(<16 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:12 CodeSize:14 Lat:12 SizeLat:12 for: %v16f16s8 = call <16 x i8> @llvm.fptosi.sat.v16i8.v16f16(<16 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:12 CodeSize:14 Lat:12 SizeLat:12 for: %v16f16u8 = call <16 x i8> @llvm.fptoui.sat.v16i8.v16f16(<16 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %v16f16s16 = call <16 x i16> @llvm.fptosi.sat.v16i16.v16f16(<16 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4 CodeSize:2 Lat:4 SizeLat:4 for: %v16f16u16 = call <16 x i16> @llvm.fptoui.sat.v16i16.v16f16(<16 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:124 CodeSize:75 Lat:85 SizeLat:85 for: %v16f16s32 = call <16 x i32> @llvm.fptosi.sat.v16i32.v16f16(<16 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:48 CodeSize:5 Lat:9 SizeLat:9 for: %v16f16u32 = call <16 x i32> @llvm.fptoui.sat.v16i32.v16f16(<16 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4484 CodeSize:79 Lat:93 SizeLat:93 for: %v16f16s64 = call <16 x i64> @llvm.fptosi.sat.v16i64.v16f16(<16 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:4400 CodeSize:5 Lat:9 SizeLat:9 for: %v16f16u64 = call <16 x i64> @llvm.fptoui.sat.v16i64.v16f16(<16 x half> undef)
+; CHECK-MVEFP-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
;
%f16s1 = call i1 @llvm.fptosi.sat.i1.f16(half undef)
%f16u1 = call i1 @llvm.fptoui.sat.i1.f16(half undef)
diff --git a/llvm/test/Analysis/CostModel/ARM/intrinsic-cost-kinds.ll b/llvm/test/Analysis/CostModel/ARM/intrinsic-cost-kinds.ll
deleted file mode 100644
index 6377437..0000000
--- a/llvm/test/Analysis/CostModel/ARM/intrinsic-cost-kinds.ll
+++ /dev/null
@@ -1,371 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
-; RUN: opt -mtriple=armv8.1m.main -mattr=+mve.fp -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=throughput < %s | FileCheck %s --check-prefix=THRU
-; RUN: opt -mtriple=armv8.1m.main -mattr=+mve.fp -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=latency < %s | FileCheck %s --check-prefix=LATE
-; RUN: opt -mtriple=armv8.1m.main -mattr=+mve.fp -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=code-size < %s | FileCheck %s --check-prefix=SIZE
-; RUN: opt -mtriple=armv8.1m.main -mattr=+mve.fp -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=size-latency < %s | FileCheck %s --check-prefix=SIZE_LATE
-
-target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
-
-; Test a cross-section of intrinsics for various cost-kinds.
-; Other test files may check for accuracy of a particular intrinsic
-; across subtargets or types. This is just a basic correctness check using an
-; ARM target and a legal scalar type (i32/float) and/or an
-; illegal vector type (16 x i32/float).
-
-declare i32 @llvm.smax.i32(i32, i32)
-declare <16 x i32> @llvm.smax.v16i32(<16 x i32>, <16 x i32>)
-
-declare float @llvm.fmuladd.f32(float, float, float)
-declare <16 x float> @llvm.fmuladd.v16f32(<16 x float>, <16 x float>, <16 x float>)
-
-declare float @llvm.log2.f32(float)
-declare <16 x float> @llvm.log2.v16f32(<16 x float>)
-
-declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
-declare <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float>, <16 x float>, metadata, metadata)
-
-declare float @llvm.maximum.f32(float, float)
-declare <16 x float> @llvm.maximum.v16f32(<16 x float>, <16 x float>)
-
-declare i32 @llvm.cttz.i32(i32, i1)
-declare <16 x i32> @llvm.cttz.v16i32(<16 x i32>, i1)
-
-declare i32 @llvm.ctlz.i32(i32, i1)
-declare <16 x i32> @llvm.ctlz.v16i32(<16 x i32>, i1)
-
-declare i32 @llvm.fshl.i32(i32, i32, i32)
-declare <16 x i32> @llvm.fshl.v16i32(<16 x i32>, <16 x i32>, <16 x i32>)
-
-declare <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x float>)
-declare void @llvm.masked.scatter.v16f32.v16p0(<16 x float>, <16 x ptr>, i32, <16 x i1>)
-declare float @llvm.vector.reduce.fmax.v16f32(<16 x float>)
-
-declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1)
-
-declare i32 @llvm.ssa.copy.i32(i32)
-declare float @llvm.ssa.copy.f32(float)
-declare ptr @llvm.ssa.copy.p0(ptr)
-
-define void @smax(i32 %a, i32 %b, <16 x i32> %va, <16 x i32> %vb) {
-; THRU-LABEL: 'smax'
-; THRU-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %s = call i32 @llvm.smax.i32(i32 %a, i32 %b)
-; THRU-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.smax.v16i32(<16 x i32> %va, <16 x i32> %vb)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'smax'
-; LATE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %s = call i32 @llvm.smax.i32(i32 %a, i32 %b)
-; LATE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.smax.v16i32(<16 x i32> %va, <16 x i32> %vb)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'smax'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %s = call i32 @llvm.smax.i32(i32 %a, i32 %b)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v = call <16 x i32> @llvm.smax.v16i32(<16 x i32> %va, <16 x i32> %vb)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'smax'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %s = call i32 @llvm.smax.i32(i32 %a, i32 %b)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.smax.v16i32(<16 x i32> %va, <16 x i32> %vb)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %s = call i32 @llvm.smax.i32(i32 %a, i32 %b)
- %v = call <16 x i32> @llvm.smax.v16i32(<16 x i32> %va, <16 x i32> %vb)
- ret void
-}
-
-define void @fmuladd(float %a, float %b, float %c, <16 x float> %va, <16 x float> %vb, <16 x float> %vc) {
-; THRU-LABEL: 'fmuladd'
-; THRU-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
-; THRU-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'fmuladd'
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
-; LATE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'fmuladd'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'fmuladd'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %s = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
- %v = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc)
- ret void
-}
-
-define void @log2(float %a, <16 x float> %va) {
-; THRU-LABEL: 'log2'
-; THRU-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %s = call float @llvm.log2.f32(float %a)
-; THRU-NEXT: Cost Model: Found an estimated cost of 192 for instruction: %v = call <16 x float> @llvm.log2.v16f32(<16 x float> %va)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'log2'
-; LATE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %s = call float @llvm.log2.f32(float %a)
-; LATE-NEXT: Cost Model: Found an estimated cost of 192 for instruction: %v = call <16 x float> @llvm.log2.v16f32(<16 x float> %va)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'log2'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.log2.f32(float %a)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %v = call <16 x float> @llvm.log2.v16f32(<16 x float> %va)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'log2'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %s = call float @llvm.log2.f32(float %a)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 192 for instruction: %v = call <16 x float> @llvm.log2.v16f32(<16 x float> %va)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %s = call float @llvm.log2.f32(float %a)
- %v = call <16 x float> @llvm.log2.v16f32(<16 x float> %va)
- ret void
-}
-
-define void @constrained_fadd(float %a, <16 x float> %va) strictfp {
-; THRU-LABEL: 'constrained_fadd'
-; THRU-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.experimental.constrained.fadd.f32(float %a, float %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
-; THRU-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %t = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %va, <16 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'constrained_fadd'
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.experimental.constrained.fadd.f32(float %a, float %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
-; LATE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %t = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %va, <16 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'constrained_fadd'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.experimental.constrained.fadd.f32(float %a, float %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
-; SIZE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %t = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %va, <16 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'constrained_fadd'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.experimental.constrained.fadd.f32(float %a, float %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %t = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %va, <16 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %s = call float @llvm.experimental.constrained.fadd.f32(float %a, float %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
- %t = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %va, <16 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
- ret void
-}
-
-define void @fmaximum(float %a, float %b, <16 x float> %va, <16 x float> %vb) {
-; THRU-LABEL: 'fmaximum'
-; THRU-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %s = call float @llvm.maximum.f32(float %a, float %b)
-; THRU-NEXT: Cost Model: Found an estimated cost of 208 for instruction: %v = call <16 x float> @llvm.maximum.v16f32(<16 x float> %va, <16 x float> %vb)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'fmaximum'
-; LATE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %s = call float @llvm.maximum.f32(float %a, float %b)
-; LATE-NEXT: Cost Model: Found an estimated cost of 208 for instruction: %v = call <16 x float> @llvm.maximum.v16f32(<16 x float> %va, <16 x float> %vb)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'fmaximum'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call float @llvm.maximum.f32(float %a, float %b)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %v = call <16 x float> @llvm.maximum.v16f32(<16 x float> %va, <16 x float> %vb)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'fmaximum'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %s = call float @llvm.maximum.f32(float %a, float %b)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 208 for instruction: %v = call <16 x float> @llvm.maximum.v16f32(<16 x float> %va, <16 x float> %vb)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %s = call float @llvm.maximum.f32(float %a, float %b)
- %v = call <16 x float> @llvm.maximum.v16f32(<16 x float> %va, <16 x float> %vb)
- ret void
-}
-
-define void @cttz(i32 %a, <16 x i32> %va) {
-; THRU-LABEL: 'cttz'
-; THRU-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call i32 @llvm.cttz.i32(i32 %a, i1 false)
-; THRU-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %va, i1 false)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'cttz'
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call i32 @llvm.cttz.i32(i32 %a, i1 false)
-; LATE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %va, i1 false)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'cttz'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call i32 @llvm.cttz.i32(i32 %a, i1 false)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %va, i1 false)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'cttz'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call i32 @llvm.cttz.i32(i32 %a, i1 false)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %va, i1 false)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %s = call i32 @llvm.cttz.i32(i32 %a, i1 false)
- %v = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %va, i1 false)
- ret void
-}
-
-define void @ctlz(i32 %a, <16 x i32> %va) {
-; THRU-LABEL: 'ctlz'
-; THRU-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call i32 @llvm.ctlz.i32(i32 %a, i1 true)
-; THRU-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %va, i1 true)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'ctlz'
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call i32 @llvm.ctlz.i32(i32 %a, i1 true)
-; LATE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %va, i1 true)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'ctlz'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call i32 @llvm.ctlz.i32(i32 %a, i1 true)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %va, i1 true)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'ctlz'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %s = call i32 @llvm.ctlz.i32(i32 %a, i1 true)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %va, i1 true)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %s = call i32 @llvm.ctlz.i32(i32 %a, i1 true)
- %v = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %va, i1 true)
- ret void
-}
-
-define void @fshl(i32 %a, i32 %b, i32 %c, <16 x i32> %va, <16 x i32> %vb, <16 x i32> %vc) {
-; THRU-LABEL: 'fshl'
-; THRU-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %s = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
-; THRU-NEXT: Cost Model: Found an estimated cost of 120 for instruction: %v = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i32> %vc)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'fshl'
-; LATE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %s = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
-; LATE-NEXT: Cost Model: Found an estimated cost of 120 for instruction: %v = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i32> %vc)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'fshl'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %s = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 92 for instruction: %v = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i32> %vc)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'fshl'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %s = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 120 for instruction: %v = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i32> %vc)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %s = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
- %v = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i32> %vc)
- ret void
-}
-
-define void @maskedgather(<16 x ptr> %va, <16 x i1> %vb, <16 x float> %vc) {
-; THRU-LABEL: 'maskedgather'
-; THRU-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> align 1 %va, <16 x i1> %vb, <16 x float> %vc)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'maskedgather'
-; LATE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> align 1 %va, <16 x i1> %vb, <16 x float> %vc)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'maskedgather'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> align 1 %va, <16 x i1> %vb, <16 x float> %vc)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'maskedgather'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %v = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> align 1 %va, <16 x i1> %vb, <16 x float> %vc)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %v = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %va, i32 1, <16 x i1> %vb, <16 x float> %vc)
- ret void
-}
-
-define void @maskedscatter(<16 x float> %va, <16 x ptr> %vb, <16 x i1> %vc) {
-; THRU-LABEL: 'maskedscatter'
-; THRU-NEXT: Cost Model: Found an estimated cost of 176 for instruction: call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> %va, <16 x ptr> align 1 %vb, <16 x i1> %vc)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'maskedscatter'
-; LATE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> %va, <16 x ptr> align 1 %vb, <16 x i1> %vc)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'maskedscatter'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> %va, <16 x ptr> align 1 %vb, <16 x i1> %vc)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'maskedscatter'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 176 for instruction: call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> %va, <16 x ptr> align 1 %vb, <16 x i1> %vc)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> %va, <16 x ptr> %vb, i32 1, <16 x i1> %vc)
- ret void
-}
-
-define void @reduce_fmax(<16 x float> %va) {
-; THRU-LABEL: 'reduce_fmax'
-; THRU-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v = call float @llvm.vector.reduce.fmax.v16f32(<16 x float> %va)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'reduce_fmax'
-; LATE-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v = call float @llvm.vector.reduce.fmax.v16f32(<16 x float> %va)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'reduce_fmax'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v = call float @llvm.vector.reduce.fmax.v16f32(<16 x float> %va)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'reduce_fmax'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v = call float @llvm.vector.reduce.fmax.v16f32(<16 x float> %va)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %v = call float @llvm.vector.reduce.fmax.v16f32(<16 x float> %va)
- ret void
-}
-
-define void @memcpy(ptr %a, ptr %b, i32 %c) {
-; THRU-LABEL: 'memcpy'
-; THRU-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.memcpy.p0.p0.i32(ptr align 1 %a, ptr align 1 %b, i32 32, i1 false)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'memcpy'
-; LATE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.memcpy.p0.p0.i32(ptr align 1 %a, ptr align 1 %b, i32 32, i1 false)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'memcpy'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.memcpy.p0.p0.i32(ptr align 1 %a, ptr align 1 %b, i32 32, i1 false)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'memcpy'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.memcpy.p0.p0.i32(ptr align 1 %a, ptr align 1 %b, i32 32, i1 false)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- call void @llvm.memcpy.p0.p0.i32(ptr align 1 %a, ptr align 1 %b, i32 32, i1 false)
- ret void
-}
-
-define void @ssa_copy() {
- ; CHECK: %{{.*}} = llvm.intr.ssa.copy %{{.*}} : f32
-; THRU-LABEL: 'ssa_copy'
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %i = call i32 @llvm.ssa.copy.i32(i32 undef)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %f = call float @llvm.ssa.copy.f32(float undef)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %p = call ptr @llvm.ssa.copy.p0(ptr undef)
-; THRU-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; LATE-LABEL: 'ssa_copy'
-; LATE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %i = call i32 @llvm.ssa.copy.i32(i32 undef)
-; LATE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %f = call float @llvm.ssa.copy.f32(float undef)
-; LATE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %p = call ptr @llvm.ssa.copy.p0(ptr undef)
-; LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE-LABEL: 'ssa_copy'
-; SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %i = call i32 @llvm.ssa.copy.i32(i32 undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %f = call float @llvm.ssa.copy.f32(float undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %p = call ptr @llvm.ssa.copy.p0(ptr undef)
-; SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; SIZE_LATE-LABEL: 'ssa_copy'
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %i = call i32 @llvm.ssa.copy.i32(i32 undef)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %f = call float @llvm.ssa.copy.f32(float undef)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %p = call ptr @llvm.ssa.copy.p0(ptr undef)
-; SIZE_LATE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %i = call i32 @llvm.ssa.copy.i32(i32 undef)
- %f = call float @llvm.ssa.copy.f32(float undef)
- %p = call ptr @llvm.ssa.copy.p0(ptr undef)
- ret void
-}
diff --git a/llvm/test/Analysis/CostModel/ARM/mve-intrinsic-cost-kinds.ll b/llvm/test/Analysis/CostModel/ARM/mve-intrinsic-cost-kinds.ll
new file mode 100644
index 0000000..b3ad818
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/ARM/mve-intrinsic-cost-kinds.ll
@@ -0,0 +1,181 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
+; RUN: opt -mtriple=armv8.1m.main -mattr=+mve.fp -passes="print<cost-model>" -cost-kind=all 2>&1 -disable-output < %s | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+
+; Test a cross-section of intrinsics for various cost-kinds.
+; Other test files may check the accuracy of a particular intrinsic
+; across subtargets or types. This is just a basic correctness check using an
+; ARM target and a legal scalar type (i32/float) and/or an
+; illegal vector type (16 x i32/float).
+
+declare i32 @llvm.smax.i32(i32, i32)
+declare <16 x i32> @llvm.smax.v16i32(<16 x i32>, <16 x i32>)
+
+declare float @llvm.fmuladd.f32(float, float, float)
+declare <16 x float> @llvm.fmuladd.v16f32(<16 x float>, <16 x float>, <16 x float>)
+
+declare float @llvm.log2.f32(float)
+declare <16 x float> @llvm.log2.v16f32(<16 x float>)
+
+declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
+declare <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float>, <16 x float>, metadata, metadata)
+
+declare float @llvm.maximum.f32(float, float)
+declare <16 x float> @llvm.maximum.v16f32(<16 x float>, <16 x float>)
+
+declare i32 @llvm.cttz.i32(i32, i1)
+declare <16 x i32> @llvm.cttz.v16i32(<16 x i32>, i1)
+
+declare i32 @llvm.ctlz.i32(i32, i1)
+declare <16 x i32> @llvm.ctlz.v16i32(<16 x i32>, i1)
+
+declare i32 @llvm.fshl.i32(i32, i32, i32)
+declare <16 x i32> @llvm.fshl.v16i32(<16 x i32>, <16 x i32>, <16 x i32>)
+
+declare <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x float>)
+declare void @llvm.masked.scatter.v16f32.v16p0(<16 x float>, <16 x ptr>, i32, <16 x i1>)
+declare float @llvm.vector.reduce.fmax.v16f32(<16 x float>)
+
+declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1)
+
+declare i32 @llvm.ssa.copy.i32(i32)
+declare float @llvm.ssa.copy.f32(float)
+declare ptr @llvm.ssa.copy.p0(ptr)
+
+define void @smax(i32 %a, i32 %b, <16 x i32> %va, <16 x i32> %vb) {
+; CHECK-LABEL: 'smax'
+; CHECK-NEXT: Cost Model: Found costs of RThru:2 CodeSize:3 Lat:2 SizeLat:2 for: %s = call i32 @llvm.smax.i32(i32 %a, i32 %b)
+; CHECK-NEXT: Cost Model: Found costs of RThru:8 CodeSize:4 Lat:8 SizeLat:8 for: %v = call <16 x i32> @llvm.smax.v16i32(<16 x i32> %va, <16 x i32> %vb)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %s = call i32 @llvm.smax.i32(i32 %a, i32 %b)
+ %v = call <16 x i32> @llvm.smax.v16i32(<16 x i32> %va, <16 x i32> %vb)
+ ret void
+}
+
+define void @fmuladd(float %a, float %b, float %c, <16 x float> %va, <16 x float> %vb, <16 x float> %vc) {
+; CHECK-LABEL: 'fmuladd'
+; CHECK-NEXT: Cost Model: Found costs of 1 for: %s = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
+; CHECK-NEXT: Cost Model: Found costs of 8 for: %v = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %s = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
+ %v = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc)
+ ret void
+}
+
+define void @log2(float %a, <16 x float> %va) {
+; CHECK-LABEL: 'log2'
+; CHECK-NEXT: Cost Model: Found costs of RThru:10 CodeSize:1 Lat:10 SizeLat:10 for: %s = call float @llvm.log2.f32(float %a)
+; CHECK-NEXT: Cost Model: Found costs of RThru:192 CodeSize:48 Lat:192 SizeLat:192 for: %v = call <16 x float> @llvm.log2.v16f32(<16 x float> %va)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %s = call float @llvm.log2.f32(float %a)
+ %v = call <16 x float> @llvm.log2.v16f32(<16 x float> %va)
+ ret void
+}
+
+define void @constrained_fadd(float %a, <16 x float> %va) strictfp {
+; CHECK-LABEL: 'constrained_fadd'
+; CHECK-NEXT: Cost Model: Found costs of 1 for: %s = call float @llvm.experimental.constrained.fadd.f32(float %a, float %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+; CHECK-NEXT: Cost Model: Found costs of 48 for: %t = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %va, <16 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %s = call float @llvm.experimental.constrained.fadd.f32(float %a, float %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ %t = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %va, <16 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret void
+}
+
+define void @fmaximum(float %a, float %b, <16 x float> %va, <16 x float> %vb) {
+; CHECK-LABEL: 'fmaximum'
+; CHECK-NEXT: Cost Model: Found costs of RThru:10 CodeSize:1 Lat:10 SizeLat:10 for: %s = call float @llvm.maximum.f32(float %a, float %b)
+; CHECK-NEXT: Cost Model: Found costs of RThru:208 CodeSize:64 Lat:208 SizeLat:208 for: %v = call <16 x float> @llvm.maximum.v16f32(<16 x float> %va, <16 x float> %vb)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %s = call float @llvm.maximum.f32(float %a, float %b)
+ %v = call <16 x float> @llvm.maximum.v16f32(<16 x float> %va, <16 x float> %vb)
+ ret void
+}
+
+define void @cttz(i32 %a, <16 x i32> %va) {
+; CHECK-LABEL: 'cttz'
+; CHECK-NEXT: Cost Model: Found costs of 1 for: %s = call i32 @llvm.cttz.i32(i32 %a, i1 false)
+; CHECK-NEXT: Cost Model: Found costs of 8 for: %v = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %va, i1 false)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %s = call i32 @llvm.cttz.i32(i32 %a, i1 false)
+ %v = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %va, i1 false)
+ ret void
+}
+
+define void @ctlz(i32 %a, <16 x i32> %va) {
+; CHECK-LABEL: 'ctlz'
+; CHECK-NEXT: Cost Model: Found costs of 1 for: %s = call i32 @llvm.ctlz.i32(i32 %a, i1 true)
+; CHECK-NEXT: Cost Model: Found costs of 8 for: %v = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %va, i1 true)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %s = call i32 @llvm.ctlz.i32(i32 %a, i1 true)
+ %v = call <16 x i32> @llvm.ctlz.v16i32(<16 x i32> %va, i1 true)
+ ret void
+}
+
+define void @fshl(i32 %a, i32 %b, i32 %c, <16 x i32> %va, <16 x i32> %vb, <16 x i32> %vc) {
+; CHECK-LABEL: 'fshl'
+; CHECK-NEXT: Cost Model: Found costs of RThru:7 CodeSize:8 Lat:7 SizeLat:7 for: %s = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
+; CHECK-NEXT: Cost Model: Found costs of RThru:120 CodeSize:92 Lat:120 SizeLat:120 for: %v = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i32> %vc)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %s = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
+ %v = call <16 x i32> @llvm.fshl.v16i32(<16 x i32> %va, <16 x i32> %vb, <16 x i32> %vc)
+ ret void
+}
+
+define void @maskedgather(<16 x ptr> %va, <16 x i1> %vb, <16 x float> %vc) {
+; CHECK-LABEL: 'maskedgather'
+; CHECK-NEXT: Cost Model: Found costs of 176 for: %v = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> align 1 %va, <16 x i1> %vb, <16 x float> %vc)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %v = call <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> %va, i32 1, <16 x i1> %vb, <16 x float> %vc)
+ ret void
+}
+
+define void @maskedscatter(<16 x float> %va, <16 x ptr> %vb, <16 x i1> %vc) {
+; CHECK-LABEL: 'maskedscatter'
+; CHECK-NEXT: Cost Model: Found costs of 176 for: call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> %va, <16 x ptr> align 1 %vb, <16 x i1> %vc)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ call void @llvm.masked.scatter.v16f32.v16p0(<16 x float> %va, <16 x ptr> %vb, i32 1, <16 x i1> %vc)
+ ret void
+}
+
+define void @reduce_fmax(<16 x float> %va) {
+; CHECK-LABEL: 'reduce_fmax'
+; CHECK-NEXT: Cost Model: Found costs of RThru:9 CodeSize:6 Lat:9 SizeLat:9 for: %v = call float @llvm.vector.reduce.fmax.v16f32(<16 x float> %va)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %v = call float @llvm.vector.reduce.fmax.v16f32(<16 x float> %va)
+ ret void
+}
+
+define void @memcpy(ptr %a, ptr %b, i32 %c) {
+; CHECK-LABEL: 'memcpy'
+; CHECK-NEXT: Cost Model: Found costs of 4 for: call void @llvm.memcpy.p0.p0.i32(ptr align 1 %a, ptr align 1 %b, i32 32, i1 false)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ call void @llvm.memcpy.p0.p0.i32(ptr align 1 %a, ptr align 1 %b, i32 32, i1 false)
+ ret void
+}
+
+define void @ssa_copy() {
+; CHECK-LABEL: 'ssa_copy'
+; CHECK-NEXT: Cost Model: Found costs of 0 for: %i = call i32 @llvm.ssa.copy.i32(i32 undef)
+; CHECK-NEXT: Cost Model: Found costs of 0 for: %f = call float @llvm.ssa.copy.f32(float undef)
+; CHECK-NEXT: Cost Model: Found costs of 0 for: %p = call ptr @llvm.ssa.copy.p0(ptr undef)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %i = call i32 @llvm.ssa.copy.i32(i32 undef)
+ %f = call float @llvm.ssa.copy.f32(float undef)
+ %p = call ptr @llvm.ssa.copy.p0(ptr undef)
+ ret void
+}
diff --git a/llvm/test/Analysis/CostModel/ARM/mve-target-intrinsics.ll b/llvm/test/Analysis/CostModel/ARM/mve-target-intrinsics.ll
new file mode 100644
index 0000000..d09216a
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/ARM/mve-target-intrinsics.ll
@@ -0,0 +1,24 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
+; RUN: opt < %s -passes="print<cost-model>" -cost-kind=all 2>&1 -disable-output -mtriple=thumbv8.1m.main -mattr=+mve | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+
+define void @intrinsics() {
+; CHECK-LABEL: 'intrinsics'
+; CHECK-NEXT: Cost Model: Found costs of 1 for: %t1 = call i32 @llvm.arm.ssat(i32 undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found costs of 1 for: %t2 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0(ptr undef)
+; CHECK-NEXT: Cost Model: Found costs of 1 for: %t3 = call { i32, i32 } @llvm.arm.mve.sqrshrl(i32 undef, i32 undef, i32 undef, i32 48)
+; CHECK-NEXT: Cost Model: Found costs of 1 for: %t4 = tail call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 0, i32 0, i32 0, i32 0, i32 0, <8 x i16> undef, <8 x i16> undef)
+; CHECK-NEXT: Cost Model: Found costs of RThru:0 CodeSize:1 Lat:1 SizeLat:1 for: ret void
+;
+ %t1 = call i32 @llvm.arm.ssat(i32 undef, i32 undef)
+ %t2 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0(ptr undef)
+ %t3 = call { i32, i32 } @llvm.arm.mve.sqrshrl(i32 undef, i32 undef, i32 undef, i32 48)
+ %t4 = tail call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 0, i32 0, i32 0, i32 0, i32 0, <8 x i16> undef, <8 x i16> undef)
+ ret void
+}
+
+declare i32 @llvm.arm.ssat(i32, i32)
+declare { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0(ptr)
+declare { i32, i32 } @llvm.arm.mve.sqrshrl(i32, i32, i32, i32)
+declare { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32, i32, i32, i32, i32, <8 x i16>, <8 x i16>)
diff --git a/llvm/test/Analysis/CostModel/ARM/target-intrinsics.ll b/llvm/test/Analysis/CostModel/ARM/target-intrinsics.ll
deleted file mode 100644
index 2d6b318..0000000
--- a/llvm/test/Analysis/CostModel/ARM/target-intrinsics.ll
+++ /dev/null
@@ -1,41 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
-; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -mtriple=thumbv8.1m.main -mattr=+mve | FileCheck %s --check-prefix=CHECK-THUMB2-RECIP
-; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=throughput -mtriple=thumbv8.1m.main -mattr=+mve | FileCheck %s --check-prefix=CHECK-THUMB2-RECIP
-; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=latency -mtriple=thumbv8.1m.main -mattr=+mve | FileCheck %s --check-prefix=CHECK-THUMB2-LAT
-; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -cost-kind=code-size -mtriple=thumbv8.1m.main -mattr=+mve | FileCheck %s --check-prefix=CHECK-THUMB2-SIZE
-
-target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
-
-define void @intrinsics() {
-; CHECK-THUMB2-RECIP-LABEL: 'intrinsics'
-; CHECK-THUMB2-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t1 = call i32 @llvm.arm.ssat(i32 undef, i32 undef)
-; CHECK-THUMB2-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t2 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0(ptr undef)
-; CHECK-THUMB2-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t3 = call { i32, i32 } @llvm.arm.mve.sqrshrl(i32 undef, i32 undef, i32 undef, i32 48)
-; CHECK-THUMB2-RECIP-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t4 = tail call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 0, i32 0, i32 0, i32 0, i32 0, <8 x i16> undef, <8 x i16> undef)
-; CHECK-THUMB2-RECIP-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
-;
-; CHECK-THUMB2-LAT-LABEL: 'intrinsics'
-; CHECK-THUMB2-LAT-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t1 = call i32 @llvm.arm.ssat(i32 undef, i32 undef)
-; CHECK-THUMB2-LAT-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t2 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0(ptr undef)
-; CHECK-THUMB2-LAT-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t3 = call { i32, i32 } @llvm.arm.mve.sqrshrl(i32 undef, i32 undef, i32 undef, i32 48)
-; CHECK-THUMB2-LAT-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t4 = tail call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 0, i32 0, i32 0, i32 0, i32 0, <8 x i16> undef, <8 x i16> undef)
-; CHECK-THUMB2-LAT-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
-; CHECK-THUMB2-SIZE-LABEL: 'intrinsics'
-; CHECK-THUMB2-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t1 = call i32 @llvm.arm.ssat(i32 undef, i32 undef)
-; CHECK-THUMB2-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t2 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0(ptr undef)
-; CHECK-THUMB2-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t3 = call { i32, i32 } @llvm.arm.mve.sqrshrl(i32 undef, i32 undef, i32 undef, i32 48)
-; CHECK-THUMB2-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t4 = tail call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 0, i32 0, i32 0, i32 0, i32 0, <8 x i16> undef, <8 x i16> undef)
-; CHECK-THUMB2-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
-;
- %t1 = call i32 @llvm.arm.ssat(i32 undef, i32 undef)
- %t2 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0(ptr undef)
- %t3 = call { i32, i32 } @llvm.arm.mve.sqrshrl(i32 undef, i32 undef, i32 undef, i32 48)
- %t4 = tail call { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32 0, i32 0, i32 0, i32 0, i32 0, <8 x i16> undef, <8 x i16> undef)
- ret void
-}
-
-declare i32 @llvm.arm.ssat(i32, i32)
-declare { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0(ptr)
-declare { i32, i32 } @llvm.arm.mve.sqrshrl(i32, i32, i32, i32)
-declare { i32, i32 } @llvm.arm.mve.vmlldava.v8i16(i32, i32, i32, i32, i32, <8 x i16>, <8 x i16>)
diff --git a/llvm/test/CodeGen/AArch64/load-zext-bitcast.ll b/llvm/test/CodeGen/AArch64/load-zext-bitcast.ll
index 9193025..6177ae5 100644
--- a/llvm/test/CodeGen/AArch64/load-zext-bitcast.ll
+++ b/llvm/test/CodeGen/AArch64/load-zext-bitcast.ll
@@ -112,8 +112,7 @@ entry:
define double @load_u64_from_u8_off1(ptr %n){
; CHECK-LABEL: load_u64_from_u8_off1:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldrb w8, [x0, #1]
-; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: ldr b0, [x0, #1]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 1
@@ -140,8 +139,7 @@ entry:
define float @load_u32_from_u8_off1(ptr %n){
; CHECK-LABEL: load_u32_from_u8_off1:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldrb w8, [x0, #1]
-; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: ldr b0, [x0, #1]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 1
@@ -154,8 +152,7 @@ entry:
define half @load_u16_from_u8_off1(ptr %n){
; CHECK-LABEL: load_u16_from_u8_off1:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldrb w8, [x0, #1]
-; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: ldr b0, [x0, #1]
; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
; CHECK-NEXT: ret
entry:
@@ -185,8 +182,7 @@ entry:
define double @load_u64_from_u16_off2(ptr %n){
; CHECK-LABEL: load_u64_from_u16_off2:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldrh w8, [x0, #2]
-; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: ldr h0, [x0, #2]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 2
@@ -199,8 +195,7 @@ entry:
define double @load_u64_from_u8_off2(ptr %n){
; CHECK-LABEL: load_u64_from_u8_off2:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldrb w8, [x0, #2]
-; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: ldr b0, [x0, #2]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 2
@@ -226,7 +221,7 @@ entry:
define float @load_u32_from_u8_off2(ptr %n){
; CHECK-LABEL: load_u32_from_u8_off2:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr b0, [x0, #1]
+; CHECK-NEXT: ldr b0, [x0, #2]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 2
@@ -239,7 +234,7 @@ entry:
define half @load_u16_from_u8_off2(ptr %n){
; CHECK-LABEL: load_u16_from_u8_off2:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr b0, [x0, #1]
+; CHECK-NEXT: ldr b0, [x0, #2]
; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
; CHECK-NEXT: ret
entry:
@@ -283,8 +278,7 @@ entry:
define double @load_u64_from_u8_off255(ptr %n){
; CHECK-LABEL: load_u64_from_u8_off255:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldrb w8, [x0, #255]
-; CHECK-NEXT: fmov d0, x8
+; CHECK-NEXT: ldr b0, [x0, #255]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 255
@@ -311,8 +305,7 @@ entry:
define float @load_u32_from_u8_off255(ptr %n){
; CHECK-LABEL: load_u32_from_u8_off255:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldrb w8, [x0, #255]
-; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: ldr b0, [x0, #255]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 255
@@ -325,8 +318,7 @@ entry:
define half @load_u16_from_u8_off255(ptr %n){
; CHECK-LABEL: load_u16_from_u8_off255:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldrb w8, [x0, #255]
-; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: ldr b0, [x0, #255]
; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
; CHECK-NEXT: ret
entry:
@@ -354,7 +346,7 @@ entry:
define double @load_u64_from_u16_off256(ptr %n){
; CHECK-LABEL: load_u64_from_u16_off256:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr h0, [x0, #128]
+; CHECK-NEXT: ldr h0, [x0, #256]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 256
@@ -367,7 +359,7 @@ entry:
define double @load_u64_from_u8_off256(ptr %n){
; CHECK-LABEL: load_u64_from_u8_off256:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr b0, [x0, #64]
+; CHECK-NEXT: ldr b0, [x0, #256]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 256
@@ -393,7 +385,7 @@ entry:
define float @load_u32_from_u8_off256(ptr %n){
; CHECK-LABEL: load_u32_from_u8_off256:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr b0, [x0, #128]
+; CHECK-NEXT: ldr b0, [x0, #256]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 256
@@ -406,7 +398,7 @@ entry:
define half @load_u16_from_u8_off256(ptr %n){
; CHECK-LABEL: load_u16_from_u8_off256:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr b0, [x0, #128]
+; CHECK-NEXT: ldr b0, [x0, #256]
; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
; CHECK-NEXT: ret
entry:
@@ -435,8 +427,7 @@ entry:
define double @load_u64_from_u16_offn(ptr %n){
; CHECK-LABEL: load_u64_from_u16_offn:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: mov w8, #8190 // =0x1ffe
-; CHECK-NEXT: ldr h0, [x0, x8]
+; CHECK-NEXT: ldr h0, [x0, #8190]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 8190
@@ -517,7 +508,8 @@ entry:
define double @load_u64_from_u16_offnp1(ptr %n){
; CHECK-LABEL: load_u64_from_u16_offnp1:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr h0, [x0, #4096]
+; CHECK-NEXT: add x8, x0, #2, lsl #12 // =8192
+; CHECK-NEXT: ldr h0, [x8]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 8192
@@ -530,7 +522,8 @@ entry:
define double @load_u64_from_u8_offnp1(ptr %n){
; CHECK-LABEL: load_u64_from_u8_offnp1:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr b0, [x0, #1024]
+; CHECK-NEXT: add x8, x0, #1, lsl #12 // =4096
+; CHECK-NEXT: ldr b0, [x8]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 4096
@@ -557,7 +550,8 @@ entry:
define float @load_u32_from_u8_offnp1(ptr %n){
; CHECK-LABEL: load_u32_from_u8_offnp1:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr b0, [x0, #2048]
+; CHECK-NEXT: add x8, x0, #1, lsl #12 // =4096
+; CHECK-NEXT: ldr b0, [x8]
; CHECK-NEXT: ret
entry:
%p = getelementptr i8, ptr %n, i64 4096
@@ -570,7 +564,8 @@ entry:
define half @load_u16_from_u8_offnp1(ptr %n){
; CHECK-LABEL: load_u16_from_u8_offnp1:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: ldr b0, [x0, #2048]
+; CHECK-NEXT: add x8, x0, #1, lsl #12 // =4096
+; CHECK-NEXT: ldr b0, [x8]
; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/ARM/O3-pipeline.ll b/llvm/test/CodeGen/ARM/O3-pipeline.ll
index 9601a2e..2731148 100644
--- a/llvm/test/CodeGen/ARM/O3-pipeline.ll
+++ b/llvm/test/CodeGen/ARM/O3-pipeline.ll
@@ -166,6 +166,7 @@
; CHECK-NEXT: ARM Execution Domain Fix
; CHECK-NEXT: BreakFalseDeps
; CHECK-NEXT: ARM pseudo instruction expansion pass
+; CHECK-NEXT: Insert KCFI indirect call checks
; CHECK-NEXT: Thumb2 instruction size reduce pass
; CHECK-NEXT: MachineDominator Tree Construction
; CHECK-NEXT: Machine Natural Loop Construction
diff --git a/llvm/test/CodeGen/ARM/kcfi-arm.ll b/llvm/test/CodeGen/ARM/kcfi-arm.ll
new file mode 100644
index 0000000..e3696cf
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/kcfi-arm.ll
@@ -0,0 +1,138 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=armv7-linux-gnueabi -verify-machineinstrs < %s | FileCheck %s --check-prefix=ASM
+; RUN: llc -mtriple=armv7-linux-gnueabi -verify-machineinstrs -stop-after=finalize-isel < %s | FileCheck %s --check-prefixes=MIR,ISEL
+; RUN: llc -mtriple=armv7-linux-gnueabi -verify-machineinstrs -stop-after=kcfi < %s | FileCheck %s --check-prefixes=MIR,KCFI
+
+; MIR checks for all functions (grouped here to prevent update_llc_test_checks.py from removing them)
+
+; MIR-LABEL: name: f1
+; MIR: body:
+
+; ISEL: BLX %0, csr_aapcs,{{.*}} cfi-type 12345678
+
+; KCFI: BUNDLE{{.*}} {
+; KCFI-NEXT: KCFI_CHECK_ARM $r0, 12345678
+; KCFI-NEXT: BLX killed $r0, csr_aapcs,{{.*}}
+; KCFI-NEXT: }
+
+; MIR-LABEL: name: f2
+; MIR: body:
+
+; ISEL: TCRETURNri %0, 0, csr_aapcs, implicit $sp, cfi-type 12345678
+
+; KCFI: BUNDLE{{.*}} {
+; KCFI-NEXT: KCFI_CHECK_ARM $r0, 12345678
+; KCFI-NEXT: TAILJMPr killed $r0, csr_aapcs, implicit $sp, implicit $sp
+; KCFI-NEXT: }
+
+; ASM: .long 12345678
+define void @f1(ptr noundef %x) !kcfi_type !1 {
+; ASM-LABEL: f1:
+; ASM: @ %bb.0:
+; ASM-NEXT: .save {r11, lr}
+; ASM-NEXT: push {r11, lr}
+; ASM-NEXT: bic r12, r0, #1
+; ASM-NEXT: ldr r12, [r12, #-4]
+; ASM-NEXT: eor r12, r12, #78
+; ASM-NEXT: eor r12, r12, #24832
+; ASM-NEXT: eor r12, r12, #12320768
+; ASM-NEXT: eors r12, r12, #0
+; ASM-NEXT: beq .Ltmp0
+; ASM-NEXT: udf #33760
+; ASM-NEXT: .Ltmp0:
+; ASM-NEXT: blx r0
+; ASM-NEXT: pop {r11, pc}
+
+ call void %x() [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+; Test with tail call
+define void @f2(ptr noundef %x) !kcfi_type !1 {
+; ASM-LABEL: f2:
+; ASM: @ %bb.0:
+; ASM-NEXT: bic r12, r0, #1
+; ASM-NEXT: ldr r12, [r12, #-4]
+; ASM-NEXT: eor r12, r12, #78
+; ASM-NEXT: eor r12, r12, #24832
+; ASM-NEXT: eor r12, r12, #12320768
+; ASM-NEXT: eors r12, r12, #0
+; ASM-NEXT: beq .Ltmp1
+; ASM-NEXT: udf #33760
+; ASM-NEXT: .Ltmp1:
+; ASM-NEXT: bx r0
+
+ tail call void %x() [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+; Test r3 spill/reload when target is r12 and r3 is a call argument.
+; With 5+ arguments (target + 4 args), r0-r3 are all used for arguments,
+; forcing r3 to be spilled when we need it as a scratch register.
+define void @f3_r3_spill(ptr noundef %target, i32 %a, i32 %b, i32 %c, i32 %d) !kcfi_type !1 {
+; ASM-LABEL: f3_r3_spill:
+; ASM: @ %bb.0:
+; ASM-NEXT: .save {r11, lr}
+; ASM-NEXT: push {r11, lr}
+; ASM-NEXT: mov lr, r3
+; ASM-NEXT: ldr r3, [sp, #8]
+; ASM-NEXT: mov r12, r0
+; ASM-NEXT: mov r0, r1
+; ASM-NEXT: mov r1, r2
+; ASM-NEXT: mov r2, lr
+; ASM-NEXT: stmdb sp!, {r3}
+; ASM-NEXT: bic r3, r12, #1
+; ASM-NEXT: ldr r3, [r3, #-4]
+; ASM-NEXT: eor r3, r3, #78
+; ASM-NEXT: eor r3, r3, #24832
+; ASM-NEXT: eor r3, r3, #12320768
+; ASM-NEXT: eors r3, r3, #0
+; ASM-NEXT: ldm sp!, {r3}
+; ASM-NEXT: beq .Ltmp2
+; ASM-NEXT: udf #33772
+; ASM-NEXT: .Ltmp2:
+; ASM-NEXT: blx r12
+; ASM-NEXT: pop {r11, pc}
+; Arguments: r0=%target, r1=%a, r2=%b, r3=%c, [sp]=%d
+; Call needs: r0=%a, r1=%b, r2=%c, r3=%d, target in r12
+; Compiler shuffles arguments into place, saving r3 (c) in lr, loading d from stack
+; r3 is live as 4th argument, so push it before KCFI check
+; Restore r3 immediately after comparison, before branch
+ call void %target(i32 %a, i32 %b, i32 %c, i32 %d) [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+; Test with 3 arguments - r3 not live; the target ends up in r3, so r12 is used as scratch without spilling
+define void @f4_r3_unused(ptr noundef %target, i32 %a, i32 %b) !kcfi_type !1 {
+; ASM-LABEL: f4_r3_unused:
+; ASM: @ %bb.0:
+; ASM-NEXT: .save {r11, lr}
+; ASM-NEXT: push {r11, lr}
+; ASM-NEXT: mov r3, r0
+; ASM-NEXT: mov r0, r1
+; ASM-NEXT: mov r1, r2
+; ASM-NEXT: bic r12, r3, #1
+; ASM-NEXT: ldr r12, [r12, #-4]
+; ASM-NEXT: eor r12, r12, #78
+; ASM-NEXT: eor r12, r12, #24832
+; ASM-NEXT: eor r12, r12, #12320768
+; ASM-NEXT: eors r12, r12, #0
+; ASM-NEXT: beq .Ltmp3
+; ASM-NEXT: udf #33763
+; ASM-NEXT: .Ltmp3:
+; ASM-NEXT: blx r3
+; ASM-NEXT: pop {r11, pc}
+; Only 3 arguments total, so r3 is not used as a call argument
+; Compiler puts target→r3, a→r0, b→r1
+; r3 is the target, so we use r12 as scratch (no spill needed)
+ call void %target(i32 %a, i32 %b) [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+!llvm.module.flags = !{!0}
+!0 = !{i32 4, !"kcfi", i32 1}
+!1 = !{i32 12345678}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; ISEL: {{.*}}
+; KCFI: {{.*}}
+; MIR: {{.*}}
diff --git a/llvm/test/CodeGen/ARM/kcfi-cbz-range.ll b/llvm/test/CodeGen/ARM/kcfi-cbz-range.ll
new file mode 100644
index 0000000..8e71cae
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/kcfi-cbz-range.ll
@@ -0,0 +1,81 @@
+; RUN: llc -mtriple=thumbv7-linux-gnueabi -filetype=obj < %s
+; RUN: llc -mtriple=thumbv7-linux-gnueabi < %s | FileCheck %s
+
+; This test verifies that KCFI instrumentation doesn't cause "out of range
+; pc-relative fixup value" errors when generating object files.
+;
+; The test creates a scenario with enough KCFI-instrumented indirect calls
+; (~32 bytes each) to push a cbz/cbnz instruction out of its ±126 byte range
+; if the KCFI_CHECK pseudo-instruction size is not properly accounted for.
+;
+; Without the fix (KCFI_CHECK returns size 0):
+; - Backend thinks KCFI checks take no space
+; - Generates cbz to branch over the code
+; - During assembly, cbz target is >126 bytes away
+; - Assembly fails with "error: out of range pc-relative fixup value"
+;
+; With the fix (KCFI_CHECK returns size 32 for Thumb2):
+; - Backend correctly accounts for KCFI check expansion
+; - Avoids cbz or uses longer-range branch instructions
+; - Assembly succeeds, object file is generated
+
+declare void @external_function(i32)
+
+; Test WITHOUT KCFI: should generate cbz since calls are small
+; CHECK-LABEL: test_without_kcfi:
+; CHECK: cbz
+; CHECK-NOT: bic{{.*}}#1
+define i32 @test_without_kcfi(ptr %callback, i32 %x) {
+entry:
+ %cmp = icmp eq i32 %x, 0
+ br i1 %cmp, label %if_zero, label %if_nonzero
+
+if_nonzero:
+ ; Regular (non-KCFI) indirect calls - much smaller
+ call void %callback()
+ call void %callback()
+ call void %callback()
+ call void %callback()
+ call void %callback()
+ call void %callback()
+
+ call void @external_function(i32 %x)
+ %add1 = add i32 %x, 1
+ ret i32 %add1
+
+if_zero:
+ call void @external_function(i32 0)
+ ret i32 0
+}
+
+; Test WITH KCFI: should NOT generate cbz due to large KCFI checks
+; CHECK-LABEL: test_with_kcfi:
+; CHECK-NOT: cbz
+; CHECK: bic{{.*}}#1
+define i32 @test_with_kcfi(ptr %callback, i32 %x) !kcfi_type !1 {
+entry:
+ %cmp = icmp eq i32 %x, 0
+ br i1 %cmp, label %if_zero, label %if_nonzero
+
+if_nonzero:
+ ; Six KCFI-instrumented indirect calls (~192 bytes total, exceeds cbz range)
+ call void %callback() [ "kcfi"(i32 12345678) ]
+ call void %callback() [ "kcfi"(i32 12345678) ]
+ call void %callback() [ "kcfi"(i32 12345678) ]
+ call void %callback() [ "kcfi"(i32 12345678) ]
+ call void %callback() [ "kcfi"(i32 12345678) ]
+ call void %callback() [ "kcfi"(i32 12345678) ]
+
+ ; Regular call to prevent optimization
+ call void @external_function(i32 %x)
+ %add1 = add i32 %x, 1
+ ret i32 %add1
+
+if_zero:
+ call void @external_function(i32 0)
+ ret i32 0
+}
+
+!llvm.module.flags = !{!0}
+!0 = !{i32 4, !"kcfi", i32 1}
+!1 = !{i32 12345678}
diff --git a/llvm/test/CodeGen/ARM/kcfi-patchable-function-prefix.ll b/llvm/test/CodeGen/ARM/kcfi-patchable-function-prefix.ll
new file mode 100644
index 0000000..f8e0838
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/kcfi-patchable-function-prefix.ll
@@ -0,0 +1,99 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=armv7-linux-gnueabi -verify-machineinstrs < %s | FileCheck %s
+
+; CHECK: .p2align 2
+; CHECK-NOT: nop
+; CHECK: .long 12345678
+define void @f1(ptr noundef %x) !kcfi_type !1 {
+; CHECK-LABEL: f1:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bic r12, r0, #1
+; CHECK-NEXT: ldr r12, [r12, #-4]
+; CHECK-NEXT: eor r12, r12, #78
+; CHECK-NEXT: eor r12, r12, #24832
+; CHECK-NEXT: eor r12, r12, #12320768
+; CHECK-NEXT: eors r12, r12, #0
+; CHECK-NEXT: beq .Ltmp0
+; CHECK-NEXT: udf #33760
+; CHECK-NEXT: .Ltmp0:
+; CHECK-NEXT: blx r0
+; CHECK-NEXT: pop {r11, pc}
+ call void %x() [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+; CHECK: .p2align 2
+; CHECK-NOT: .long
+; CHECK-NOT: nop
+define void @f2(ptr noundef %x) {
+; CHECK-LABEL: f2:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bic r12, r0, #1
+; CHECK-NEXT: ldr r12, [r12, #-4]
+; CHECK-NEXT: eor r12, r12, #78
+; CHECK-NEXT: eor r12, r12, #24832
+; CHECK-NEXT: eor r12, r12, #12320768
+; CHECK-NEXT: eors r12, r12, #0
+; CHECK-NEXT: beq .Ltmp1
+; CHECK-NEXT: udf #33760
+; CHECK-NEXT: .Ltmp1:
+; CHECK-NEXT: blx r0
+; CHECK-NEXT: pop {r11, pc}
+ call void %x() [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+; CHECK: .p2align 2
+; CHECK: .long 12345678
+; CHECK-COUNT-11: nop
+define void @f3(ptr noundef %x) #0 !kcfi_type !1 {
+; CHECK-LABEL: f3:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bic r12, r0, #1
+; CHECK-NEXT: ldr r12, [r12, #-48]
+; CHECK-NEXT: eor r12, r12, #78
+; CHECK-NEXT: eor r12, r12, #24832
+; CHECK-NEXT: eor r12, r12, #12320768
+; CHECK-NEXT: eors r12, r12, #0
+; CHECK-NEXT: beq .Ltmp3
+; CHECK-NEXT: udf #33760
+; CHECK-NEXT: .Ltmp3:
+; CHECK-NEXT: blx r0
+; CHECK-NEXT: pop {r11, pc}
+ call void %x() [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+; CHECK: .p2align 2
+; CHECK-COUNT-11: nop
+define void @f4(ptr noundef %x) #0 {
+; CHECK-LABEL: f4:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r11, lr}
+; CHECK-NEXT: push {r11, lr}
+; CHECK-NEXT: bic r12, r0, #1
+; CHECK-NEXT: ldr r12, [r12, #-48]
+; CHECK-NEXT: eor r12, r12, #78
+; CHECK-NEXT: eor r12, r12, #24832
+; CHECK-NEXT: eor r12, r12, #12320768
+; CHECK-NEXT: eors r12, r12, #0
+; CHECK-NEXT: beq .Ltmp5
+; CHECK-NEXT: udf #33760
+; CHECK-NEXT: .Ltmp5:
+; CHECK-NEXT: blx r0
+; CHECK-NEXT: pop {r11, pc}
+ call void %x() [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+attributes #0 = { "patchable-function-prefix"="11" }
+
+!llvm.module.flags = !{!0}
+!0 = !{i32 4, !"kcfi", i32 1}
+!1 = !{i32 12345678}
diff --git a/llvm/test/CodeGen/ARM/kcfi-thumb.ll b/llvm/test/CodeGen/ARM/kcfi-thumb.ll
new file mode 100644
index 0000000..7c02d830
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/kcfi-thumb.ll
@@ -0,0 +1,215 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=thumbv6m-none-eabi < %s | FileCheck %s
+
+; This test verifies that Thumb1 (ARMv6-M) generates correct code for backend KCFI.
+; Thumb1 uses the backend KCFI implementation with Thumb1-specific instructions.
+
+; Test function without KCFI annotation
+; CHECK-LABEL: .globl nosan
+; CHECK-NEXT: .p2align 1
+; CHECK-NEXT: .type nosan,%function
+; CHECK-NEXT: .code 16
+; CHECK-NEXT: .thumb_func
+define dso_local void @nosan() nounwind {
+; CHECK-LABEL: nosan:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: bx lr
+ ret void
+}
+
+; Test function with KCFI annotation - verifies type hash emission
+;; The alignment is at least 4 to avoid unaligned type hash loads when this
+;; instrumented function is indirectly called.
+; CHECK-LABEL: .globl target_func
+; CHECK-NEXT: .p2align 2
+; CHECK-NEXT: .type target_func,%function
+; CHECK-NEXT: .long 3170468932
+; CHECK-NEXT: .code 16
+; CHECK-NEXT: .thumb_func
+define void @target_func() !kcfi_type !1 {
+; CHECK-LABEL: target_func:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: bx lr
+ ret void
+}
+
+; Test indirect call with KCFI check using operand bundles
+; CHECK-LABEL: .globl f1
+; CHECK: .p2align 2
+; CHECK-NEXT: .type f1,%function
+; CHECK-NEXT: .long 3170468932
+; CHECK-NEXT: .code 16
+; CHECK-NEXT: .thumb_func
+define void @f1(ptr noundef %x) !kcfi_type !1 {
+; CHECK-LABEL: f1:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r7, lr}
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: movs r3, #1
+; CHECK-NEXT: mov r2, r0
+; CHECK-NEXT: bics r2, r3
+; CHECK-NEXT: subs r2, #4
+; CHECK-NEXT: ldr r2, [r2]
+; CHECK-NEXT: movs r3, #188
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #249
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #132
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #68
+; CHECK-NEXT: cmp r2, r3
+; CHECK-NEXT: beq .Ltmp0
+; CHECK-NEXT: bkpt #0
+; CHECK-NEXT: .Ltmp0:
+; CHECK-NEXT: blx r0
+; CHECK-NEXT: pop {r7, pc}
+ call void %x() [ "kcfi"(i32 -1124498364) ]
+ ret void
+}
+
+; Test with tail call - backend KCFI supports tail calls
+define void @f2(ptr noundef %x) !kcfi_type !1 {
+; CHECK-LABEL: f2:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r7, lr}
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: movs r3, #1
+; CHECK-NEXT: mov r2, r0
+; CHECK-NEXT: bics r2, r3
+; CHECK-NEXT: subs r2, #4
+; CHECK-NEXT: ldr r2, [r2]
+; CHECK-NEXT: movs r3, #188
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #249
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #132
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #68
+; CHECK-NEXT: cmp r2, r3
+; CHECK-NEXT: beq .Ltmp1
+; CHECK-NEXT: bkpt #0
+; CHECK-NEXT: .Ltmp1:
+; CHECK-NEXT: blx r0
+; CHECK-NEXT: pop {r7, pc}
+ tail call void %x() [ "kcfi"(i32 -1124498364) ]
+ ret void
+}
+
+; Test with R2 live (3 arguments) - compiler shuffles args, no spilling needed
+define void @f3_r2_live(ptr noundef %x, i32 %a, i32 %b, i32 %c) !kcfi_type !1 {
+; CHECK-LABEL: f3_r2_live:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r4, lr}
+; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: mov r1, r2
+; CHECK-NEXT: mov r2, r3
+; CHECK-NEXT: push {r2}
+; CHECK-NEXT: movs r3, #1
+; CHECK-NEXT: mov r2, r4
+; CHECK-NEXT: bics r2, r3
+; CHECK-NEXT: subs r2, #4
+; CHECK-NEXT: ldr r2, [r2]
+; CHECK-NEXT: movs r3, #188
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #249
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #132
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #68
+; CHECK-NEXT: cmp r2, r3
+; CHECK-NEXT: pop {r2}
+; CHECK-NEXT: beq .Ltmp2
+; CHECK-NEXT: bkpt #0
+; CHECK-NEXT: .Ltmp2:
+; CHECK-NEXT: blx r4
+; CHECK-NEXT: pop {r4, pc}
+; Compiler shuffles: target→r4, c→r2, a→r0, b→r1
+; R2 is live (3rd arg), so we push it, then use R3 as temp and R2 as scratch
+ call void %x(i32 %a, i32 %b, i32 %c) [ "kcfi"(i32 -1124498364) ]
+ ret void
+}
+
+; Test with both R2 and R3 live (4 arguments) - compiler moves r3/target to r5/r4, uses R3 as temp and R2 as scratch
+define void @f4_r2_r3_live(ptr noundef %x, i32 %a, i32 %b, i32 %c, i32 %d) !kcfi_type !1 {
+; CHECK-LABEL: f4_r2_r3_live:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r4, r5, r7, lr}
+; CHECK-NEXT: push {r4, r5, r7, lr}
+; CHECK-NEXT: mov r5, r3
+; CHECK-NEXT: mov r4, r0
+; CHECK-NEXT: ldr r3, [sp, #16]
+; CHECK-NEXT: mov r0, r1
+; CHECK-NEXT: mov r1, r2
+; CHECK-NEXT: mov r2, r5
+; CHECK-NEXT: push {r3}
+; CHECK-NEXT: push {r2}
+; CHECK-NEXT: movs r3, #1
+; CHECK-NEXT: mov r2, r4
+; CHECK-NEXT: bics r2, r3
+; CHECK-NEXT: subs r2, #4
+; CHECK-NEXT: ldr r2, [r2]
+; CHECK-NEXT: movs r3, #188
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #249
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #132
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #68
+; CHECK-NEXT: cmp r2, r3
+; CHECK-NEXT: pop {r2}
+; CHECK-NEXT: pop {r3}
+; CHECK-NEXT: beq .Ltmp3
+; CHECK-NEXT: bkpt #0
+; CHECK-NEXT: .Ltmp3:
+; CHECK-NEXT: blx r4
+; CHECK-NEXT: pop {r4, r5, r7, pc}
+; Compiler shuffles: r3→r5, target→r4, d→r3 (from stack), a→r0, b→r1, c→r2
+; Then pushes r3 (d value), then r2, uses R3 as temp, R2 as scratch
+ call void %x(i32 %a, i32 %b, i32 %c, i32 %d) [ "kcfi"(i32 -1124498364) ]
+ ret void
+}
+
+; Test where target ends up in R12, forcing R2 as scratch, with both R2 and R3 live
+; This uses inline asm to force target into R12, with 4 call arguments to make R2/R3 live
+define void @f5_r12_target_r2_r3_live(i32 %a, i32 %b, i32 %c, i32 %d) !kcfi_type !1 {
+; CHECK-LABEL: f5_r12_target_r2_r3_live:
+; CHECK: @ %bb.0:
+; CHECK-NEXT: .save {r7, lr}
+; CHECK-NEXT: push {r7, lr}
+; CHECK-NEXT: @APP
+; CHECK-NEXT: @NO_APP
+; CHECK-NEXT: push {r3}
+; CHECK-NEXT: push {r2}
+; CHECK-NEXT: movs r3, #1
+; CHECK-NEXT: mov r2, r12
+; CHECK-NEXT: bics r2, r3
+; CHECK-NEXT: subs r2, #4
+; CHECK-NEXT: ldr r2, [r2]
+; CHECK-NEXT: movs r3, #188
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #249
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #132
+; CHECK-NEXT: lsls r3, r3, #8
+; CHECK-NEXT: adds r3, #68
+; CHECK-NEXT: cmp r2, r3
+; CHECK-NEXT: pop {r2}
+; CHECK-NEXT: pop {r3}
+; CHECK-NEXT: beq .Ltmp4
+; CHECK-NEXT: bkpt #0
+; CHECK-NEXT: .Ltmp4:
+; CHECK-NEXT: blx r12
+; CHECK-NEXT: pop {r7, pc}
+; Use inline asm to get function pointer into R12
+; With 4 arguments (r0-r3), both R2 and R3 are live
+; Target in R12 means R2 is scratch, R3 is temp, and both need spilling
+ %target = call ptr asm "", "={r12}"()
+ call void %target(i32 %a, i32 %b, i32 %c, i32 %d) [ "kcfi"(i32 -1124498364) ]
+ ret void
+}
+
+!llvm.module.flags = !{!0}
+!0 = !{i32 4, !"kcfi", i32 1}
+!1 = !{i32 -1124498364}
diff --git a/llvm/test/CodeGen/ARM/kcfi-thumb2.ll b/llvm/test/CodeGen/ARM/kcfi-thumb2.ll
new file mode 100644
index 0000000..f319d98
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/kcfi-thumb2.ll
@@ -0,0 +1,163 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=thumbv7-linux-gnueabi -verify-machineinstrs < %s | FileCheck %s --check-prefix=ASM
+; RUN: llc -mtriple=thumbv7-linux-gnueabi -verify-machineinstrs -stop-after=finalize-isel < %s | FileCheck %s --check-prefixes=MIR,ISEL
+; RUN: llc -mtriple=thumbv7-linux-gnueabi -verify-machineinstrs -stop-after=kcfi < %s | FileCheck %s --check-prefixes=MIR,KCFI
+
+; MIR checks for all functions (grouped here to prevent update_llc_test_checks.py from removing them)
+
+; MIR-LABEL: name: f1
+; MIR: body:
+
+; ISEL: tBLXr 14 /* CC::al */, $noreg, %0, csr_aapcs,{{.*}} cfi-type 12345678
+
+; KCFI: BUNDLE{{.*}} {
+; KCFI-NEXT: KCFI_CHECK_Thumb2 $r0, 12345678
+; KCFI-NEXT: tBLXr 14 /* CC::al */, $noreg, {{(killed )?}}$r0, csr_aapcs,{{.*}}
+; KCFI-NEXT: }
+
+; MIR-LABEL: name: f2
+; MIR: body:
+
+; ISEL: TCRETURNri %0, 0, csr_aapcs, implicit $sp, cfi-type 12345678
+
+; KCFI: BUNDLE{{.*}} {
+; KCFI-NEXT: KCFI_CHECK_Thumb2 $r0, 12345678
+; KCFI-NEXT: tTAILJMPr {{(killed )?}}$r0, csr_aapcs, implicit $sp, implicit $sp
+; KCFI-NEXT: }
+
+; Test function without KCFI annotation
+; ASM-LABEL: .globl nosan
+; ASM-NEXT: .p2align 1
+; ASM-NEXT: .type nosan,%function
+; ASM-NEXT: .code 16
+; ASM-NEXT: .thumb_func
+define dso_local void @nosan() nounwind {
+; ASM-LABEL: nosan:
+; ASM: @ %bb.0:
+; ASM-NEXT: bx lr
+ ret void
+}
+
+; Test function with KCFI annotation - verifies type hash emission
+;; The alignment is at least 4 to avoid unaligned type hash loads when this
+;; instrumented function is indirectly called.
+; ASM-LABEL: .globl target_func
+; ASM-NEXT: .p2align 2
+; ASM-NEXT: .type target_func,%function
+; ASM-NEXT: .long 12345678
+; ASM-NEXT: .code 16
+; ASM-NEXT: .thumb_func
+define void @target_func() !kcfi_type !1 {
+; ASM-LABEL: target_func:
+; ASM: @ %bb.0:
+; ASM-NEXT: bx lr
+ ret void
+}
+
+; Test indirect call with KCFI check
+; ASM: .long 12345678
+define void @f1(ptr noundef %x) !kcfi_type !1 {
+; ASM-LABEL: f1:
+; ASM: @ %bb.0:
+; ASM-NEXT: .save {r7, lr}
+; ASM-NEXT: push {r7, lr}
+; ASM-NEXT: bic r12, r0, #1
+; ASM-NEXT: ldr r12, [r12, #-4]
+; ASM-NEXT: eor r12, r12, #78
+; ASM-NEXT: eor r12, r12, #24832
+; ASM-NEXT: eor r12, r12, #12320768
+; ASM-NEXT: eors r12, r12, #0
+; ASM-NEXT: beq.w .Ltmp0
+; ASM-NEXT: udf #128
+; ASM-NEXT: .Ltmp0:
+; ASM-NEXT: blx r0
+; ASM-NEXT: pop {r7, pc}
+
+ call void %x() [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+; Test with tail call
+define void @f2(ptr noundef %x) !kcfi_type !1 {
+; ASM-LABEL: f2:
+; ASM: @ %bb.0:
+; ASM-NEXT: bic r12, r0, #1
+; ASM-NEXT: ldr r12, [r12, #-4]
+; ASM-NEXT: eor r12, r12, #78
+; ASM-NEXT: eor r12, r12, #24832
+; ASM-NEXT: eor r12, r12, #12320768
+; ASM-NEXT: eors r12, r12, #0
+; ASM-NEXT: beq.w .Ltmp1
+; ASM-NEXT: udf #128
+; ASM-NEXT: .Ltmp1:
+; ASM-NEXT: bx r0
+
+ tail call void %x() [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+; Test r3 spill/reload when target is r12 and r3 is a call argument (Thumb2)
+define void @f3_r3_spill(ptr noundef %target, i32 %a, i32 %b, i32 %c, i32 %d) !kcfi_type !1 {
+; ASM-LABEL: f3_r3_spill:
+; ASM: @ %bb.0:
+; ASM-NEXT: .save {r7, lr}
+; ASM-NEXT: push {r7, lr}
+; ASM-NEXT: mov lr, r3
+; ASM-NEXT: ldr r3, [sp, #8]
+; ASM-NEXT: mov r12, r0
+; ASM-NEXT: mov r0, r1
+; ASM-NEXT: mov r1, r2
+; ASM-NEXT: mov r2, lr
+; ASM-NEXT: push {r3}
+; ASM-NEXT: bic r3, r12, #1
+; ASM-NEXT: ldr r3, [r3, #-4]
+; ASM-NEXT: eor r3, r3, #78
+; ASM-NEXT: eor r3, r3, #24832
+; ASM-NEXT: eor r3, r3, #12320768
+; ASM-NEXT: eors r3, r3, #0
+; ASM-NEXT: pop {r3}
+; ASM-NEXT: beq.w .Ltmp2
+; ASM-NEXT: udf #140
+; ASM-NEXT: .Ltmp2:
+; ASM-NEXT: blx r12
+; ASM-NEXT: pop {r7, pc}
+; Arguments: r0=%target, r1=%a, r2=%b, r3=%c, [sp+8]=%d
+; Call needs: r0=%a, r1=%b, r2=%c, r3=%d, target in r12
+; r3 is live as 4th argument, so push it before KCFI check
+ call void %target(i32 %a, i32 %b, i32 %c, i32 %d) [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+; Test with 3 arguments - r3 not live; the target ends up in r3, so r12 is used as scratch
+define void @f4_r3_unused(ptr noundef %target, i32 %a, i32 %b) !kcfi_type !1 {
+; ASM-LABEL: f4_r3_unused:
+; ASM: @ %bb.0:
+; ASM-NEXT: .save {r7, lr}
+; ASM-NEXT: push {r7, lr}
+; ASM-NEXT: mov r3, r0
+; ASM-NEXT: mov r0, r1
+; ASM-NEXT: mov r1, r2
+; ASM-NEXT: bic r12, r3, #1
+; ASM-NEXT: ldr r12, [r12, #-4]
+; ASM-NEXT: eor r12, r12, #78
+; ASM-NEXT: eor r12, r12, #24832
+; ASM-NEXT: eor r12, r12, #12320768
+; ASM-NEXT: eors r12, r12, #0
+; ASM-NEXT: beq.w .Ltmp3
+; ASM-NEXT: udf #131
+; ASM-NEXT: .Ltmp3:
+; ASM-NEXT: blx r3
+; ASM-NEXT: pop {r7, pc}
+; Only 3 arguments total, so r3 is not used as a call argument
+; Target might be in r3, using r12 as scratch (no spill needed)
+ call void %target(i32 %a, i32 %b) [ "kcfi"(i32 12345678) ]
+ ret void
+}
+
+!llvm.module.flags = !{!0}
+!0 = !{i32 4, !"kcfi", i32 1}
+!1 = !{i32 12345678}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; ISEL: {{.*}}
+; KCFI: {{.*}}
+; MIR: {{.*}}
diff --git a/llvm/test/CodeGen/ARM/kcfi.ll b/llvm/test/CodeGen/ARM/kcfi.ll
deleted file mode 100644
index 9e16468..0000000
--- a/llvm/test/CodeGen/ARM/kcfi.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; RUN: llc -mtriple=thumbv6m-none-eabi < %s | FileCheck %s
-
-; CHECK-LABEL: .globl nosan
-; CHECK-NEXT: .p2align 1
-; CHECK-NEXT: .type nosan,%function
-; CHECK-NEXT: .code 16
-; CHECK-NEXT: .thumb_func
-; CHECK-NEXT: nosan:
-define dso_local void @nosan() nounwind {
- ret void
-}
-
-;; The alignment is at least 4 to avoid unaligned type hash loads when this
-;; instrumented function is indirectly called.
-; CHECK-LABEL: .globl f1
-; CHECK-NEXT: .p2align 2
-; CHECK-NEXT: .type f1,%function
-; CHECK-NEXT: .long 3170468932
-; CHECK-NEXT: .code 16
-; CHECK-NEXT: .thumb_func
-; CHECK-NEXT: f1:
-define void @f1(ptr noundef %x) !kcfi_type !1 {
- ret void
-}
-
-!llvm.module.flags = !{!0}
-!0 = !{i32 4, !"kcfi", i32 1}
-!1 = !{i32 -1124498364}
diff --git a/llvm/test/CodeGen/DirectX/CBufferAccess/unused.ll b/llvm/test/CodeGen/DirectX/CBufferAccess/unused.ll
index 8c0d82e..6f1bbd0 100644
--- a/llvm/test/CodeGen/DirectX/CBufferAccess/unused.ll
+++ b/llvm/test/CodeGen/DirectX/CBufferAccess/unused.ll
@@ -2,7 +2,7 @@
; Check that we correctly ignore cbuffers that were nulled out by optimizations.
%__cblayout_CB = type <{ float }>
-@CB.cb = local_unnamed_addr global target("dx.CBuffer", %__cblayout_CB) poison
+@CB.cb = local_unnamed_addr global target("dx.CBuffer", target("dx.Layout", %__cblayout_CB, 4, 0)) poison
@x = external local_unnamed_addr addrspace(2) global float, align 4
; CHECK-NOT: !hlsl.cbs =
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_shader_clock/shader_clock.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_shader_clock/shader_clock.ll
index bd07ba1..eb4cf76 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_shader_clock/shader_clock.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_shader_clock/shader_clock.ll
@@ -20,6 +20,9 @@
; CHECK: OpReadClockKHR [[v2uint]] [[uint_1]]
; CHECK: OpReadClockKHR [[v2uint]] [[uint_2]]
; CHECK: OpReadClockKHR [[v2uint]] [[uint_3]]
+; CHECK: OpReadClockKHR [[ulong]] [[uint_1]]
+; CHECK: OpReadClockKHR [[ulong]] [[uint_2]]
+; CHECK: OpReadClockKHR [[ulong]] [[uint_3]]
define dso_local spir_kernel void @test_clocks(ptr addrspace(1) nocapture noundef writeonly align 8 %out64, ptr addrspace(1) nocapture noundef writeonly align 8 %outv2) {
entry:
@@ -39,6 +42,9 @@ entry:
%call9 = tail call spir_func <2 x i32> @_Z25clock_read_hilo_sub_groupv()
%arrayidx10 = getelementptr inbounds i8, ptr addrspace(1) %outv2, i32 16
store <2 x i32> %call9, ptr addrspace(1) %arrayidx10, align 8
+ %call10 = call spir_func i64 @_Z27__spirv_ReadClockKHR_Rulongi(i32 1)
+ %call11 = call spir_func i64 @_Z27__spirv_ReadClockKHR_Rulongi(i32 2)
+ %call12 = call spir_func i64 @_Z27__spirv_ReadClockKHR_Rulongi(i32 3)
ret void
}
@@ -59,3 +65,6 @@ declare spir_func <2 x i32> @_Z26clock_read_hilo_work_groupv() local_unnamed_add
; Function Attrs: convergent nounwind
declare spir_func <2 x i32> @_Z25clock_read_hilo_sub_groupv() local_unnamed_addr
+
+; Function Attrs: nounwind
+declare spir_func i64 @_Z27__spirv_ReadClockKHR_Rulongi(i32)
diff --git a/llvm/test/CodeGen/X86/atomic-load-store.ll b/llvm/test/CodeGen/X86/atomic-load-store.ll
index 4f5cb5a..9fab8b9 100644
--- a/llvm/test/CodeGen/X86/atomic-load-store.ll
+++ b/llvm/test/CodeGen/X86/atomic-load-store.ll
@@ -269,3 +269,120 @@ define <1 x i64> @atomic_vec1_i64_align(ptr %x) nounwind {
%ret = load atomic <1 x i64>, ptr %x acquire, align 8
ret <1 x i64> %ret
}
+
+define <1 x half> @atomic_vec1_half(ptr %x) {
+; CHECK-O3-LABEL: atomic_vec1_half:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-O3-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-O3-NEXT: retq
+;
+; CHECK-SSE-O3-LABEL: atomic_vec1_half:
+; CHECK-SSE-O3: # %bb.0:
+; CHECK-SSE-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-SSE-O3-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-SSE-O3-NEXT: retq
+;
+; CHECK-AVX-O3-LABEL: atomic_vec1_half:
+; CHECK-AVX-O3: # %bb.0:
+; CHECK-AVX-O3-NEXT: movzwl (%rdi), %eax
+; CHECK-AVX-O3-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
+; CHECK-AVX-O3-NEXT: retq
+;
+; CHECK-O0-LABEL: atomic_vec1_half:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: movw (%rdi), %cx
+; CHECK-O0-NEXT: # implicit-def: $eax
+; CHECK-O0-NEXT: movw %cx, %ax
+; CHECK-O0-NEXT: # implicit-def: $xmm0
+; CHECK-O0-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-O0-NEXT: retq
+;
+; CHECK-SSE-O0-LABEL: atomic_vec1_half:
+; CHECK-SSE-O0: # %bb.0:
+; CHECK-SSE-O0-NEXT: movw (%rdi), %cx
+; CHECK-SSE-O0-NEXT: # implicit-def: $eax
+; CHECK-SSE-O0-NEXT: movw %cx, %ax
+; CHECK-SSE-O0-NEXT: # implicit-def: $xmm0
+; CHECK-SSE-O0-NEXT: pinsrw $0, %eax, %xmm0
+; CHECK-SSE-O0-NEXT: retq
+;
+; CHECK-AVX-O0-LABEL: atomic_vec1_half:
+; CHECK-AVX-O0: # %bb.0:
+; CHECK-AVX-O0-NEXT: movw (%rdi), %cx
+; CHECK-AVX-O0-NEXT: # implicit-def: $eax
+; CHECK-AVX-O0-NEXT: movw %cx, %ax
+; CHECK-AVX-O0-NEXT: # implicit-def: $xmm0
+; CHECK-AVX-O0-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
+; CHECK-AVX-O0-NEXT: retq
+ %ret = load atomic <1 x half>, ptr %x acquire, align 2
+ ret <1 x half> %ret
+}
+
+define <1 x float> @atomic_vec1_float(ptr %x) {
+; CHECK-O3-LABEL: atomic_vec1_float:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-O3-NEXT: retq
+;
+; CHECK-SSE-O3-LABEL: atomic_vec1_float:
+; CHECK-SSE-O3: # %bb.0:
+; CHECK-SSE-O3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-SSE-O3-NEXT: retq
+;
+; CHECK-AVX-O3-LABEL: atomic_vec1_float:
+; CHECK-AVX-O3: # %bb.0:
+; CHECK-AVX-O3-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX-O3-NEXT: retq
+;
+; CHECK-O0-LABEL: atomic_vec1_float:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-O0-NEXT: retq
+;
+; CHECK-SSE-O0-LABEL: atomic_vec1_float:
+; CHECK-SSE-O0: # %bb.0:
+; CHECK-SSE-O0-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-SSE-O0-NEXT: retq
+;
+; CHECK-AVX-O0-LABEL: atomic_vec1_float:
+; CHECK-AVX-O0: # %bb.0:
+; CHECK-AVX-O0-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-AVX-O0-NEXT: retq
+ %ret = load atomic <1 x float>, ptr %x acquire, align 4
+ ret <1 x float> %ret
+}
+
+define <1 x double> @atomic_vec1_double_align(ptr %x) nounwind {
+; CHECK-O3-LABEL: atomic_vec1_double_align:
+; CHECK-O3: # %bb.0:
+; CHECK-O3-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-O3-NEXT: retq
+;
+; CHECK-SSE-O3-LABEL: atomic_vec1_double_align:
+; CHECK-SSE-O3: # %bb.0:
+; CHECK-SSE-O3-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-SSE-O3-NEXT: retq
+;
+; CHECK-AVX-O3-LABEL: atomic_vec1_double_align:
+; CHECK-AVX-O3: # %bb.0:
+; CHECK-AVX-O3-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-AVX-O3-NEXT: retq
+;
+; CHECK-O0-LABEL: atomic_vec1_double_align:
+; CHECK-O0: # %bb.0:
+; CHECK-O0-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-O0-NEXT: retq
+;
+; CHECK-SSE-O0-LABEL: atomic_vec1_double_align:
+; CHECK-SSE-O0: # %bb.0:
+; CHECK-SSE-O0-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-SSE-O0-NEXT: retq
+;
+; CHECK-AVX-O0-LABEL: atomic_vec1_double_align:
+; CHECK-AVX-O0: # %bb.0:
+; CHECK-AVX-O0-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-AVX-O0-NEXT: retq
+ %ret = load atomic <1 x double>, ptr %x acquire, align 8
+ ret <1 x double> %ret
+}
diff --git a/llvm/test/CodeGen/X86/basic-block-address-map-empty-function.ll b/llvm/test/CodeGen/X86/basic-block-address-map-empty-function.ll
index 4e76262..423e318 100644
--- a/llvm/test/CodeGen/X86/basic-block-address-map-empty-function.ll
+++ b/llvm/test/CodeGen/X86/basic-block-address-map-empty-function.ll
@@ -19,7 +19,7 @@ entry:
; CHECK: func:
; CHECK: .Lfunc_begin1:
; CHECK: .section .llvm_bb_addr_map,"o",@llvm_bb_addr_map,.text{{$}}
-; CHECK-NEXT: .byte 3 # version
+; CHECK-NEXT: .byte 4 # version
; BASIC-NEXT: .byte 0 # feature
; PGO-NEXT: .byte 3 # feature
; CHECK-NEXT: .quad .Lfunc_begin1 # function address
diff --git a/llvm/test/CodeGen/X86/basic-block-address-map-function-sections.ll b/llvm/test/CodeGen/X86/basic-block-address-map-function-sections.ll
index f610b04..e32e522 100644
--- a/llvm/test/CodeGen/X86/basic-block-address-map-function-sections.ll
+++ b/llvm/test/CodeGen/X86/basic-block-address-map-function-sections.ll
@@ -10,7 +10,7 @@ define dso_local i32 @_Z3barv() {
; CHECK-LABEL: _Z3barv:
; CHECK-NEXT: [[BAR_BEGIN:.Lfunc_begin[0-9]+]]:
; CHECK: .section .llvm_bb_addr_map,"o",@llvm_bb_addr_map,.text._Z3barv{{$}}
-; CHECK-NEXT: .byte 3 # version
+; CHECK-NEXT: .byte 4 # version
; CHECK-NEXT: .byte 0 # feature
; CHECK-NEXT: .quad [[BAR_BEGIN]] # function address
@@ -23,7 +23,7 @@ define dso_local i32 @_Z3foov() {
; CHECK-LABEL: _Z3foov:
; CHECK-NEXT: [[FOO_BEGIN:.Lfunc_begin[0-9]+]]:
; CHECK: .section .llvm_bb_addr_map,"o",@llvm_bb_addr_map,.text._Z3foov{{$}}
-; CHECK-NEXT: .byte 3 # version
+; CHECK-NEXT: .byte 4 # version
; CHECK-NEXT: .byte 32 # feature
; CHECK-NEXT: .quad [[FOO_BEGIN]] # function address
@@ -36,6 +36,6 @@ define linkonce_odr dso_local i32 @_Z4fooTIiET_v() comdat {
; CHECK-LABEL: _Z4fooTIiET_v:
; CHECK-NEXT: [[FOOCOMDAT_BEGIN:.Lfunc_begin[0-9]+]]:
; CHECK: .section .llvm_bb_addr_map,"oG",@llvm_bb_addr_map,.text._Z4fooTIiET_v,_Z4fooTIiET_v,comdat{{$}}
-; CHECK-NEXT: .byte 3 # version
+; CHECK-NEXT: .byte 4 # version
; CHECK-NEXT: .byte 0 # feature
; CHECK-NEXT: .quad [[FOOCOMDAT_BEGIN]] # function address
diff --git a/llvm/test/CodeGen/X86/basic-block-address-map-pgo-features.ll b/llvm/test/CodeGen/X86/basic-block-address-map-pgo-features.ll
index ba76f3e..12b1297 100644
--- a/llvm/test/CodeGen/X86/basic-block-address-map-pgo-features.ll
+++ b/llvm/test/CodeGen/X86/basic-block-address-map-pgo-features.ll
@@ -69,7 +69,7 @@ declare i32 @__gxx_personality_v0(...)
; CHECK-LABEL: .Lfunc_end0:
; CHECK: .section .llvm_bb_addr_map,"o",@llvm_bb_addr_map,.text._Z3bazb{{$}}
-; CHECK-NEXT: .byte 3 # version
+; CHECK-NEXT: .byte 4 # version
; BASIC-NEXT: .byte 32 # feature
; PGO-ALL-NEXT: .byte 39 # feature
; FEC-ONLY-NEXT:.byte 33 # feature
diff --git a/llvm/test/CodeGen/X86/basic-block-address-map-with-basic-block-sections.ll b/llvm/test/CodeGen/X86/basic-block-address-map-with-basic-block-sections.ll
index 6157f1a..aeb6dc95 100644
--- a/llvm/test/CodeGen/X86/basic-block-address-map-with-basic-block-sections.ll
+++ b/llvm/test/CodeGen/X86/basic-block-address-map-with-basic-block-sections.ll
@@ -47,7 +47,7 @@ declare i32 @__gxx_personality_v0(...)
; CHECK-LABEL: .Lfunc_end0:
; CHECK: .section .llvm_bb_addr_map,"o",@llvm_bb_addr_map,.text.hot._Z3bazb
-; CHECK-NEXT: .byte 3 # version
+; CHECK-NEXT: .byte 4 # version
; CHECK-NEXT: .byte 40 # feature
; CHECK-NEXT: .byte 2 # number of basic block ranges
; CHECK-NEXT: .quad .Lfunc_begin0 # base address
diff --git a/llvm/test/CodeGen/X86/basic-block-address-map-with-emit-bb-hash.ll b/llvm/test/CodeGen/X86/basic-block-address-map-with-emit-bb-hash.ll
new file mode 100644
index 0000000..a5678877
--- /dev/null
+++ b/llvm/test/CodeGen/X86/basic-block-address-map-with-emit-bb-hash.ll
@@ -0,0 +1,94 @@
+; Check that the basic block address map option works when used along with -emit-bb-hash.
+; RUN: llc < %s -mtriple=x86_64 -function-sections -unique-section-names=true -basic-block-address-map -emit-bb-hash | FileCheck %s --check-prefixes=CHECK,UNIQ
+
+define void @_Z3bazb(i1 zeroext, i1 zeroext) personality ptr @__gxx_personality_v0 {
+ br i1 %0, label %3, label %8
+
+3:
+ %4 = invoke i32 @_Z3barv()
+ to label %8 unwind label %6
+ br label %10
+
+6:
+ landingpad { ptr, i32 }
+ catch ptr null
+ br label %12
+
+8:
+ %9 = call i32 @_Z3foov()
+ br i1 %1, label %12, label %10
+
+10:
+ %11 = select i1 %1, ptr blockaddress(@_Z3bazb, %3), ptr blockaddress(@_Z3bazb, %12) ; <ptr> [#uses=1]
+ indirectbr ptr %11, [label %3, label %12]
+
+12:
+ ret void
+}
+
+declare i32 @_Z3barv() #1
+
+declare i32 @_Z3foov() #1
+
+declare i32 @__gxx_personality_v0(...)
+
+; UNIQ: .section .text._Z3bazb,"ax",@progbits{{$}}
+; NOUNIQ: .section .text,"ax",@progbits,unique,1
+; CHECK-LABEL: _Z3bazb:
+; CHECK-LABEL: .Lfunc_begin0:
+; CHECK-LABEL: .LBB_END0_0:
+; CHECK-LABEL: .LBB0_1:
+; CHECK-LABEL: .LBB0_1_CS0:
+; CHECK-LABEL: .LBB_END0_1:
+; CHECK-LABEL: .LBB0_2:
+; CHECK-LABEL: .LBB0_2_CS0:
+; CHECK-LABEL: .LBB_END0_2:
+; CHECK-LABEL: .LBB0_3:
+; CHECK-LABEL: .LBB_END0_3:
+; CHECK-LABEL: .Lfunc_end0:
+
+; UNIQ: .section .llvm_bb_addr_map,"o",@llvm_bb_addr_map,.text._Z3bazb{{$}}
+;; Verify that with -unique-section-names=false, the unique id of the text section gets assigned to the llvm_bb_addr_map section.
+; NOUNIQ: .section .llvm_bb_addr_map,"o",@llvm_bb_addr_map,.text,unique,1
+; CHECK-NEXT: .byte 4 # version
+; CHECK-NEXT: .byte 96 # feature
+; CHECK-NEXT: .quad .Lfunc_begin0 # function address
+; CHECK-NEXT: .byte 6 # number of basic blocks
+; CHECK-NEXT: .byte 0 # BB id
+; CHECK-NEXT: .uleb128 .Lfunc_begin0-.Lfunc_begin0
+; CHECK-NEXT: .byte 0 # number of callsites
+; CHECK-NEXT: .uleb128 .LBB_END0_0-.Lfunc_begin0
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .quad {{-?[0-9]+}}
+; CHECK-NEXT: .byte 1 # BB id
+; CHECK-NEXT: .uleb128 .LBB0_1-.LBB_END0_0
+; CHECK-NEXT: .byte 1 # number of callsites
+; CHECK-NEXT: .uleb128 .LBB0_1_CS0-.LBB0_1
+; CHECK-NEXT: .uleb128 .LBB_END0_1-.LBB0_1_CS0
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .quad {{-?[0-9]+}}
+; CHECK-NEXT: .byte 3 # BB id
+; CHECK-NEXT: .uleb128 .LBB0_2-.LBB_END0_1
+; CHECK-NEXT: .byte 1 # number of callsites
+; CHECK-NEXT: .uleb128 .LBB0_2_CS0-.LBB0_2
+; CHECK-NEXT: .uleb128 .LBB_END0_2-.LBB0_2_CS0
+; CHECK-NEXT: .byte 8
+; CHECK-NEXT: .quad {{-?[0-9]+}}
+; CHECK-NEXT: .byte 4 # BB id
+; CHECK-NEXT: .uleb128 .LBB0_3-.LBB_END0_2
+; CHECK-NEXT: .byte 0 # number of callsites
+; CHECK-NEXT: .uleb128 .LBB_END0_3-.LBB0_3
+; CHECK-NEXT: .byte 16
+; CHECK-NEXT: .quad {{-?[0-9]+}}
+; CHECK-NEXT: .byte 5 # BB id
+; CHECK-NEXT: .uleb128 .LBB0_4-.LBB_END0_3
+; CHECK-NEXT: .byte 0 # number of callsites
+; CHECK-NEXT: .uleb128 .LBB_END0_4-.LBB0_4
+; CHECK-NEXT: .byte 1
+; CHECK-NEXT: .quad {{-?[0-9]+}}
+; CHECK-NEXT: .byte 2 # BB id
+; CHECK-NEXT: .uleb128 .LBB0_5-.LBB_END0_4
+; CHECK-NEXT: .byte 0 # number of callsites
+; CHECK-NEXT: .uleb128 .LBB_END0_5-.LBB0_5
+; CHECK-NEXT: .byte 5
+; CHECK-NEXT: .quad {{-?[0-9]+}}
diff --git a/llvm/test/CodeGen/X86/basic-block-address-map-with-mfs.ll b/llvm/test/CodeGen/X86/basic-block-address-map-with-mfs.ll
index 1e8cee4..d49b313 100644
--- a/llvm/test/CodeGen/X86/basic-block-address-map-with-mfs.ll
+++ b/llvm/test/CodeGen/X86/basic-block-address-map-with-mfs.ll
@@ -58,7 +58,7 @@ declare i32 @qux()
; CHECK-LABEL: .Lfunc_end0:
; CHECK: .section .llvm_bb_addr_map,"o",@llvm_bb_addr_map,.text.hot.foo
-; CHECK-NEXT: .byte 3 # version
+; CHECK-NEXT: .byte 4 # version
; BASIC-NEXT: .byte 40 # feature
; PGO-NEXT: .byte 47 # feature
; CHECK-NEXT: .byte 2 # number of basic block ranges
diff --git a/llvm/test/CodeGen/X86/basic-block-address-map.ll b/llvm/test/CodeGen/X86/basic-block-address-map.ll
index 5c8f3a6..64cf2c7 100644
--- a/llvm/test/CodeGen/X86/basic-block-address-map.ll
+++ b/llvm/test/CodeGen/X86/basic-block-address-map.ll
@@ -52,7 +52,7 @@ declare i32 @__gxx_personality_v0(...)
; UNIQ: .section .llvm_bb_addr_map,"o",@llvm_bb_addr_map,.text._Z3bazb{{$}}
;; Verify that with -unique-section-names=false, the unique id of the text section gets assigned to the llvm_bb_addr_map section.
; NOUNIQ: .section .llvm_bb_addr_map,"o",@llvm_bb_addr_map,.text,unique,1
-; CHECK-NEXT: .byte 3 # version
+; CHECK-NEXT: .byte 4 # version
; CHECK-NEXT: .byte 32 # feature
; CHECK-NEXT: .quad .Lfunc_begin0 # function address
; CHECK-NEXT: .byte 6 # number of basic blocks
diff --git a/llvm/test/TableGen/intrinsic-manual-name.td b/llvm/test/TableGen/intrinsic-manual-name.td
new file mode 100644
index 0000000..5751fc2
--- /dev/null
+++ b/llvm/test/TableGen/intrinsic-manual-name.td
@@ -0,0 +1,6 @@
+// RUN: llvm-tblgen -gen-intrinsic-impl -I %p/../../include %s -DTEST_INTRINSICS_SUPPRESS_DEFS 2>&1 | FileCheck %s -DFILE=%s
+
+include "llvm/IR/Intrinsics.td"
+
+// CHECK: [[FILE]]:[[@LINE+1]]:5: note: Explicitly specified name matches default name, consider dropping it
+def int_foo0 : Intrinsic<[llvm_anyint_ty], [], [], "llvm.foo0">;
diff --git a/llvm/test/ThinLTO/X86/devirt_external_comdat_same_guid.ll b/llvm/test/ThinLTO/X86/devirt_external_comdat_same_guid.ll
index 1f0737b..d0b8d14 100644
--- a/llvm/test/ThinLTO/X86/devirt_external_comdat_same_guid.ll
+++ b/llvm/test/ThinLTO/X86/devirt_external_comdat_same_guid.ll
@@ -24,9 +24,9 @@
; RUN: llvm-dis %t5.1.4.opt.bc -o - | FileCheck %s --check-prefix=CHECK-IR1
; RUN: llvm-dis %t5.2.4.opt.bc -o - | FileCheck %s --check-prefix=CHECK-IR2
-; PRINT-DAG: Devirtualized call to {{.*}} (_ZN1A1nEi)
+; PRINT-DAG: Devirtualized call to {{.*}} (_ZN1B1nEi)
-; REMARK-DAG: single-impl: devirtualized a call to _ZN1A1nEi
+; REMARK-DAG: single-impl: devirtualized a call to _ZN1B1nEi
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-grtev4-linux-gnu"
@@ -55,7 +55,7 @@ entry:
ret i32 0
}
-; CHECK-IR1: define i32 @test(
+; CHECK-IR1: define noundef i32 @test(
define i32 @test(ptr %obj, i32 %a) {
entry:
%vtable = load ptr, ptr %obj
@@ -65,7 +65,7 @@ entry:
%fptr1 = load ptr, ptr %fptrptr, align 8
; Check that the call was devirtualized.
- ; CHECK-IR1: tail call i32 {{.*}}@_ZN1A1nEi
+ ; CHECK-IR1: tail call i32 {{.*}}@_ZN1B1nEi
%call = tail call i32 %fptr1(ptr nonnull %obj, i32 %a)
ret i32 %call
@@ -73,7 +73,7 @@ entry:
; CHECK-IR2: define i32 @test2
; Check that the call was devirtualized.
-; CHECK-IR2: tail call i32 @_ZN1A1nEi
+; CHECK-IR2: tail call i32 @_ZN1B1nEi
declare i1 @llvm.type.test(ptr, metadata)
declare void @llvm.assume(i1)
diff --git a/llvm/test/ThinLTO/X86/dtlto/json.ll b/llvm/test/ThinLTO/X86/dtlto/json.ll
index 1a38438..ee1c428 100644
--- a/llvm/test/ThinLTO/X86/dtlto/json.ll
+++ b/llvm/test/ThinLTO/X86/dtlto/json.ll
@@ -7,21 +7,33 @@ RUN: rm -rf %t && split-file %s %t && cd %t
RUN: opt -thinlto-bc t1.ll -o t1.bc
RUN: opt -thinlto-bc t2.ll -o t2.bc
-; Perform DTLTO.
+; Perform DTLTO with clang.
RUN: not llvm-lto2 run t1.bc t2.bc -o my.output \
RUN: -r=t1.bc,t1,px -r=t2.bc,t2,px \
RUN: -dtlto-distributor=%python \
RUN: -dtlto-distributor-arg=%llvm_src_root/utils/dtlto/validate.py,--da1=10,--da2=10 \
RUN: -dtlto-compiler=my_clang.exe \
RUN: -dtlto-compiler-arg=--rota1=10,--rota2=20 \
-RUN: 2>&1 | FileCheck %s
+RUN: 2>&1 | FileCheck --check-prefixes=CHECK,CLANG %s
+
+; Perform DTLTO with LLVM driver.
+RUN: not llvm-lto2 run t1.bc t2.bc -o my.output \
+RUN: -r=t1.bc,t1,px -r=t2.bc,t2,px \
+RUN: -dtlto-distributor=%python \
+RUN: -dtlto-distributor-arg=%llvm_src_root/utils/dtlto/validate.py,--da1=10,--da2=10 \
+RUN: -dtlto-compiler=llvm \
+RUN: -dtlto-compiler-prepend-arg=clang \
+RUN: -dtlto-compiler-arg=--rota1=10,--rota2=20 \
+RUN: 2>&1 | FileCheck --check-prefixes=CHECK,LLVM %s
CHECK: distributor_args=['--da1=10', '--da2=10']
; Check the common object.
CHECK: "linker_output": "my.output"
CHECK: "args":
-CHECK-NEXT: "my_clang.exe"
+CLANG-NEXT: "my_clang.exe"
+LLVM-NEXT: "llvm"
+LLVM-NEXT: "clang"
CHECK-NEXT: "-c"
CHECK-NEXT: "--target=x86_64-unknown-linux-gnu"
CHECK-NEXT: "-O2"
@@ -30,7 +42,7 @@ CHECK-NEXT: "-Wno-unused-command-line-argument"
CHECK-NEXT: "--rota1=10"
CHECK-NEXT: "--rota2=20"
CHECK-NEXT: ]
-CHECK: "inputs": []
+CHECK: "inputs": []
; Check the first job entry.
CHECK: "args":
diff --git a/llvm/test/Transforms/ConstraintElimination/add-nsw.ll b/llvm/test/Transforms/ConstraintElimination/add-nsw.ll
index 5127e92..4b8ac09 100644
--- a/llvm/test/Transforms/ConstraintElimination/add-nsw.ll
+++ b/llvm/test/Transforms/ConstraintElimination/add-nsw.ll
@@ -757,8 +757,7 @@ define i1 @add_neg_1_known_sge_ult_1(i32 %a) {
; CHECK-NEXT: [[A_SGE:%.*]] = icmp sge i32 [[A:%.*]], 1
; CHECK-NEXT: call void @llvm.assume(i1 [[A_SGE]])
; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[A]], -1
-; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[SUB]], [[A]]
-; CHECK-NEXT: ret i1 [[C]]
+; CHECK-NEXT: ret i1 true
;
entry:
%a.sge = icmp sge i32 %a, 1
@@ -823,8 +822,7 @@ define i1 @add_neg_3_known_sge_ult_1(i32 %a) {
; CHECK-NEXT: [[A_SGE:%.*]] = icmp sge i32 [[A:%.*]], 3
; CHECK-NEXT: call void @llvm.assume(i1 [[A_SGE]])
; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[A]], -3
-; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[SUB]], [[A]]
-; CHECK-NEXT: ret i1 [[C]]
+; CHECK-NEXT: ret i1 true
;
entry:
%a.sge = icmp sge i32 %a, 3
diff --git a/llvm/test/Transforms/ConstraintElimination/gep-arithmetic-add.ll b/llvm/test/Transforms/ConstraintElimination/gep-arithmetic-add.ll
index 52adc78..8dcac78 100644
--- a/llvm/test/Transforms/ConstraintElimination/gep-arithmetic-add.ll
+++ b/llvm/test/Transforms/ConstraintElimination/gep-arithmetic-add.ll
@@ -389,8 +389,7 @@ define i1 @gep_count_add_1_sge_known_ult_1(i32 %count, ptr %p) {
; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[COUNT]], -1
; CHECK-NEXT: [[SUB_EXT:%.*]] = zext i32 [[SUB]] to i64
; CHECK-NEXT: [[GEP_SUB:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[SUB_EXT]]
-; CHECK-NEXT: [[C:%.*]] = icmp ult ptr [[GEP_SUB]], [[GEP_COUNT]]
-; CHECK-NEXT: ret i1 [[C]]
+; CHECK-NEXT: ret i1 true
;
entry:
%sge = icmp sge i32 %count, 1
@@ -415,8 +414,7 @@ define i1 @gep_count_add_1_sge_known_uge_1(i32 %count, ptr %p) {
; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[COUNT]], -1
; CHECK-NEXT: [[SUB_EXT:%.*]] = zext i32 [[SUB]] to i64
; CHECK-NEXT: [[GEP_SUB:%.*]] = getelementptr inbounds i32, ptr [[P]], i64 [[SUB_EXT]]
-; CHECK-NEXT: [[C:%.*]] = icmp uge ptr [[GEP_SUB]], [[P]]
-; CHECK-NEXT: ret i1 [[C]]
+; CHECK-NEXT: ret i1 true
;
entry:
%sge = icmp sge i32 %count, 1
diff --git a/llvm/test/Transforms/DeadArgElim/dbginfo.ll b/llvm/test/Transforms/DeadArgElim/dbginfo.ll
index a27ca9d..f6313c7 100644
--- a/llvm/test/Transforms/DeadArgElim/dbginfo.ll
+++ b/llvm/test/Transforms/DeadArgElim/dbginfo.ll
@@ -21,14 +21,14 @@
; updated LLVM functions.
; Function Attrs: uwtable
-define void @_Z2f2v() #0 !dbg !4 {
+define void @_Z2f2v() !dbg !4 {
entry:
call void (i32, ...) @_ZL2f1iz(i32 1), !dbg !15
ret void, !dbg !16
}
; Function Attrs: nounwind uwtable
-define internal void @_ZL2f1iz(i32, ...) #1 !dbg !8 {
+define internal void @_ZL2f1iz(i32, ...) !dbg !8 {
entry:
call void @llvm.dbg.value(metadata i32 %0, metadata !17, metadata !18), !dbg !19
ret void, !dbg !20
@@ -40,8 +40,6 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
; Function Attrs: nounwind readnone
declare void @llvm.dbg.value(metadata, metadata, metadata) #2
-attributes #0 = { uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #2 = { nounwind readnone }
!llvm.dbg.cu = !{!0}
diff --git a/llvm/test/Transforms/DeadStoreElimination/mda-with-dbg-values.ll b/llvm/test/Transforms/DeadStoreElimination/mda-with-dbg-values.ll
index f66aa0f..2cec6b5 100644
--- a/llvm/test/Transforms/DeadStoreElimination/mda-with-dbg-values.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/mda-with-dbg-values.ll
@@ -14,7 +14,7 @@ target triple = "x86_64-unknown-linux-gnu"
@g = common global [1 x i8] zeroinitializer, align 1, !dbg !0
; Function Attrs: noinline nounwind uwtable
-define void @foo() #0 !dbg !14 {
+define void @foo() !dbg !14 {
entry:
%i = alloca i8, align 1
store i8 1, ptr %i, align 1, !dbg !19
@@ -37,7 +37,6 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1
; Function Attrs: argmemonly nounwind
declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #2
-attributes #0 = { noinline nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #1 = { nounwind readnone speculatable }
attributes #2 = { argmemonly nounwind }
diff --git a/llvm/test/Transforms/FunctionImport/Inputs/funcimport_debug.ll b/llvm/test/Transforms/FunctionImport/Inputs/funcimport_debug.ll
index 4d3a241..628669d 100644
--- a/llvm/test/Transforms/FunctionImport/Inputs/funcimport_debug.ll
+++ b/llvm/test/Transforms/FunctionImport/Inputs/funcimport_debug.ll
@@ -3,13 +3,11 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind uwtable
-define void @func() #0 !dbg !4 {
+define void @func() !dbg !4 {
entry:
ret void, !dbg !10
}
-attributes #0 = { nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!7, !8}
!llvm.ident = !{!9}
diff --git a/llvm/test/Transforms/FunctionImport/funcimport_debug.ll b/llvm/test/Transforms/FunctionImport/funcimport_debug.ll
index f449047..713dc4e 100644
--- a/llvm/test/Transforms/FunctionImport/funcimport_debug.ll
+++ b/llvm/test/Transforms/FunctionImport/funcimport_debug.ll
@@ -24,16 +24,13 @@ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16
target triple = "x86_64-unknown-linux-gnu"
; Function Attrs: nounwind uwtable
-define i32 @main() #0 !dbg !4 {
+define i32 @main() !dbg !4 {
entry:
call void (...) @func(), !dbg !11
ret i32 0, !dbg !12
}
-declare void @func(...) #1
-
-attributes #0 = { nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
+declare void @func(...)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!8, !9}
diff --git a/llvm/test/Transforms/GCOVProfiling/exit-block.ll b/llvm/test/Transforms/GCOVProfiling/exit-block.ll
index 1840f04..543edac 100644
--- a/llvm/test/Transforms/GCOVProfiling/exit-block.ll
+++ b/llvm/test/Transforms/GCOVProfiling/exit-block.ll
@@ -13,7 +13,7 @@ target triple = "x86_64-unknown-linux-gnu"
@A = common global i32 0, align 4, !dbg !9
; Function Attrs: nounwind uwtable
-define void @test() #0 !dbg !4 {
+define void @test() !dbg !4 {
entry:
tail call void (...) @f() #2, !dbg !14
%0 = load i32, ptr @A, align 4, !dbg !15
@@ -28,12 +28,10 @@ if.end: ; preds = %entry, %if.then
ret void, !dbg !18
}
-declare void @f(...) #1
+declare void @f(...)
-declare void @g(...) #1
+declare void @g(...)
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
attributes #2 = { nounwind }
!llvm.gcov = !{!19}
diff --git a/llvm/test/Transforms/GCOVProfiling/linezero.ll b/llvm/test/Transforms/GCOVProfiling/linezero.ll
index 8bfeabd..1eae413 100644
--- a/llvm/test/Transforms/GCOVProfiling/linezero.ll
+++ b/llvm/test/Transforms/GCOVProfiling/linezero.ll
@@ -9,7 +9,7 @@ target triple = "x86_64-unknown-linux-gnu"
%struct.vector = type { i8 }
; Function Attrs: nounwind
-define i32 @_Z4testv() #0 !dbg !15 {
+define i32 @_Z4testv() !dbg !15 {
entry:
%retval = alloca i32, align 4
%__range = alloca ptr, align 8
@@ -63,19 +63,19 @@ return: ; No predecessors!
}
; Function Attrs: nounwind readnone
-declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
-declare void @_Z13TagFieldSpecsv() #2
+declare void @_Z13TagFieldSpecsv()
-declare ptr @_ZN6vector5beginEv(ptr) #2
+declare ptr @_ZN6vector5beginEv(ptr)
-declare ptr @_ZN6vector3endEv(ptr) #2
+declare ptr @_ZN6vector3endEv(ptr)
; Function Attrs: noreturn nounwind
-declare void @llvm.trap() #3
+declare void @llvm.trap()
; Function Attrs: nounwind
-define void @_Z2f1v() #0 !dbg !20 {
+define void @_Z2f1v() !dbg !20 {
entry:
br label %0
@@ -83,11 +83,6 @@ entry:
ret void, !dbg !45
}
-attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone }
-attributes #2 = { "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-realign-stack" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { noreturn nounwind }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!23, !24}
!llvm.gcov = !{!25}
diff --git a/llvm/test/Transforms/GCOVProfiling/split-indirectbr-critical-edges.ll b/llvm/test/Transforms/GCOVProfiling/split-indirectbr-critical-edges.ll
index 7f169e2..98bdd74 100644
--- a/llvm/test/Transforms/GCOVProfiling/split-indirectbr-critical-edges.ll
+++ b/llvm/test/Transforms/GCOVProfiling/split-indirectbr-critical-edges.ll
@@ -10,7 +10,7 @@
; CHECK-NEXT: load {{.*}} @__llvm_gcov_ctr
; CHECK-NOT: load {{.*}} @__llvm_gcov_ctr
-define dso_local i32 @cannot_split(ptr nocapture readonly %p) #0 !dbg !7 {
+define dso_local i32 @cannot_split(ptr nocapture readonly %p) !dbg !7 {
entry:
%targets = alloca <2 x ptr>, align 16
store <2 x ptr> <ptr blockaddress(@cannot_split, %indirect), ptr blockaddress(@cannot_split, %end)>, ptr %targets, align 16, !dbg !9
@@ -42,8 +42,6 @@ end: ; preds = %indirect
ret i32 0, !dbg !22
}
-attributes #0 = { norecurse nounwind readonly uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4, !5}
diff --git a/llvm/test/Transforms/GVN/cond_br2.ll b/llvm/test/Transforms/GVN/cond_br2.ll
index 6ceec95..4811ad0 100644
--- a/llvm/test/Transforms/GVN/cond_br2.ll
+++ b/llvm/test/Transforms/GVN/cond_br2.ll
@@ -11,12 +11,12 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
%"union.llvm::SmallVectorBase::U" = type { x86_fp80 }
; Function Attrs: ssp uwtable
-define void @_Z4testv() #0 personality ptr @__gxx_personality_v0 {
+define void @_Z4testv() personality ptr @__gxx_personality_v0 {
; CHECK-LABEL: define void @_Z4testv(
-; CHECK-SAME: ) #[[ATTR0:[0-9]+]] personality ptr @__gxx_personality_v0 {
+; CHECK-SAME: ) personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[SV:%.*]] = alloca %"class.llvm::SmallVector", align 16
-; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[SV]]) #[[ATTR4:[0-9]+]]
+; CHECK-NEXT: call void @llvm.lifetime.start.p0(ptr [[SV]])
; CHECK-NEXT: [[FIRSTEL_I_I_I_I_I_I:%.*]] = getelementptr inbounds %"class.llvm::SmallVector", ptr [[SV]], i64 0, i32 0, i32 0, i32 0, i32 0, i32 3
; CHECK-NEXT: store ptr [[FIRSTEL_I_I_I_I_I_I]], ptr [[SV]], align 16, !tbaa [[ANYPTR_TBAA0:![0-9]+]]
; CHECK-NEXT: [[ENDX_I_I_I_I_I_I:%.*]] = getelementptr inbounds %"class.llvm::SmallVector", ptr [[SV]], i64 0, i32 0, i32 0, i32 0, i32 0, i32 1
@@ -66,10 +66,10 @@ define void @_Z4testv() #0 personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: [[CMP_I_I_I_I19:%.*]] = icmp eq ptr [[TMP0]], [[FIRSTEL_I_I_I_I_I_I]]
; CHECK-NEXT: br i1 [[CMP_I_I_I_I19]], label %[[_ZN4LLVM11SMALLVECTORIILJ8EED1EV_EXIT21:.*]], label %[[IF_THEN_I_I_I20:.*]]
; CHECK: [[IF_THEN_I_I_I20]]:
-; CHECK-NEXT: call void @free(ptr [[TMP0]]) #[[ATTR4]]
+; CHECK-NEXT: call void @free(ptr [[TMP0]])
; CHECK-NEXT: br label %[[_ZN4LLVM11SMALLVECTORIILJ8EED1EV_EXIT21]]
; CHECK: [[_ZN4LLVM11SMALLVECTORIILJ8EED1EV_EXIT21]]:
-; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[SV]]) #[[ATTR4]]
+; CHECK-NEXT: call void @llvm.lifetime.end.p0(ptr [[SV]])
; CHECK-NEXT: ret void
; CHECK: [[LPAD]]:
; CHECK-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
@@ -78,7 +78,7 @@ define void @_Z4testv() #0 personality ptr @__gxx_personality_v0 {
; CHECK-NEXT: [[CMP_I_I_I_I:%.*]] = icmp eq ptr [[TMP2]], [[FIRSTEL_I_I_I_I_I_I]]
; CHECK-NEXT: br i1 [[CMP_I_I_I_I]], label %[[EH_RESUME:.*]], label %[[IF_THEN_I_I_I:.*]]
; CHECK: [[IF_THEN_I_I_I]]:
-; CHECK-NEXT: call void @free(ptr [[TMP2]]) #[[ATTR4]]
+; CHECK-NEXT: call void @free(ptr [[TMP2]])
; CHECK-NEXT: br label %[[EH_RESUME]]
; CHECK: [[EH_RESUME]]:
; CHECK-NEXT: resume { ptr, i32 } [[TMP1]]
@@ -86,7 +86,7 @@ define void @_Z4testv() #0 personality ptr @__gxx_personality_v0 {
entry:
%sv = alloca %"class.llvm::SmallVector", align 16
- call void @llvm.lifetime.start.p0(ptr %sv) #1
+ call void @llvm.lifetime.start.p0(ptr %sv)
%FirstEl.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", ptr %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 3
store ptr %FirstEl.i.i.i.i.i.i, ptr %sv, align 16, !tbaa !4
%EndX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", ptr %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 1
@@ -151,11 +151,11 @@ invoke.cont3: ; preds = %invoke.cont2
br i1 %cmp.i.i.i.i19, label %_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21, label %if.then.i.i.i20
if.then.i.i.i20: ; preds = %invoke.cont3
- call void @free(ptr %5) #1
+ call void @free(ptr %5)
br label %_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21
_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21: ; preds = %invoke.cont3, %if.then.i.i.i20
- call void @llvm.lifetime.end.p0(ptr %sv) #1
+ call void @llvm.lifetime.end.p0(ptr %sv)
ret void
lpad: ; preds = %if.end.i14, %if.end.i, %invoke.cont2
@@ -166,7 +166,7 @@ lpad: ; preds = %if.end.i14, %if.end
br i1 %cmp.i.i.i.i, label %eh.resume, label %if.then.i.i.i
if.then.i.i.i: ; preds = %lpad
- call void @free(ptr %7) #1
+ call void @free(ptr %7)
br label %eh.resume
eh.resume: ; preds = %if.then.i.i.i, %lpad
@@ -174,24 +174,19 @@ eh.resume: ; preds = %if.then.i.i.i, %lpa
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare i32 @__gxx_personality_v0(...)
-declare void @_Z1gRN4llvm11SmallVectorIiLj8EEE(ptr) #2
+declare void @_Z1gRN4llvm11SmallVectorIiLj8EEE(ptr)
; Function Attrs: nounwind
-declare void @llvm.lifetime.end.p0(ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture)
-declare void @_ZN4llvm15SmallVectorBase8grow_podEmm(ptr, i64, i64) #2
+declare void @_ZN4llvm15SmallVectorBase8grow_podEmm(ptr, i64, i64)
; Function Attrs: nounwind
-declare void @free(ptr nocapture) #3
-
-attributes #0 = { ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind }
-attributes #2 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+declare void @free(ptr nocapture)
!0 = !{!"any pointer", !1}
!1 = !{!"omnipotent char", !2}
diff --git a/llvm/test/Transforms/GVN/pr33549.ll b/llvm/test/Transforms/GVN/pr33549.ll
index a8ce37c..a1e28f7 100644
--- a/llvm/test/Transforms/GVN/pr33549.ll
+++ b/llvm/test/Transforms/GVN/pr33549.ll
@@ -4,9 +4,9 @@
@Data = common local_unnamed_addr global [32 x i32] zeroinitializer, align 4
; Function Attrs: norecurse nounwind
-define void @testshl() local_unnamed_addr #0 {
+define void @testshl() local_unnamed_addr {
; CHECK-LABEL: define void @testshl(
-; CHECK-SAME: ) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: ) local_unnamed_addr {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br label %[[FOR_BODY:.*]]
; CHECK: [[FOR_BODY]]:
@@ -78,8 +78,6 @@ for.end10: ; preds = %for.inc8
ret void
}
-attributes #0 = { norecurse nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m7" "target-features"="+d16,+dsp,+fp-armv8,+hwdiv,+strict-align,+thumb-mode" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.module.flags = !{!0, !1}
!llvm.ident = !{!2}
diff --git a/llvm/test/Transforms/GVN/pr42605.ll b/llvm/test/Transforms/GVN/pr42605.ll
index 3e6241c..98b447f8 100644
--- a/llvm/test/Transforms/GVN/pr42605.ll
+++ b/llvm/test/Transforms/GVN/pr42605.ll
@@ -114,7 +114,7 @@ if.end: ; preds = %if.then, %entry
ret void
}
-attributes #0 = { noinline norecurse nounwind readonly uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { noinline norecurse nounwind readonly uwtable }
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Transforms/GVNHoist/hoist-unsafe-pr31729.ll b/llvm/test/Transforms/GVNHoist/hoist-unsafe-pr31729.ll
index b9c8537..082d016 100644
--- a/llvm/test/Transforms/GVNHoist/hoist-unsafe-pr31729.ll
+++ b/llvm/test/Transforms/GVNHoist/hoist-unsafe-pr31729.ll
@@ -14,7 +14,7 @@
@res = common global i32 0, align 4
; Function Attrs:
-define i64 @func() #0 {
+define i64 @func() {
entry:
ret i64 1
}
@@ -27,7 +27,7 @@ entry:
%2 = load volatile i32, ptr @g_x_u, align 4
%3 = load volatile i32, ptr @g_z_u, align 4
%4 = load volatile i32, ptr @g_m, align 4
- %call = call i64 @func() #4
+ %call = call i64 @func()
%conv = sext i32 %1 to i64
%cmp = icmp ne i64 %call, %conv
br i1 %cmp, label %if.end, label %lor.lhs.false
@@ -42,7 +42,7 @@ if.then:
br label %cleanup
if.end:
- %call4 = call i64 @func() #4
+ %call4 = call i64 @func()
%conv5 = zext i32 %3 to i64
%cmp6 = icmp ne i64 %call4, %conv5
br i1 %cmp6, label %if.end14, label %lor.lhs.false8
@@ -57,7 +57,7 @@ if.then13:
br label %cleanup
if.end14:
- %call15 = call i64 @func() #4
+ %call15 = call i64 @func()
%cmp17 = icmp ne i64 %call15, %conv
br i1 %cmp17, label %if.end25, label %lor.lhs.false19
@@ -77,5 +77,3 @@ cleanup:
%retval.0 = phi i32 [ 0, %if.end25 ], [ 1, %if.then24 ], [ 1, %if.then13 ], [ 1, %if.then ]
ret i32 %retval.0
}
-
-attributes #0 = { minsize noinline nounwind optsize uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/GVNHoist/pr30499.ll b/llvm/test/Transforms/GVNHoist/pr30499.ll
index bd6f98c..6df9484b 100644
--- a/llvm/test/Transforms/GVNHoist/pr30499.ll
+++ b/llvm/test/Transforms/GVNHoist/pr30499.ll
@@ -1,6 +1,6 @@
; RUN: opt -S -passes=gvn-hoist < %s
-define void @_Z3fn2v() #0 {
+define void @_Z3fn2v() {
entry:
%a = alloca ptr, align 8
%b = alloca i32, align 4
@@ -11,7 +11,7 @@ entry:
br i1 %tobool, label %if.then, label %if.end
if.then: ; preds = %entry
- %call = call i64 @_Z3fn1v() #2
+ %call = call i64 @_Z3fn1v()
%conv = trunc i64 %call to i32
store i32 %conv, ptr %b, align 4
br label %if.end
@@ -23,8 +23,4 @@ if.end: ; preds = %if.then, %entry
}
; Function Attrs: nounwind readonly
-declare i64 @_Z3fn1v() #1
-
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readonly "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind readonly }
+declare i64 @_Z3fn1v()
diff --git a/llvm/test/Transforms/IndVarSimplify/X86/widen-nsw.ll b/llvm/test/Transforms/IndVarSimplify/X86/widen-nsw.ll
index 7cba30d..81a9323 100644
--- a/llvm/test/Transforms/IndVarSimplify/X86/widen-nsw.ll
+++ b/llvm/test/Transforms/IndVarSimplify/X86/widen-nsw.ll
@@ -4,7 +4,7 @@ target triple = "x86_64-apple-macosx"
; CHECK-LABEL: @test1
; CHECK: %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
-define i32 @test1(ptr %a) #0 {
+define i32 @test1(ptr %a) {
entry:
br label %for.cond
@@ -25,5 +25,3 @@ for.body: ; preds = %for.cond
for.end: ; preds = %for.cond
ret i32 %sum.0
}
-
-attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/Inline/always-inline-attr.ll b/llvm/test/Transforms/Inline/always-inline-attr.ll
index 08ea307..4e69822 100644
--- a/llvm/test/Transforms/Inline/always-inline-attr.ll
+++ b/llvm/test/Transforms/Inline/always-inline-attr.ll
@@ -6,7 +6,7 @@ target triple = "x86_64-grtev4-linux-gnu"
; After AlwaysInline the callee's attributes should be merged into caller's attributes.
-; CHECK: define dso_local <2 x i64> @foo(ptr byval(<8 x i64>) align 64 %0) #0
+; CHECK: define dso_local <2 x i64> @foo(ptr byval(<8 x i64>) align 64 %0)
; CHECK: attributes #0 = { mustprogress uwtable "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="512"
; Function Attrs: uwtable mustprogress
@@ -36,12 +36,10 @@ entry:
}
; Function Attrs: nounwind readnone
-declare <16 x i8> @llvm.x86.avx512.mask.pmovs.db.512(<16 x i32>, <16 x i8>, i16) #2
-
-attributes #0 = { uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "prefer-vector-width"="128" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+aes,+avx,+avx2,+avx512bw,+avx512dq,+avx512f,+avx512vl,+bmi2,+cx16,+cx8,+f16c,+fma,+fxsr,+mmx,+pclmul,+popcnt,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { alwaysinline nounwind uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="512" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "prefer-vector-width"="128" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+aes,+avx,+avx2,+avx512f,+cx16,+cx8,+f16c,+fma,+fxsr,+mmx,+pclmul,+popcnt,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave" "tune-cpu"="generic" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind readnone }
+declare <16 x i8> @llvm.x86.avx512.mask.pmovs.db.512(<16 x i32>, <16 x i8>, i16)
+attributes #0 = { uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="128" "prefer-vector-width"="128" }
+attributes #1 = { alwaysinline nounwind uwtable mustprogress "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="512" "prefer-vector-width"="128" }
!2 = !{!3, !3, i64 0}
!3 = !{!"omnipotent char", !4, i64 0}
diff --git a/llvm/test/Transforms/Inline/debug-info-duplicate-calls.ll b/llvm/test/Transforms/Inline/debug-info-duplicate-calls.ll
index a4c4134..bf0dfa2 100644
--- a/llvm/test/Transforms/Inline/debug-info-duplicate-calls.ll
+++ b/llvm/test/Transforms/Inline/debug-info-duplicate-calls.ll
@@ -89,11 +89,10 @@ entry:
ret void, !dbg !20
}
-declare void @_Z2f1v() #2
+attributes #0 = { uwtable }
+attributes #1 = { alwaysinline inlinehint uwtable }
-attributes #0 = { uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { alwaysinline inlinehint uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+declare void @_Z2f1v()
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!10, !11}
diff --git a/llvm/test/Transforms/Inline/inline-vla.ll b/llvm/test/Transforms/Inline/inline-vla.ll
index 8e4bb3d..1e3c235 100644
--- a/llvm/test/Transforms/Inline/inline-vla.ll
+++ b/llvm/test/Transforms/Inline/inline-vla.ll
@@ -9,7 +9,7 @@
@.str1 = private unnamed_addr constant [3 x i8] c"ab\00", align 1
; Function Attrs: nounwind ssp uwtable
-define i32 @main(i32 %argc, ptr nocapture readnone %argv) #0 {
+define i32 @main(i32 %argc, ptr nocapture readnone %argv) {
entry:
%data = alloca [2 x i8], align 1
call fastcc void @memcpy2(ptr %data, ptr @.str, i64 1)
@@ -18,7 +18,7 @@ entry:
}
; Function Attrs: inlinehint nounwind ssp uwtable
-define internal fastcc void @memcpy2(ptr nocapture %dst, ptr nocapture readonly %src, i64 %size) #1 {
+define internal fastcc void @memcpy2(ptr nocapture %dst, ptr nocapture readonly %src, i64 %size) {
entry:
%vla = alloca i64, i64 %size, align 16
call void @llvm.memcpy.p0.p0.i64(ptr %vla, ptr %src, i64 %size, i1 false)
@@ -27,11 +27,7 @@ entry:
}
; Function Attrs: nounwind
-declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1) #2
-
-attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { inlinehint nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind }
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
!llvm.ident = !{!0}
diff --git a/llvm/test/Transforms/Inline/optimization-remarks-hotness-threshold.ll b/llvm/test/Transforms/Inline/optimization-remarks-hotness-threshold.ll
index 3021935..2b4aedd 100644
--- a/llvm/test/Transforms/Inline/optimization-remarks-hotness-threshold.ll
+++ b/llvm/test/Transforms/Inline/optimization-remarks-hotness-threshold.ll
@@ -27,20 +27,18 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.11.0"
; Function Attrs: nounwind ssp uwtable
-define i32 @foo() #0 !dbg !7 {
+define i32 @foo() !dbg !7 {
entry:
ret i32 1, !dbg !9
}
; Function Attrs: nounwind ssp uwtable
-define i32 @bar() #0 !dbg !10 {
+define i32 @bar() !dbg !10 {
entry:
%call = call i32 @foo(), !dbg !11
ret i32 %call, !dbg !12
}
-attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4, !5}
!llvm.ident = !{!6}
diff --git a/llvm/test/Transforms/Inline/optimization-remarks-passed-deleted-callee-yaml.ll b/llvm/test/Transforms/Inline/optimization-remarks-passed-deleted-callee-yaml.ll
index d394713..f547c37 100644
--- a/llvm/test/Transforms/Inline/optimization-remarks-passed-deleted-callee-yaml.ll
+++ b/llvm/test/Transforms/Inline/optimization-remarks-passed-deleted-callee-yaml.ll
@@ -63,20 +63,18 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.11.0"
; Function Attrs: nounwind ssp uwtable
-define internal i32 @foo() #0 !dbg !7 {
+define internal i32 @foo() !dbg !7 {
entry:
ret i32 1, !dbg !9
}
; Function Attrs: nounwind ssp uwtable
-define i32 @bar() #0 !dbg !10 !prof !13 {
+define i32 @bar() !dbg !10 !prof !13 {
entry:
%call = call i32 @foo(), !dbg !11
ret i32 %call, !dbg !12
}
-attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4, !5}
!llvm.ident = !{!6}
diff --git a/llvm/test/Transforms/Inline/optimization-remarks-passed-yaml.ll b/llvm/test/Transforms/Inline/optimization-remarks-passed-yaml.ll
index b0a238f..64b305b 100644
--- a/llvm/test/Transforms/Inline/optimization-remarks-passed-yaml.ll
+++ b/llvm/test/Transforms/Inline/optimization-remarks-passed-yaml.ll
@@ -72,20 +72,18 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.11.0"
; Function Attrs: nounwind ssp uwtable
-define i32 @foo() #0 !dbg !7 {
+define i32 @foo() !dbg !7 {
entry:
ret i32 1, !dbg !9
}
; Function Attrs: nounwind ssp uwtable
-define i32 @bar() #0 !dbg !10 !prof !13 {
+define i32 @bar() !dbg !10 !prof !13 {
entry:
%call = call i32 @foo(), !dbg !11
ret i32 %call, !dbg !12
}
-attributes #0 = { nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4, !5}
!llvm.ident = !{!6}
diff --git a/llvm/test/Transforms/Inline/optimization-remarks.ll b/llvm/test/Transforms/Inline/optimization-remarks.ll
index bc1e690..135d835 100644
--- a/llvm/test/Transforms/Inline/optimization-remarks.ll
+++ b/llvm/test/Transforms/Inline/optimization-remarks.ll
@@ -81,9 +81,9 @@ entry:
ret i32 %add
}
-attributes #0 = { alwaysinline nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { noinline nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { alwaysinline nounwind uwtable }
+attributes #1 = { noinline nounwind uwtable }
+attributes #2 = { nounwind uwtable }
!llvm.ident = !{!0}
diff --git a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-to-svbool-binops.ll b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-to-svbool-binops.ll
index ecedbdb..abe1ed0 100644
--- a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-to-svbool-binops.ll
+++ b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-to-svbool-binops.ll
@@ -124,6 +124,39 @@ define <vscale x 8 x i1> @try_combine_svbool_binop_orr(<vscale x 8 x i1> %a, <vs
ret <vscale x 8 x i1> %t3
}
+; Verify predicate cast does not hinder "isAllActive" knowledge.
+define <vscale x 8 x half> @try_combine_svbool_binop_fadd(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
+; CHECK-LABEL: @try_combine_svbool_binop_fadd(
+; CHECK-NEXT: [[T2:%.*]] = fadd <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: ret <vscale x 8 x half> [[T2]]
+;
+ %t1 = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> splat (i1 true))
+ %t2 = tail call <vscale x 8 x half> @llvm.aarch64.sve.fadd.nxv8f16(<vscale x 8 x i1> %t1, <vscale x 8 x half> %a, <vscale x 8 x half> %b)
+ ret <vscale x 8 x half> %t2
+}
+
+; Verify predicate cast does not hinder "isAllActive" knowledge.
+define <vscale x 4 x float> @try_combine_svbool_binop_fmul(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: @try_combine_svbool_binop_fmul(
+; CHECK-NEXT: [[T2:%.*]] = fmul <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: ret <vscale x 4 x float> [[T2]]
+;
+ %t1 = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> splat (i1 true))
+ %t2 = tail call <vscale x 4 x float> @llvm.aarch64.sve.fmul.nxv4f32(<vscale x 4 x i1> %t1, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
+ ret <vscale x 4 x float> %t2
+}
+
+; Verify predicate cast does not hinder "isAllActive" knowledge.
+define <vscale x 2 x double> @try_combine_svbool_binop_fsub(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
+; CHECK-LABEL: @try_combine_svbool_binop_fsub(
+; CHECK-NEXT: [[T2:%.*]] = fsub <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: ret <vscale x 2 x double> [[T2]]
+;
+ %t1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> splat (i1 true))
+ %t2 = tail call <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1> %t1, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
+ ret <vscale x 2 x double> %t2
+}
+
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1>)
declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1>)
diff --git a/llvm/test/Transforms/InstCombine/bitreverse-hang.ll b/llvm/test/Transforms/InstCombine/bitreverse-hang.ll
index bb01299..3f29509 100644
--- a/llvm/test/Transforms/InstCombine/bitreverse-hang.ll
+++ b/llvm/test/Transforms/InstCombine/bitreverse-hang.ll
@@ -21,7 +21,7 @@
@b = common global i32 0, align 4
; CHECK: define i32 @fn1
-define i32 @fn1() #0 {
+define i32 @fn1() {
entry:
%b.promoted = load i32, ptr @b, align 4, !tbaa !2
br label %for.body
@@ -40,8 +40,6 @@ for.end: ; preds = %for.body
ret i32 undef
}
-attributes #0 = { norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Transforms/InstCombine/select-extractelement-inseltpoison.ll b/llvm/test/Transforms/InstCombine/select-extractelement-inseltpoison.ll
index 2348490..2d06f42 100644
--- a/llvm/test/Transforms/InstCombine/select-extractelement-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/select-extractelement-inseltpoison.ll
@@ -208,6 +208,3 @@ define <4 x i32> @extract_cond_type_mismatch(<4 x i32> %x, <4 x i32> %y, <5 x i1
%r = select i1 %cond, <4 x i32> %x, <4 x i32> %y
ret <4 x i32> %r
}
-
-
-attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/InstCombine/select-extractelement.ll b/llvm/test/Transforms/InstCombine/select-extractelement.ll
index 621d278..6f80b7d 100644
--- a/llvm/test/Transforms/InstCombine/select-extractelement.ll
+++ b/llvm/test/Transforms/InstCombine/select-extractelement.ll
@@ -1,9 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=instcombine < %s | FileCheck %s
-declare void @v4float_user(<4 x float>) #0
+declare void @v4float_user(<4 x float>)
-define float @extract_one_select(<4 x float> %a, <4 x float> %b, i32 %c) #0 {
+define float @extract_one_select(<4 x float> %a, <4 x float> %b, i32 %c) {
; CHECK-LABEL: @extract_one_select(
; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[C:%.*]], 0
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP_NOT]], <4 x float> [[B:%.*]], <4 x float> [[A:%.*]]
@@ -17,7 +17,7 @@ define float @extract_one_select(<4 x float> %a, <4 x float> %b, i32 %c) #0 {
}
; Multiple extractelements
-define <2 x float> @extract_two_select(<4 x float> %a, <4 x float> %b, i32 %c) #0 {
+define <2 x float> @extract_two_select(<4 x float> %a, <4 x float> %b, i32 %c) {
; CHECK-LABEL: @extract_two_select(
; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[C:%.*]], 0
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP_NOT]], <4 x float> [[B:%.*]], <4 x float> [[A:%.*]]
@@ -34,7 +34,7 @@ define <2 x float> @extract_two_select(<4 x float> %a, <4 x float> %b, i32 %c) #
}
; Select has an extra non-extractelement user, don't change it
-define float @extract_one_select_user(<4 x float> %a, <4 x float> %b, i32 %c) #0 {
+define float @extract_one_select_user(<4 x float> %a, <4 x float> %b, i32 %c) {
; CHECK-LABEL: @extract_one_select_user(
; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[C:%.*]], 0
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP_NOT]], <4 x float> [[B:%.*]], <4 x float> [[A:%.*]]
@@ -49,7 +49,7 @@ define float @extract_one_select_user(<4 x float> %a, <4 x float> %b, i32 %c) #0
ret float %extract
}
-define float @extract_one_vselect_user(<4 x float> %a, <4 x float> %b, <4 x i32> %c) #0 {
+define float @extract_one_vselect_user(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
; CHECK-LABEL: @extract_one_vselect_user(
; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq <4 x i32> [[C:%.*]], zeroinitializer
; CHECK-NEXT: [[SEL:%.*]] = select <4 x i1> [[CMP_NOT]], <4 x float> [[B:%.*]], <4 x float> [[A:%.*]]
@@ -67,7 +67,7 @@ define float @extract_one_vselect_user(<4 x float> %a, <4 x float> %b, <4 x i32>
; Do not convert the vector select into a scalar select. That would increase
; the instruction count and potentially obfuscate a vector min/max idiom.
-define float @extract_one_vselect(<4 x float> %a, <4 x float> %b, <4 x i32> %c) #0 {
+define float @extract_one_vselect(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
; CHECK-LABEL: @extract_one_vselect(
; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq <4 x i32> [[C:%.*]], zeroinitializer
; CHECK-NEXT: [[SELECT:%.*]] = select <4 x i1> [[CMP_NOT]], <4 x float> [[B:%.*]], <4 x float> [[A:%.*]]
@@ -81,7 +81,7 @@ define float @extract_one_vselect(<4 x float> %a, <4 x float> %b, <4 x i32> %c)
}
; Multiple extractelements from a vector select
-define <2 x float> @extract_two_vselect(<4 x float> %a, <4 x float> %b, <4 x i32> %c) #0 {
+define <2 x float> @extract_two_vselect(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
; CHECK-LABEL: @extract_two_vselect(
; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq <4 x i32> [[C:%.*]], zeroinitializer
; CHECK-NEXT: [[SEL:%.*]] = select <4 x i1> [[CMP_NOT]], <4 x float> [[B:%.*]], <4 x float> [[A:%.*]]
@@ -100,7 +100,7 @@ define <2 x float> @extract_two_vselect(<4 x float> %a, <4 x float> %b, <4 x i32
; The vector selects are not decomposed into scalar selects because that would increase
; the instruction count. Extract+insert is converted to non-lane-crossing shuffles.
; Test multiple extractelements
-define <4 x float> @simple_vector_select(<4 x float> %a, <4 x float> %b, <4 x i32> %c) #0 {
+define <4 x float> @simple_vector_select(<4 x float> %a, <4 x float> %b, <4 x i32> %c) {
; CHECK-LABEL: @simple_vector_select(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = extractelement <4 x i32> [[C:%.*]], i64 0
@@ -232,5 +232,3 @@ define i32 @inf_loop_partial_undef(<2 x i1> %a, <2 x i1> %b, <2 x i32> %x, <2 x
%t11 = extractelement <2 x i32> %p, i32 0
ret i32 %t11
}
-
-attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/JumpThreading/ddt-crash3.ll b/llvm/test/Transforms/JumpThreading/ddt-crash3.ll
index edaade32..26ba857 100644
--- a/llvm/test/Transforms/JumpThreading/ddt-crash3.ll
+++ b/llvm/test/Transforms/JumpThreading/ddt-crash3.ll
@@ -9,9 +9,9 @@ target triple = "x86_64-unknown-linux-gnu"
@global.2 = external local_unnamed_addr global i64, align 8
; Function Attrs: norecurse noreturn nounwind uwtable
-define void @hoge() local_unnamed_addr #0 {
+define void @hoge() local_unnamed_addr {
; CHECK-LABEL: define void @hoge(
-; CHECK-SAME: ) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: ) local_unnamed_addr {
; CHECK-NEXT: [[BB:.*:]]
; CHECK-NEXT: br label %[[BB1:.*]]
; CHECK: [[BB1]]:
@@ -48,8 +48,6 @@ bb27: ; preds = %bb1
br label %bb26
}
-attributes #0 = { norecurse noreturn nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.ident = !{!0}
!0 = !{!"clang version 7.0.0 "}
diff --git a/llvm/test/Transforms/LICM/volatile-alias.ll b/llvm/test/Transforms/LICM/volatile-alias.ll
index 410f3be..35e8844 100644
--- a/llvm/test/Transforms/LICM/volatile-alias.ll
+++ b/llvm/test/Transforms/LICM/volatile-alias.ll
@@ -10,7 +10,7 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; Function Attrs: nounwind uwtable
-define i32 @foo(ptr dereferenceable(4) nonnull %p, ptr %q, i32 %n) #0 {
+define i32 @foo(ptr dereferenceable(4) nonnull %p, ptr %q, i32 %n) {
entry:
%p.addr = alloca ptr, align 8
%q.addr = alloca ptr, align 8
@@ -51,5 +51,3 @@ for.end: ; preds = %for.cond
%8 = load i32, ptr %s, align 4
ret i32 %8
}
-
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/LoopRotate/noalias.ll b/llvm/test/Transforms/LoopRotate/noalias.ll
index f709bba..d6b2414 100644
--- a/llvm/test/Transforms/LoopRotate/noalias.ll
+++ b/llvm/test/Transforms/LoopRotate/noalias.ll
@@ -146,11 +146,7 @@ for.end: ; preds = %for.cond
}
; Function Attrs: inaccessiblememonly nounwind
-declare void @llvm.experimental.noalias.scope.decl(metadata) #1
-
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="all" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { inaccessiblememonly nounwind }
-attributes #2 = { nounwind readnone speculatable }
+declare void @llvm.experimental.noalias.scope.decl(metadata)
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
index 5423368..25ded4b 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/AMDGPU/different-addrspace-addressing-mode-loops.ll
@@ -157,4 +157,4 @@ bb:
br i1 %exitcond, label %._crit_edge.loopexit, label %.lr.ph
}
-attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hawaii" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="hawaii" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/pr17473.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/pr17473.ll
index 6c34d1b..a878fc2 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/X86/pr17473.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/X86/pr17473.ll
@@ -62,6 +62,6 @@ for.end: ; preds = %fn3.exit
; Function Attrs: nounwind optsize
declare i32 @printf(ptr nocapture readonly, ...) #1
-attributes #0 = { nounwind optsize ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind optsize "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind optsize ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
+attributes #1 = { nounwind optsize "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #2 = { nounwind optsize }
diff --git a/llvm/test/Transforms/LoopStrengthReduce/pr18165.ll b/llvm/test/Transforms/LoopStrengthReduce/pr18165.ll
index 914ea7a..9c20685 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/pr18165.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/pr18165.ll
@@ -71,8 +71,8 @@ fn1.exit: ; preds = %lor.end.i
; Function Attrs: nounwind optsize
declare i32 @printf(ptr nocapture readonly, ...) #1
-attributes #0 = { nounwind optsize ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind optsize "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind optsize ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
+attributes #1 = { nounwind optsize "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
attributes #2 = { nounwind optsize }
!llvm.ident = !{!0}
diff --git a/llvm/test/Transforms/LoopStrengthReduce/two-combinations-bug.ll b/llvm/test/Transforms/LoopStrengthReduce/two-combinations-bug.ll
index 3364465..37e2f68 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/two-combinations-bug.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/two-combinations-bug.ll
@@ -46,7 +46,7 @@ for.body3: ; preds = %for.body3, %for.bod
br i1 %exitcond, label %for.cond.loopexit, label %for.body3
}
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
!0 = !{!1, !2, i64 16}
!1 = !{!"planet", !2, i64 0, !2, i64 8, !2, i64 16, !2, i64 24, !2, i64 32, !2, i64 40, !2, i64 48}
diff --git a/llvm/test/Transforms/LoopUnroll/runtime-epilog-debuginfo.ll b/llvm/test/Transforms/LoopUnroll/runtime-epilog-debuginfo.ll
index ee28aa1..df4dfa6 100644
--- a/llvm/test/Transforms/LoopUnroll/runtime-epilog-debuginfo.ll
+++ b/llvm/test/Transforms/LoopUnroll/runtime-epilog-debuginfo.ll
@@ -76,7 +76,7 @@ lee1.exit: ; preds = %lee1.exit.loopexit,
; Function Attrs: nounwind readnone
declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1
-attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="arm7tdmi" "target-features"="+neon,+strict-align,+vfp3,-crypto,-fp-armv8,-fp16,-vfp4" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="arm7tdmi" "target-features"="+neon,+strict-align,+vfp3,-crypto,-fp-armv8,-fp16,-vfp4" "use-soft-float"="false" }
attributes #1 = { nounwind readnone }
!llvm.dbg.cu = !{!0}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll
index 27ca414..199203a 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/fully-unrolled-cost.ll
@@ -86,7 +86,8 @@ define i64 @test_two_ivs(ptr %a, ptr %b, i64 %start) #0 {
; CHECK-NEXT: Cost of 0 for VF 16: induction instruction %i.iv = phi i64 [ 0, %entry ], [ %i.iv.next, %for.body ]
; CHECK-NEXT: Cost of 0 for VF 16: induction instruction %j.iv = phi i64 [ %start, %entry ], [ %j.iv.next, %for.body ]
; CHECK-NEXT: Cost of 0 for VF 16: EMIT vp<{{.+}}> = CANONICAL-INDUCTION ir<0>, vp<%index.next>
-; CHECK: Cost for VF 16: 41
+; CHECK: Cost of 1 for VF 16: EXPRESSION vp<%11> = ir<%sum> + partial.reduce.add (mul nuw nsw (ir<%1> zext to i64), (ir<%0> zext to i64))
+; CHECK: Cost for VF 16: 3
; CHECK: LV: Selecting VF: 16
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/maxbandwidth-regpressure.ll b/llvm/test/Transforms/LoopVectorize/AArch64/maxbandwidth-regpressure.ll
index 2d15431..8109d068 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/maxbandwidth-regpressure.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/maxbandwidth-regpressure.ll
@@ -6,8 +6,7 @@ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-none-unknown-elf"
define i32 @dotp(ptr %a, ptr %b) #0 {
-; CHECK-REGS-VP-NOT: LV(REG): Not considering vector loop of width vscale x 16 because it uses too many registers
-; CHECK-REGS-VP: LV: Selecting VF: vscale x 8.
+; CHECK-REGS-VP: LV: Selecting VF: vscale x 16.
;
; CHECK-NOREGS-VP: LV(REG): Not considering vector loop of width vscale x 8 because it uses too many registers
; CHECK-NOREGS-VP: LV(REG): Not considering vector loop of width vscale x 16 because it uses too many registers
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll
index 3dfa6df..287226f 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-chained.ll
@@ -31,9 +31,9 @@ define i32 @chained_partial_reduce_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-NEON-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; CHECK-NEON-NEXT: [[TMP7:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP8]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP10]])
+; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP9]]
; CHECK-NEON-NEXT: [[TMP13:%.*]] = sub <16 x i32> zeroinitializer, [[TMP12]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP13]])
@@ -52,37 +52,34 @@ define i32 @chained_partial_reduce_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2
; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1
; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64
-; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]]
+; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-SVE: vector.ph:
-; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
+; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ]
+; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ]
; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]]
-; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1
-; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD2]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP14]]
-; CHECK-SVE-NEXT: [[TMP17:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]]
-; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP15]]
-; CHECK-SVE-NEXT: [[TMP19]] = sub <vscale x 4 x i32> [[TMP17]], [[TMP18]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1
+; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]])
+; CHECK-SVE-NEXT: [[TMP6:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP11:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP6]]
+; CHECK-SVE-NEXT: [[TMP12:%.*]] = sub <16 x i32> zeroinitializer, [[TMP11]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP12]])
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-SVE: middle.block:
-; CHECK-SVE-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]])
+; CHECK-SVE-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]])
; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK-SVE: scalar.ph:
@@ -114,10 +111,10 @@ define i32 @chained_partial_reduce_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP9]], align 1
; CHECK-SVE-MAXBW-NEXT: [[TMP13:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP14:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
-; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP16]])
-; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP15]]
+; CHECK-SVE-MAXBW-NEXT: [[TMP11:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
+; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP11]]
; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = sub <vscale x 8 x i32> zeroinitializer, [[TMP17]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP18]])
; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
@@ -184,9 +181,9 @@ define i32 @chained_partial_reduce_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-NEON-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; CHECK-NEON-NEXT: [[TMP7:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP8]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP10]])
+; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP11:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP9]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP11]])
; CHECK-NEON-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
@@ -221,9 +218,9 @@ define i32 @chained_partial_reduce_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1
; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]]
; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]])
+; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-SVE-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP5]]
; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP10]])
; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
@@ -262,10 +259,10 @@ define i32 @chained_partial_reduce_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP9]], align 1
; CHECK-SVE-MAXBW-NEXT: [[TMP13:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP14:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
-; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP16]])
-; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP15]]
+; CHECK-SVE-MAXBW-NEXT: [[TMP11:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
+; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP11]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP17]])
; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -331,11 +328,11 @@ define i32 @chained_partial_reduce_sub_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-NEON-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; CHECK-NEON-NEXT: [[TMP7:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP8]]
; CHECK-NEON-NEXT: [[TMP11:%.*]] = sub nsw <16 x i32> zeroinitializer, [[TMP10]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP11]])
-; CHECK-NEON-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP9]]
+; CHECK-NEON-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-NEON-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP13]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP12]])
; CHECK-NEON-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEON-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -352,37 +349,34 @@ define i32 @chained_partial_reduce_sub_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2
; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1
; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64
-; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]]
+; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-SVE: vector.ph:
-; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
+; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ]
+; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ]
; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]]
-; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1
-; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD2]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP14]]
-; CHECK-SVE-NEXT: [[TMP17:%.*]] = sub <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]]
-; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP15]]
-; CHECK-SVE-NEXT: [[TMP19]] = add <vscale x 4 x i32> [[TMP17]], [[TMP18]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1
+; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]]
+; CHECK-SVE-NEXT: [[TMP6:%.*]] = sub nsw <16 x i32> zeroinitializer, [[TMP5]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]])
+; CHECK-SVE-NEXT: [[TMP11:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP11]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP12]])
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK-SVE: middle.block:
-; CHECK-SVE-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]])
+; CHECK-SVE-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]])
; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK-SVE: scalar.ph:
@@ -414,11 +408,11 @@ define i32 @chained_partial_reduce_sub_add(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP9]], align 1
; CHECK-SVE-MAXBW-NEXT: [[TMP13:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP14:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
-; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = sub nsw <vscale x 8 x i32> zeroinitializer, [[TMP16]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP17]])
-; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP15]]
+; CHECK-SVE-MAXBW-NEXT: [[TMP12:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
+; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP12]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP18]])
; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -486,11 +480,11 @@ define i32 @chained_partial_reduce_sub_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-NEON-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; CHECK-NEON-NEXT: [[TMP7:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP8]]
; CHECK-NEON-NEXT: [[TMP11:%.*]] = sub nsw <16 x i32> zeroinitializer, [[TMP10]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP11]])
-; CHECK-NEON-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP9]]
+; CHECK-NEON-NEXT: [[TMP16:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-NEON-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP16]]
; CHECK-NEON-NEXT: [[TMP13:%.*]] = sub <16 x i32> zeroinitializer, [[TMP12]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP13]])
; CHECK-NEON-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
@@ -508,37 +502,35 @@ define i32 @chained_partial_reduce_sub_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2
; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1
; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64
-; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]]
+; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-SVE: vector.ph:
-; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
+; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP19:%.*]], [[VECTOR_BODY]] ]
+; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE3:%.*]], [[VECTOR_BODY]] ]
; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]]
-; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1
-; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD2]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP14]]
-; CHECK-SVE-NEXT: [[TMP17:%.*]] = sub <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]]
-; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP15]]
-; CHECK-SVE-NEXT: [[TMP19]] = sub <vscale x 4 x i32> [[TMP17]], [[TMP18]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1
+; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]]
+; CHECK-SVE-NEXT: [[TMP6:%.*]] = sub nsw <16 x i32> zeroinitializer, [[TMP5]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]])
+; CHECK-SVE-NEXT: [[TMP10:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP10]]
+; CHECK-SVE-NEXT: [[TMP13:%.*]] = sub <16 x i32> zeroinitializer, [[TMP12]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP13]])
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-SVE-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-SVE: middle.block:
-; CHECK-SVE-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP19]])
+; CHECK-SVE-NEXT: [[TMP11:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE3]])
; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK-SVE: scalar.ph:
@@ -570,11 +562,11 @@ define i32 @chained_partial_reduce_sub_sub(ptr %a, ptr %b, ptr %c, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP9]], align 1
; CHECK-SVE-MAXBW-NEXT: [[TMP13:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP14:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
-; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = sub nsw <vscale x 8 x i32> zeroinitializer, [[TMP16]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP17]])
-; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP15]]
+; CHECK-SVE-MAXBW-NEXT: [[TMP12:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
+; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP12]]
; CHECK-SVE-MAXBW-NEXT: [[TMP19:%.*]] = sub <vscale x 8 x i32> zeroinitializer, [[TMP18]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP19]])
; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
@@ -644,12 +636,12 @@ define i32 @chained_partial_reduce_add_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-NEON-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; CHECK-NEON-NEXT: [[TMP7:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP8]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP10]])
-; CHECK-NEON-NEXT: [[TMP11:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP9]]
+; CHECK-NEON-NEXT: [[TMP15:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-NEON-NEXT: [[TMP11:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP15]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE3:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP11]])
-; CHECK-NEON-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP8]], [[TMP9]]
+; CHECK-NEON-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP8]], [[TMP15]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE4]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE3]], <16 x i32> [[TMP12]])
; CHECK-NEON-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEON-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -683,9 +675,9 @@ define i32 @chained_partial_reduce_add_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1
; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]]
; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]])
+; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-SVE-NEXT: [[TMP11:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP5]]
; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP11]])
; CHECK-SVE-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP4]], [[TMP5]]
@@ -726,12 +718,12 @@ define i32 @chained_partial_reduce_add_add_add(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP9]], align 1
; CHECK-SVE-MAXBW-NEXT: [[TMP13:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP14:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
-; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP16]])
-; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP15]]
+; CHECK-SVE-MAXBW-NEXT: [[TMP11:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
+; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP11]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP17]])
-; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP14]], [[TMP15]]
+; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP14]], [[TMP11]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE4]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE3]], <vscale x 8 x i32> [[TMP18]])
; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -802,13 +794,13 @@ define i32 @chained_partial_reduce_sub_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-NEON-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; CHECK-NEON-NEXT: [[TMP7:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP8]]
; CHECK-NEON-NEXT: [[TMP11:%.*]] = sub nsw <16 x i32> zeroinitializer, [[TMP10]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP11]])
-; CHECK-NEON-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP9]]
+; CHECK-NEON-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-NEON-NEXT: [[TMP12:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP13]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE3:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP12]])
-; CHECK-NEON-NEXT: [[TMP14:%.*]] = mul nsw <16 x i32> [[TMP8]], [[TMP9]]
+; CHECK-NEON-NEXT: [[TMP14:%.*]] = mul nsw <16 x i32> [[TMP8]], [[TMP13]]
; CHECK-NEON-NEXT: [[TMP15:%.*]] = sub <16 x i32> zeroinitializer, [[TMP14]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE4]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE3]], <16 x i32> [[TMP15]])
; CHECK-NEON-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
@@ -826,39 +818,37 @@ define i32 @chained_partial_reduce_sub_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[CMP28_NOT:%.*]] = icmp ult i32 [[N]], 2
; CHECK-SVE-NEXT: [[DIV27:%.*]] = lshr i32 [[N]], 1
; CHECK-SVE-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext nneg i32 [[DIV27]] to i64
-; CHECK-SVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], [[TMP1]]
+; CHECK-SVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-SVE: vector.ph:
-; CHECK-SVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-SVE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], [[TMP3]]
+; CHECK-SVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 16
; CHECK-SVE-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]]
; CHECK-SVE-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-SVE: vector.body:
; CHECK-SVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP21:%.*]], [[VECTOR_BODY]] ]
+; CHECK-SVE-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE4:%.*]], [[VECTOR_BODY]] ]
; CHECK-SVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[INDEX]]
; CHECK-SVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds nuw i8, ptr [[C]], i64 [[INDEX]]
-; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP7]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP8]], align 1
-; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1
-; CHECK-SVE-NEXT: [[TMP13:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP14:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP15:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD2]] to <vscale x 4 x i32>
-; CHECK-SVE-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP14]]
-; CHECK-SVE-NEXT: [[TMP17:%.*]] = sub <vscale x 4 x i32> [[VEC_PHI]], [[TMP16]]
-; CHECK-SVE-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 4 x i32> [[TMP13]], [[TMP15]]
-; CHECK-SVE-NEXT: [[TMP19:%.*]] = add <vscale x 4 x i32> [[TMP17]], [[TMP18]]
-; CHECK-SVE-NEXT: [[TMP20:%.*]] = mul nsw <vscale x 4 x i32> [[TMP14]], [[TMP15]]
-; CHECK-SVE-NEXT: [[TMP21]] = sub <vscale x 4 x i32> [[TMP19]], [[TMP20]]
-; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
+; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
+; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1
+; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP5:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]]
+; CHECK-SVE-NEXT: [[TMP6:%.*]] = sub nsw <16 x i32> zeroinitializer, [[TMP5]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]])
+; CHECK-SVE-NEXT: [[TMP11:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP13:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP11]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP13]])
+; CHECK-SVE-NEXT: [[TMP14:%.*]] = mul nsw <16 x i32> [[TMP4]], [[TMP11]]
+; CHECK-SVE-NEXT: [[TMP10:%.*]] = sub <16 x i32> zeroinitializer, [[TMP14]]
+; CHECK-SVE-NEXT: [[PARTIAL_REDUCE4]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE3]], <16 x i32> [[TMP10]])
+; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-SVE-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK-SVE: middle.block:
-; CHECK-SVE-NEXT: [[TMP23:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP21]])
+; CHECK-SVE-NEXT: [[TMP12:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE4]])
; CHECK-SVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-SVE-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
; CHECK-SVE: scalar.ph:
@@ -890,13 +880,13 @@ define i32 @chained_partial_reduce_sub_add_sub(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP9]], align 1
; CHECK-SVE-MAXBW-NEXT: [[TMP13:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP14:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
-; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-MAXBW-NEXT: [[TMP17:%.*]] = sub nsw <vscale x 8 x i32> zeroinitializer, [[TMP16]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP17]])
-; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP15]]
+; CHECK-SVE-MAXBW-NEXT: [[TMP12:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
+; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP12]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP18]])
-; CHECK-SVE-MAXBW-NEXT: [[TMP19:%.*]] = mul nsw <vscale x 8 x i32> [[TMP14]], [[TMP15]]
+; CHECK-SVE-MAXBW-NEXT: [[TMP19:%.*]] = mul nsw <vscale x 8 x i32> [[TMP14]], [[TMP12]]
; CHECK-SVE-MAXBW-NEXT: [[TMP20:%.*]] = sub <vscale x 8 x i32> zeroinitializer, [[TMP19]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE4]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE3]], <vscale x 8 x i32> [[TMP20]])
; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
@@ -969,9 +959,9 @@ define i32 @chained_partial_reduce_madd_extadd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-NEON-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; CHECK-NEON-NEXT: [[TMP7:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP8]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP10]])
+; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP9]])
; CHECK-NEON-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEON-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -1005,9 +995,9 @@ define i32 @chained_partial_reduce_madd_extadd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1
; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]]
; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]])
+; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP5]])
; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-SVE-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -1045,9 +1035,9 @@ define i32 @chained_partial_reduce_madd_extadd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP9]], align 1
; CHECK-SVE-MAXBW-NEXT: [[TMP13:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP14:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
-; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP16]])
+; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP15]])
; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -1110,8 +1100,8 @@ define i32 @chained_partial_reduce_extadd_extadd(ptr %a, ptr %b, i32 %N) #0 {
; CHECK-NEON-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
; CHECK-NEON-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
; CHECK-NEON-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-NEON-NEXT: [[TMP6:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]])
+; CHECK-NEON-NEXT: [[TMP6:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE2]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP6]])
; CHECK-NEON-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEON-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -1142,8 +1132,8 @@ define i32 @chained_partial_reduce_extadd_extadd(ptr %a, ptr %b, i32 %N) #0 {
; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
; CHECK-SVE-NEXT: [[TMP2:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP2]])
+; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
; CHECK-SVE-NEXT: [[PARTIAL_REDUCE2]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP3]])
; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-SVE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -1178,8 +1168,8 @@ define i32 @chained_partial_reduce_extadd_extadd(ptr %a, ptr %b, i32 %N) #0 {
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP7]], align 1
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x i8>, ptr [[TMP8]], align 1
; CHECK-SVE-MAXBW-NEXT: [[TMP11:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
-; CHECK-SVE-MAXBW-NEXT: [[TMP12:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP11]])
+; CHECK-SVE-MAXBW-NEXT: [[TMP12:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE2]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP12]])
; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -1240,10 +1230,10 @@ define i32 @chained_partial_reduce_extadd_madd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-NEON-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
; CHECK-NEON-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
; CHECK-NEON-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
-; CHECK-NEON-NEXT: [[TMP7:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-NEON-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP9]])
+; CHECK-NEON-NEXT: [[TMP7:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-NEON-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
; CHECK-NEON-NEXT: [[TMP10:%.*]] = mul nsw <16 x i32> [[TMP7]], [[TMP8]]
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP10]])
; CHECK-NEON-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
@@ -1276,10 +1266,10 @@ define i32 @chained_partial_reduce_extadd_madd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
; CHECK-SVE-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
; CHECK-SVE-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP9]], align 1
-; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
; CHECK-SVE-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-SVE-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]])
+; CHECK-SVE-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-SVE-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
; CHECK-SVE-NEXT: [[TMP6:%.*]] = mul nsw <16 x i32> [[TMP3]], [[TMP4]]
; CHECK-SVE-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP6]])
; CHECK-SVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
@@ -1316,10 +1306,10 @@ define i32 @chained_partial_reduce_extadd_madd(ptr %a, ptr %b, ptr %c, i32 %N) #
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP7]], align 1
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x i8>, ptr [[TMP8]], align 1
; CHECK-SVE-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP9]], align 1
-; CHECK-SVE-MAXBW-NEXT: [[TMP13:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
-; CHECK-SVE-MAXBW-NEXT: [[TMP14:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP15:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE:%.*]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP15]])
+; CHECK-SVE-MAXBW-NEXT: [[TMP13:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
+; CHECK-SVE-MAXBW-NEXT: [[TMP14:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
; CHECK-SVE-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <vscale x 8 x i32> [[TMP13]], [[TMP14]]
; CHECK-SVE-MAXBW-NEXT: [[PARTIAL_REDUCE3]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]], <vscale x 8 x i32> [[TMP16]])
; CHECK-SVE-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-constant-ops.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-constant-ops.ll
index b033f60..b430efc 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-constant-ops.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-constant-ops.ll
@@ -467,3 +467,83 @@ loop:
exit:
ret i32 %red.next
}
+
+define i64 @partial_reduction_mul_two_users(i64 %n, ptr %a, i16 %b, i32 %c) {
+; CHECK-LABEL: define i64 @partial_reduction_mul_two_users(
+; CHECK-SAME: i64 [[N:%.*]], ptr [[A:%.*]], i16 [[B:%.*]], i32 [[C:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], 1
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 8
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 8
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i16> poison, i16 [[B]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i16> [[BROADCAST_SPLATINSERT]], <8 x i16> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i16> [[BROADCAST_SPLAT]] to <8 x i32>
+; CHECK-NEXT: [[TMP2:%.*]] = mul <8 x i32> [[TMP1]], [[TMP1]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i64> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP4:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <8 x i16> poison, i16 [[TMP4]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <8 x i16> [[BROADCAST_SPLATINSERT1]], <8 x i16> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = zext <8 x i32> [[TMP2]] to <8 x i64>
+; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i64> @llvm.vector.partial.reduce.add.v4i64.v8i64(<4 x i64> [[VEC_PHI]], <8 x i64> [[TMP3]])
+; CHECK-NEXT: [[TMP5:%.*]] = sext <8 x i16> [[BROADCAST_SPLAT2]] to <8 x i32>
+; CHECK-NEXT: [[TMP6:%.*]] = sext <8 x i32> [[TMP5]] to <8 x i64>
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> [[PARTIAL_REDUCE]])
+; CHECK-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <8 x i64> [[TMP6]], i32 7
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i64 [ [[TMP8]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[RES1:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[LOAD_EXT_EXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[RES2:%.*]] = phi i64 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[LOAD:%.*]] = load i16, ptr [[A]], align 2
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B]] to i32
+; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[CONV]], [[CONV]]
+; CHECK-NEXT: [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
+; CHECK-NEXT: [[ADD]] = add i64 [[RES2]], [[MUL_EXT]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[MUL]], [[C]]
+; CHECK-NEXT: [[LOAD_EXT:%.*]] = sext i16 [[LOAD]] to i32
+; CHECK-NEXT: [[LOAD_EXT_EXT]] = sext i32 [[LOAD_EXT]] to i64
+; CHECK-NEXT: [[EXITCOND740_NOT:%.*]] = icmp eq i64 [[IV]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND740_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i64 [ [[ADD]], %[[LOOP]] ], [ [[TMP8]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i64 [[ADD_LCSSA]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %res1 = phi i64 [ 0, %entry ], [ %load.ext.ext, %loop ]
+ %res2 = phi i64 [ 0, %entry ], [ %add, %loop ]
+ %load = load i16, ptr %a, align 2
+ %iv.next = add i64 %iv, 1
+ %conv = sext i16 %b to i32
+ %mul = mul i32 %conv, %conv
+ %mul.ext = zext i32 %mul to i64
+ %add = add i64 %res2, %mul.ext
+ %second_use = or i32 %mul, %c ; gives %mul a second user; the result is otherwise unused, which is sufficient for the test
+ %load.ext = sext i16 %load to i32
+ %load.ext.ext = sext i32 %load.ext to i64
+ %exitcond740.not = icmp eq i64 %iv, %n
+ br i1 %exitcond740.not, label %exit, label %loop
+
+exit:
+ ret i64 %add
+}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll
index 8ece59a..d8f1a86 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-epilogue.ll
@@ -16,10 +16,10 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
-; CHECK-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1
; CHECK-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEXT: [[TMP9:%.*]] = mul <16 x i32> [[TMP8]], [[TMP5]]
; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP9]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
@@ -68,7 +68,6 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 {
; CHECK-NEXT: [[IV_NEXT:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[A]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP1:%.*]] = sext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32>
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -77,11 +76,12 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 {
; CHECK-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <16 x i8> poison, i8 [[TMP2]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT2]], <16 x i8> poison, <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[BROADCAST_SPLAT3]] to <16 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = sext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP3]], [[TMP1]]
; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[IV_NEXT]]
-; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[IV_NEXT]]
@@ -89,7 +89,7 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 {
; CHECK: vec.epilog.iter.check:
; CHECK-NEXT: [[IND_END6:%.*]] = add i64 [[IDX_NEG]], [[IV_NEXT]]
; CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_MOD_VF]], 4
-; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]]
+; CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]], !prof [[PROF4:![0-9]+]]
; CHECK: vec.epilog.ph:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT]], [[WHILE_BODY]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP6]], [[WHILE_BODY]] ], [ 0, [[ENTRY]] ]
@@ -112,7 +112,7 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 {
; CHECK-NEXT: [[TMP13]] = add <4 x i32> [[TMP14]], [[VEC_PHI9]]
; CHECK-NEXT: [[INDEX_NEXT14]] = add nuw i64 [[INDEX9]], 4
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT14]], [[N_VEC5]]
-; CHECK-NEXT: br i1 [[TMP12]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP12]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: vec.epilog.middle.block:
; CHECK-NEXT: [[TMP15:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP13]])
; CHECK-NEXT: [[CMP_N15:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC5]]
@@ -136,7 +136,7 @@ define void @dotp_small_epilogue_vf(i64 %idx.neg, i8 %a) #1 {
; CHECK-NEXT: [[CMP_IV_NEG:%.*]] = icmp ugt i64 [[IV_NEG]], 0
; CHECK-NEXT: [[CMP_IV:%.*]] = icmp ne i64 [[ACCUM1]], -1
; CHECK-NEXT: [[EXITCOND:%.*]] = and i1 [[CMP_IV_NEG]], [[CMP_IV]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[WHILE_BODY1]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[WHILE_BODY1]], label [[WHILE_END_LOOPEXIT]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: while.end.loopexit:
; CHECK-NEXT: [[RESULT:%.*]] = phi i32 [ [[ADD]], [[WHILE_BODY1]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ], [ [[TMP15]], [[VEC_EPILOG_MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret void
@@ -495,7 +495,7 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) {
; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <16 x i64> [[VEC_IND]], splat (i64 16)
; CHECK-NEXT: [[TMP181:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP181]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP182:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-NEXT: br label [[EXIT:%.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll
index 09b41fb..26e630f 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-mixed.ll
@@ -26,26 +26,26 @@ define i32 @sudot(ptr %a, ptr %b) #0 {
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[TMP6]], i64 [[TMP9]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP6]], align 1
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 16 x i8>, ptr [[TMP10]], align 1
-; CHECK-NEXT: [[TMP11:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
-; CHECK-NEXT: [[TMP7:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD2]] to <vscale x 16 x i32>
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP16:%.*]] = shl nuw i64 [[TMP15]], 4
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP13]], i64 [[TMP16]]
; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 16 x i8>, ptr [[TMP13]], align 1
; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 16 x i8>, ptr [[TMP17]], align 1
-; CHECK-NEXT: [[TMP12:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD3]] to <vscale x 16 x i32>
-; CHECK-NEXT: [[TMP18:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD4]] to <vscale x 16 x i32>
-; CHECK-NEXT: [[TMP14:%.*]] = mul <vscale x 16 x i32> [[TMP12]], [[TMP11]]
-; CHECK-NEXT: [[TMP19:%.*]] = mul <vscale x 16 x i32> [[TMP18]], [[TMP7]]
-; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP14]])
-; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI1]], <vscale x 16 x i32> [[TMP19]])
+; CHECK-NEXT: [[TMP18:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD3]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP11:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP12:%.*]] = mul <vscale x 16 x i32> [[TMP18]], [[TMP11]]
+; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP12]])
+; CHECK-NEXT: [[TMP19:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD4]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP14:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD2]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP20:%.*]] = mul <vscale x 16 x i32> [[TMP19]], [[TMP14]]
+; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI1]], <vscale x 16 x i32> [[TMP20]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[BIN_RDX:%.*]] = add <vscale x 4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
-; CHECK-NEXT: [[TMP20:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]])
+; CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -62,8 +62,8 @@ define i32 @sudot(ptr %a, ptr %b) #0 {
; CHECK-NOI8MM-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-NOI8MM: vector.body:
; CHECK-NOI8MM-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP23:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP20:%.*]], [[VECTOR_BODY]] ]
; CHECK-NOI8MM-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-NOI8MM-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NOI8MM-NEXT: [[TMP9:%.*]] = shl nuw i64 [[TMP8]], 3
@@ -71,25 +71,25 @@ define i32 @sudot(ptr %a, ptr %b) #0 {
; CHECK-NOI8MM-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP6]], align 1
; CHECK-NOI8MM-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP10]], align 1
; CHECK-NOI8MM-NEXT: [[TMP11:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
-; CHECK-NOI8MM-NEXT: [[TMP12:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
+; CHECK-NOI8MM-NEXT: [[TMP7:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-NOI8MM-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-NOI8MM-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NOI8MM-NEXT: [[TMP16:%.*]] = shl nuw i64 [[TMP15]], 3
; CHECK-NOI8MM-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP13]], i64 [[TMP16]]
; CHECK-NOI8MM-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x i8>, ptr [[TMP13]], align 1
; CHECK-NOI8MM-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x i8>, ptr [[TMP17]], align 1
-; CHECK-NOI8MM-NEXT: [[TMP18:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD3]] to <vscale x 8 x i32>
-; CHECK-NOI8MM-NEXT: [[TMP19:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32>
-; CHECK-NOI8MM-NEXT: [[TMP20:%.*]] = mul <vscale x 8 x i32> [[TMP18]], [[TMP11]]
-; CHECK-NOI8MM-NEXT: [[TMP21:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP12]]
-; CHECK-NOI8MM-NEXT: [[TMP22]] = add <vscale x 8 x i32> [[TMP20]], [[VEC_PHI]]
-; CHECK-NOI8MM-NEXT: [[TMP23]] = add <vscale x 8 x i32> [[TMP21]], [[VEC_PHI1]]
+; CHECK-NOI8MM-NEXT: [[TMP12:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD3]] to <vscale x 8 x i32>
+; CHECK-NOI8MM-NEXT: [[TMP21:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32>
+; CHECK-NOI8MM-NEXT: [[TMP14:%.*]] = mul <vscale x 8 x i32> [[TMP12]], [[TMP11]]
+; CHECK-NOI8MM-NEXT: [[TMP22:%.*]] = mul <vscale x 8 x i32> [[TMP21]], [[TMP7]]
+; CHECK-NOI8MM-NEXT: [[TMP18]] = add <vscale x 8 x i32> [[TMP14]], [[VEC_PHI]]
+; CHECK-NOI8MM-NEXT: [[TMP20]] = add <vscale x 8 x i32> [[TMP22]], [[VEC_PHI1]]
; CHECK-NOI8MM-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NOI8MM-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NOI8MM-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-NOI8MM: middle.block:
-; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <vscale x 8 x i32> [[TMP23]], [[TMP22]]
-; CHECK-NOI8MM-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[BIN_RDX]])
+; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <vscale x 8 x i32> [[TMP20]], [[TMP18]]
+; CHECK-NOI8MM-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[BIN_RDX]])
; CHECK-NOI8MM-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-NOI8MM-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
; CHECK-NOI8MM: scalar.ph:
@@ -137,26 +137,26 @@ define i32 @usdot(ptr %a, ptr %b) #0 {
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[TMP6]], i64 [[TMP9]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP6]], align 1
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 16 x i8>, ptr [[TMP10]], align 1
-; CHECK-NEXT: [[TMP11:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
-; CHECK-NEXT: [[TMP7:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD2]] to <vscale x 16 x i32>
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP16:%.*]] = shl nuw i64 [[TMP15]], 4
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP13]], i64 [[TMP16]]
; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 16 x i8>, ptr [[TMP13]], align 1
; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 16 x i8>, ptr [[TMP17]], align 1
-; CHECK-NEXT: [[TMP12:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD3]] to <vscale x 16 x i32>
-; CHECK-NEXT: [[TMP18:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD4]] to <vscale x 16 x i32>
-; CHECK-NEXT: [[TMP14:%.*]] = mul <vscale x 16 x i32> [[TMP12]], [[TMP11]]
-; CHECK-NEXT: [[TMP19:%.*]] = mul <vscale x 16 x i32> [[TMP18]], [[TMP7]]
-; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP14]])
-; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI1]], <vscale x 16 x i32> [[TMP19]])
+; CHECK-NEXT: [[TMP18:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD3]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP11:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP12:%.*]] = mul <vscale x 16 x i32> [[TMP18]], [[TMP11]]
+; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP12]])
+; CHECK-NEXT: [[TMP19:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD4]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP14:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD2]] to <vscale x 16 x i32>
+; CHECK-NEXT: [[TMP20:%.*]] = mul <vscale x 16 x i32> [[TMP19]], [[TMP14]]
+; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI1]], <vscale x 16 x i32> [[TMP20]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[BIN_RDX:%.*]] = add <vscale x 4 x i32> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
-; CHECK-NEXT: [[TMP20:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]])
+; CHECK-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[BIN_RDX]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
; CHECK: scalar.ph:
@@ -173,8 +173,8 @@ define i32 @usdot(ptr %a, ptr %b) #0 {
; CHECK-NOI8MM-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-NOI8MM: vector.body:
; CHECK-NOI8MM-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP23:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NOI8MM-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NOI8MM-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP20:%.*]], [[VECTOR_BODY]] ]
; CHECK-NOI8MM-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-NOI8MM-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NOI8MM-NEXT: [[TMP9:%.*]] = shl nuw i64 [[TMP8]], 3
@@ -182,25 +182,25 @@ define i32 @usdot(ptr %a, ptr %b) #0 {
; CHECK-NOI8MM-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP6]], align 1
; CHECK-NOI8MM-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP10]], align 1
; CHECK-NOI8MM-NEXT: [[TMP11:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
-; CHECK-NOI8MM-NEXT: [[TMP12:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
+; CHECK-NOI8MM-NEXT: [[TMP7:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
; CHECK-NOI8MM-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-NOI8MM-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NOI8MM-NEXT: [[TMP16:%.*]] = shl nuw i64 [[TMP15]], 3
; CHECK-NOI8MM-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[TMP13]], i64 [[TMP16]]
; CHECK-NOI8MM-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x i8>, ptr [[TMP13]], align 1
; CHECK-NOI8MM-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x i8>, ptr [[TMP17]], align 1
-; CHECK-NOI8MM-NEXT: [[TMP18:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD3]] to <vscale x 8 x i32>
-; CHECK-NOI8MM-NEXT: [[TMP19:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32>
-; CHECK-NOI8MM-NEXT: [[TMP20:%.*]] = mul <vscale x 8 x i32> [[TMP18]], [[TMP11]]
-; CHECK-NOI8MM-NEXT: [[TMP21:%.*]] = mul <vscale x 8 x i32> [[TMP19]], [[TMP12]]
-; CHECK-NOI8MM-NEXT: [[TMP22]] = add <vscale x 8 x i32> [[TMP20]], [[VEC_PHI]]
-; CHECK-NOI8MM-NEXT: [[TMP23]] = add <vscale x 8 x i32> [[TMP21]], [[VEC_PHI1]]
+; CHECK-NOI8MM-NEXT: [[TMP12:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD3]] to <vscale x 8 x i32>
+; CHECK-NOI8MM-NEXT: [[TMP21:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32>
+; CHECK-NOI8MM-NEXT: [[TMP14:%.*]] = mul <vscale x 8 x i32> [[TMP12]], [[TMP11]]
+; CHECK-NOI8MM-NEXT: [[TMP22:%.*]] = mul <vscale x 8 x i32> [[TMP21]], [[TMP7]]
+; CHECK-NOI8MM-NEXT: [[TMP18]] = add <vscale x 8 x i32> [[TMP14]], [[VEC_PHI]]
+; CHECK-NOI8MM-NEXT: [[TMP20]] = add <vscale x 8 x i32> [[TMP22]], [[VEC_PHI1]]
; CHECK-NOI8MM-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NOI8MM-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NOI8MM-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-NOI8MM: middle.block:
-; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <vscale x 8 x i32> [[TMP23]], [[TMP22]]
-; CHECK-NOI8MM-NEXT: [[TMP25:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[BIN_RDX]])
+; CHECK-NOI8MM-NEXT: [[BIN_RDX:%.*]] = add <vscale x 8 x i32> [[TMP20]], [[TMP18]]
+; CHECK-NOI8MM-NEXT: [[TMP19:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[BIN_RDX]])
; CHECK-NOI8MM-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-NOI8MM-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
; CHECK-NOI8MM: scalar.ph:
@@ -242,18 +242,18 @@ define i32 @sudot_neon(ptr %a, ptr %b) #1 {
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
-; CHECK-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 16
; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
-; CHECK-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD3]] to <16 x i32>
-; CHECK-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
-; CHECK-NEXT: [[TMP10:%.*]] = mul <16 x i32> [[TMP8]], [[TMP3]]
+; CHECK-NEXT: [[TMP9:%.*]] = sext <16 x i8> [[WIDE_LOAD3]] to <16 x i32>
+; CHECK-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEXT: [[TMP11:%.*]] = mul <16 x i32> [[TMP9]], [[TMP4]]
-; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP10]])
-; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP11]])
+; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP11]])
+; CHECK-NEXT: [[TMP10:%.*]] = sext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
+; CHECK-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-NEXT: [[TMP14:%.*]] = mul <16 x i32> [[TMP10]], [[TMP8]]
+; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP14]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
@@ -337,18 +337,18 @@ define i32 @usdot_neon(ptr %a, ptr %b) #1 {
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1
; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
-; CHECK-NEXT: [[TMP3:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 16
; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
-; CHECK-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32>
-; CHECK-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
-; CHECK-NEXT: [[TMP10:%.*]] = mul <16 x i32> [[TMP8]], [[TMP3]]
+; CHECK-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32>
+; CHECK-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEXT: [[TMP11:%.*]] = mul <16 x i32> [[TMP9]], [[TMP4]]
-; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP10]])
-; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP11]])
+; CHECK-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP11]])
+; CHECK-NEXT: [[TMP10:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
+; CHECK-NEXT: [[TMP8:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-NEXT: [[TMP14:%.*]] = mul <16 x i32> [[TMP10]], [[TMP8]]
+; CHECK-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP14]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll
index 801eb81..b847631 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product-neon.ll
@@ -18,10 +18,10 @@ define i32 @dotp(ptr %a, ptr %b) {
; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1
; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = mul <16 x i32> [[TMP6]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP7]])
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
@@ -47,17 +47,17 @@ define i32 @dotp(ptr %a, ptr %b) {
; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP1]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP6]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = mul <16 x i32> [[TMP9]], [[TMP4]]
-; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = mul <16 x i32> [[TMP10]], [[TMP5]]
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP11]])
+; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = mul <16 x i32> [[TMP7]], [[TMP10]]
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP12]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -80,10 +80,10 @@ define i32 @dotp(ptr %a, ptr %b) {
; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
-; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-MAXBW-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1
; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = mul <16 x i32> [[TMP6]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP7]])
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
@@ -720,26 +720,26 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) {
; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]]
; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = sext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP15]], [[TMP13]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP16]])
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = sext <16 x i8> [[WIDE_LOAD5]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
+; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = sext <16 x i8> [[WIDE_LOAD5]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[WIDE_LOAD6]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = mul nsw <16 x i32> [[TMP18]], [[TMP20]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE7]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP21]])
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD9:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
+; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP25:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP26:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP25]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE10]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP26]])
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD11:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP28:%.*]] = sext <16 x i8> [[WIDE_LOAD11]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD12:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1
+; CHECK-INTERLEAVE1-NEXT: [[TMP28:%.*]] = sext <16 x i8> [[WIDE_LOAD11]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP30:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP31:%.*]] = mul nsw <16 x i32> [[TMP28]], [[TMP30]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE13]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP31]])
@@ -788,59 +788,59 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) {
; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP38]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD9:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD10:%.*]] = load <16 x i8>, ptr [[TMP16]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = sext <16 x i8> [[WIDE_LOAD10]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = mul nsw <16 x i32> [[TMP17]], [[TMP13]]
+; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = mul nsw <16 x i32> [[TMP18]], [[TMP14]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE1]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI6]], <16 x i32> [[TMP19]])
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE11]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP20]])
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE1]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI6]], <16 x i32> [[TMP20]])
+; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = sext <16 x i8> [[WIDE_LOAD10]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = mul nsw <16 x i32> [[TMP19]], [[TMP17]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE11]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP21]])
; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD12:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD13:%.*]] = load <16 x i8>, ptr [[TMP22]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = sext <16 x i8> [[WIDE_LOAD13]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD14:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD15:%.*]] = load <16 x i8>, ptr [[TMP26]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = sext <16 x i8> [[WIDE_LOAD14]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = sext <16 x i8> [[WIDE_LOAD15]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP27]]
+; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = sext <16 x i8> [[WIDE_LOAD14]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = mul nsw <16 x i32> [[TMP24]], [[TMP28]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP29]])
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP30]])
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP30]])
+; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = sext <16 x i8> [[WIDE_LOAD13]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = sext <16 x i8> [[WIDE_LOAD15]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = mul nsw <16 x i32> [[TMP27]], [[TMP25]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP29]])
; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD18:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD19:%.*]] = load <16 x i8>, ptr [[TMP32]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP33:%.*]] = sext <16 x i8> [[WIDE_LOAD18]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = sext <16 x i8> [[WIDE_LOAD19]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD20:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD21:%.*]] = load <16 x i8>, ptr [[TMP36]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = sext <16 x i8> [[WIDE_LOAD20]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP56:%.*]] = sext <16 x i8> [[WIDE_LOAD21]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP39:%.*]] = mul nsw <16 x i32> [[TMP33]], [[TMP37]]
+; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = sext <16 x i8> [[WIDE_LOAD18]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP56:%.*]] = sext <16 x i8> [[WIDE_LOAD20]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP40:%.*]] = mul nsw <16 x i32> [[TMP34]], [[TMP56]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE7]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP39]])
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP40]])
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE7]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP40]])
+; CHECK-INTERLEAVED-NEXT: [[TMP35:%.*]] = sext <16 x i8> [[WIDE_LOAD19]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP33:%.*]] = sext <16 x i8> [[WIDE_LOAD21]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = mul nsw <16 x i32> [[TMP35]], [[TMP33]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP37]])
; CHECK-INTERLEAVED-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[TMP10]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD24:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD25:%.*]] = load <16 x i8>, ptr [[TMP42]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP43:%.*]] = sext <16 x i8> [[WIDE_LOAD24]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP44:%.*]] = sext <16 x i8> [[WIDE_LOAD25]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP46:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD26:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD27:%.*]] = load <16 x i8>, ptr [[TMP46]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP47:%.*]] = sext <16 x i8> [[WIDE_LOAD26]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP48:%.*]] = sext <16 x i8> [[WIDE_LOAD27]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP49:%.*]] = mul nsw <16 x i32> [[TMP43]], [[TMP47]]
+; CHECK-INTERLEAVED-NEXT: [[TMP44:%.*]] = sext <16 x i8> [[WIDE_LOAD24]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP48:%.*]] = sext <16 x i8> [[WIDE_LOAD26]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP50:%.*]] = mul nsw <16 x i32> [[TMP44]], [[TMP48]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE13]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP49]])
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE10]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP50]])
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE13]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP50]])
+; CHECK-INTERLEAVED-NEXT: [[TMP43:%.*]] = sext <16 x i8> [[WIDE_LOAD25]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP41:%.*]] = sext <16 x i8> [[WIDE_LOAD27]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP45:%.*]] = mul nsw <16 x i32> [[TMP43]], [[TMP41]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE10]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP45]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP51:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP51]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
@@ -884,26 +884,26 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) {
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]]
; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]]
; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
-; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = sext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP15]], [[TMP13]]
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP16]])
; CHECK-MAXBW-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1
-; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = sext <16 x i8> [[WIDE_LOAD5]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
+; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = sext <16 x i8> [[WIDE_LOAD5]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[WIDE_LOAD6]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = mul nsw <16 x i32> [[TMP18]], [[TMP20]]
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE7]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP21]])
; CHECK-MAXBW-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
-; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[WIDE_LOAD9:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
+; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[TMP25:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[TMP26:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP25]]
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE10]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP26]])
; CHECK-MAXBW-NEXT: [[WIDE_LOAD11:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1
-; CHECK-MAXBW-NEXT: [[TMP28:%.*]] = sext <16 x i8> [[WIDE_LOAD11]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[WIDE_LOAD12:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1
+; CHECK-MAXBW-NEXT: [[TMP28:%.*]] = sext <16 x i8> [[WIDE_LOAD11]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[TMP30:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[TMP31:%.*]] = mul nsw <16 x i32> [[TMP28]], [[TMP30]]
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE13]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP31]])
@@ -2025,7 +2025,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) {
; CHECK-INTERLEAVE1-NEXT: [[TMP8]] = add <16 x i32> [[TMP7]], [[VEC_PHI]]
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP8]])
; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP6]], i32 15
@@ -2062,7 +2062,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) {
; CHECK-INTERLEAVED-NEXT: [[TMP14]] = add <16 x i32> [[TMP12]], [[VEC_PHI1]]
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <16 x i32> [[TMP14]], [[TMP13]]
; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[BIN_RDX]])
@@ -2091,7 +2091,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) {
; CHECK-MAXBW-NEXT: [[TMP8]] = add <16 x i32> [[TMP7]], [[VEC_PHI]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-MAXBW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP8]])
; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = extractelement <16 x i32> [[TMP6]], i32 15
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll
index 1ace7d4..4636c1b 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-dot-product.ll
@@ -18,11 +18,11 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX1]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP16]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX1]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP20]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP3]], [[TMP1]]
+; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP2]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]])
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT1]], 1024
@@ -47,17 +47,17 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP20]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP20]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX1]]
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP28]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP28]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP28]], align 1
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = mul <16 x i32> [[TMP4]], [[TMP5]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]])
; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = mul <16 x i32> [[TMP6]], [[TMP2]]
-; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = mul <16 x i32> [[TMP7]], [[TMP3]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP8]])
+; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = mul <16 x i32> [[TMP7]], [[TMP8]]
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP9]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT1]] = add nuw i64 [[INDEX1]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT1]], 1024
@@ -75,26 +75,26 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK-MAXBW: vector.ph:
; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-MAXBW-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ]
+; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[TMP7]], align 1
-; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i32>
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP7]], align 1
; CHECK-MAXBW-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 8 x i8>, ptr [[TMP14]], align 1
-; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD4]] to <vscale x 8 x i32>
-; CHECK-MAXBW-NEXT: [[TMP22:%.*]] = mul <vscale x 8 x i32> [[TMP20]], [[TMP13]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE5]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI1]], <vscale x 8 x i32> [[TMP22]])
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 16 x i8>, ptr [[TMP14]], align 1
+; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD1]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = mul <vscale x 16 x i32> [[TMP4]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP6]])
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-MAXBW: middle.block:
-; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE5]])
+; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
; CHECK-MAXBW: scalar.ph:
@@ -134,10 +134,10 @@ define i64 @not_dotp_i8_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %b
; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[NEXT_GEP1]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
-; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = mul nuw nsw <16 x i64> [[TMP1]], [[TMP0]]
+; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = mul nuw nsw <16 x i64> [[TMP0]], [[TMP1]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP2]])
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -157,54 +157,78 @@ define i64 @not_dotp_i8_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %b
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE6:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE13:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE14:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
+; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 32
+; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 48
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[NEXT_GEP1]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = mul nuw nsw <16 x i64> [[TMP1]], [[TMP0]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP2]])
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
-; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD7:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i32 32
+; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i32 48
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[NEXT_GEP1]], align 1
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD10:%.*]] = load <16 x i8>, ptr [[TMP18]], align 1
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD11:%.*]] = load <16 x i8>, ptr [[TMP19]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul nuw nsw <16 x i64> [[TMP2]], [[TMP3]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP4]])
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD5]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = mul nuw nsw <16 x i64> [[TMP5]], [[TMP6]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE6]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI1]], <16 x i64> [[TMP7]])
+; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = zext <16 x i8> [[WIDE_LOAD10]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext <16 x i8> [[WIDE_LOAD6]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = mul nuw nsw <16 x i64> [[TMP12]], [[TMP13]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE13]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI2]], <16 x i64> [[TMP14]])
+; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext <16 x i8> [[WIDE_LOAD11]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = zext <16 x i8> [[WIDE_LOAD7]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = mul nuw nsw <16 x i64> [[TMP15]], [[TMP16]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE14]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI3]], <16 x i64> [[TMP17]])
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 64
+; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
-; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]])
+; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE6]], [[PARTIAL_REDUCE]]
+; CHECK-INTERLEAVED-NEXT: [[BIN_RDX15:%.*]] = add <2 x i64> [[PARTIAL_REDUCE13]], [[BIN_RDX]]
+; CHECK-INTERLEAVED-NEXT: [[BIN_RDX16:%.*]] = add <2 x i64> [[PARTIAL_REDUCE14]], [[BIN_RDX15]]
+; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX16]])
; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]]
; CHECK-INTERLEAVED: for.exit:
-; CHECK-INTERLEAVED-NEXT: ret i64 [[TMP4]]
+; CHECK-INTERLEAVED-NEXT: ret i64 [[TMP9]]
;
; CHECK-MAXBW-LABEL: define i64 @not_dotp_i8_to_i64_has_neon_dotprod(
; CHECK-MAXBW-SAME: ptr readonly [[A:%.*]], ptr readonly [[B:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-MAXBW-NEXT: entry:
; CHECK-MAXBW-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK-MAXBW: vector.ph:
-; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
-; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]]
-; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[B]], i64 [[N_VEC]]
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
+; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-MAXBW-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-MAXBW-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[NEXT_GEP]], align 1
-; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i64>
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[NEXT_GEP1]], align 1
-; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i64>
-; CHECK-MAXBW-NEXT: [[TMP14:%.*]] = mul nuw nsw <vscale x 8 x i64> [[TMP13]], [[TMP11]]
-; CHECK-MAXBW-NEXT: [[TMP15]] = add <vscale x 8 x i64> [[TMP14]], [[VEC_PHI]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
-; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[NEXT_GEP1]], align 1
+; CHECK-MAXBW-NEXT: [[TMP0:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
+; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
+; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = mul nuw nsw <16 x i64> [[TMP0]], [[TMP1]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP2]])
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-MAXBW-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-MAXBW: middle.block:
-; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = call i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64> [[TMP15]])
-; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
-; CHECK-MAXBW: scalar.ph:
+; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]])
+; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]]
+; CHECK-MAXBW: for.exit:
+; CHECK-MAXBW-NEXT: ret i64 [[TMP4]]
;
entry:
br label %for.body
@@ -245,10 +269,10 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %
; CHECK-INTERLEAVE1-NEXT: [[OFFSET_IDX1:%.*]] = mul i64 [[INDEX]], 2
; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[OFFSET_IDX1]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2
-; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i16>, ptr [[NEXT_GEP2]], align 2
-; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <8 x i16> [[WIDE_LOAD3]] to <8 x i64>
-; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = mul nuw nsw <8 x i64> [[TMP1]], [[TMP0]]
+; CHECK-INTERLEAVE1-NEXT: [[TMP0:%.*]] = zext <8 x i16> [[WIDE_LOAD3]] to <8 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = mul nuw nsw <8 x i64> [[TMP0]], [[TMP1]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP2]])
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -269,30 +293,50 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE7:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE14:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI3:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE15:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2
; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-INTERLEAVED-NEXT: [[OFFSET_IDX2:%.*]] = mul i64 [[INDEX]], 2
; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[B]], i64 [[OFFSET_IDX2]]
; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i32 8
+; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr i16, ptr [[NEXT_GEP]], i32 24
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i16>, ptr [[TMP0]], align 2
-; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <8 x i16> [[WIDE_LOAD4]] to <8 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 8
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD7:%.*]] = load <8 x i16>, ptr [[TMP10]], align 2
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD8:%.*]] = load <8 x i16>, ptr [[TMP11]], align 2
+; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 8
+; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr i16, ptr [[NEXT_GEP3]], i32 24
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i16>, ptr [[NEXT_GEP3]], align 2
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i16>, ptr [[TMP3]], align 2
-; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <8 x i16> [[WIDE_LOAD5]] to <8 x i64>
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD11:%.*]] = load <8 x i16>, ptr [[TMP18]], align 2
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD12:%.*]] = load <8 x i16>, ptr [[TMP19]], align 2
+; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <8 x i16> [[WIDE_LOAD5]] to <8 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = mul nuw nsw <8 x i64> [[TMP2]], [[TMP3]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP4]])
; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <8 x i16> [[WIDE_LOAD6]] to <8 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = mul nuw nsw <8 x i64> [[TMP4]], [[TMP1]]
-; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = mul nuw nsw <8 x i64> [[TMP5]], [[TMP2]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP6]])
+; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <8 x i16> [[WIDE_LOAD4]] to <8 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = mul nuw nsw <8 x i64> [[TMP5]], [[TMP6]]
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE7]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI1]], <8 x i64> [[TMP7]])
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = zext <8 x i16> [[WIDE_LOAD11]] to <8 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext <8 x i16> [[WIDE_LOAD7]] to <8 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = mul nuw nsw <8 x i64> [[TMP12]], [[TMP13]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE14]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI2]], <8 x i64> [[TMP14]])
+; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext <8 x i16> [[WIDE_LOAD12]] to <8 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = zext <8 x i16> [[WIDE_LOAD8]] to <8 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = mul nuw nsw <8 x i64> [[TMP15]], [[TMP16]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE15]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI3]], <8 x i64> [[TMP17]])
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE7]], [[PARTIAL_REDUCE]]
-; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]])
+; CHECK-INTERLEAVED-NEXT: [[BIN_RDX16:%.*]] = add <2 x i64> [[PARTIAL_REDUCE14]], [[BIN_RDX]]
+; CHECK-INTERLEAVED-NEXT: [[BIN_RDX17:%.*]] = add <2 x i64> [[PARTIAL_REDUCE15]], [[BIN_RDX16]]
+; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX17]])
; CHECK-INTERLEAVED-NEXT: br label [[FOR_EXIT:%.*]]
; CHECK-INTERLEAVED: for.exit:
; CHECK-INTERLEAVED-NEXT: ret i64 [[TMP9]]
@@ -302,36 +346,28 @@ define i64 @not_dotp_i16_to_i64_has_neon_dotprod(ptr readonly %a, ptr readonly %
; CHECK-MAXBW-NEXT: entry:
; CHECK-MAXBW-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK-MAXBW: vector.ph:
-; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
-; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = mul i64 [[N_VEC]], 2
-; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP6]]
-; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = mul i64 [[N_VEC]], 2
-; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP8]]
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP17:%.*]], [[VECTOR_BODY]] ]
+; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-MAXBW-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2
; CHECK-MAXBW-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-MAXBW-NEXT: [[OFFSET_IDX1:%.*]] = mul i64 [[INDEX]], 2
; CHECK-MAXBW-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[OFFSET_IDX1]]
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i16>, ptr [[NEXT_GEP]], align 2
-; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = zext <vscale x 4 x i16> [[WIDE_LOAD]] to <vscale x 4 x i64>
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 4 x i16>, ptr [[NEXT_GEP2]], align 2
-; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = zext <vscale x 4 x i16> [[WIDE_LOAD3]] to <vscale x 4 x i64>
-; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = mul nuw nsw <vscale x 4 x i64> [[TMP15]], [[TMP13]]
-; CHECK-MAXBW-NEXT: [[TMP17]] = add <vscale x 4 x i64> [[TMP16]], [[VEC_PHI]]
-; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
-; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i16>, ptr [[NEXT_GEP2]], align 2
+; CHECK-MAXBW-NEXT: [[TMP0:%.*]] = zext <8 x i16> [[WIDE_LOAD3]] to <8 x i64>
+; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
+; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = mul nuw nsw <8 x i64> [[TMP0]], [[TMP1]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP2]])
+; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-MAXBW-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK-MAXBW: middle.block:
-; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> [[TMP17]])
-; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
-; CHECK-MAXBW: scalar.ph:
+; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]])
+; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]]
+; CHECK-MAXBW: for.exit:
+; CHECK-MAXBW-NEXT: ret i64 [[TMP4]]
;
entry:
br label %for.body
@@ -687,7 +723,7 @@ define i32 @not_dotp_different_types(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP138]] = add <16 x i32> [[TMP136]], [[VEC_PHI1]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-MAXBW-NEXT: [[TMP70:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-MAXBW-NEXT: br i1 [[TMP70]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP70]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP71:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP138]])
; CHECK-MAXBW-NEXT: br label [[FOR_EXIT:%.*]]
@@ -835,7 +871,7 @@ define i32 @not_dotp_not_loop_carried(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP27:%.*]] = add <vscale x 8 x i32> [[TMP25]], [[TMP26]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = mul nuw i32 [[TMP20]], 8
@@ -964,7 +1000,7 @@ define i32 @not_dotp_not_phi(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = add <vscale x 8 x i32> [[TMP20]], [[TMP19]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-MAXBW-NEXT: [[TMP24:%.*]] = mul nuw i32 [[TMP23]], 8
@@ -1024,26 +1060,26 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP9]]
; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP9]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = sext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = mul nsw <16 x i32> [[TMP12]], [[TMP23]]
+; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP12]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP13]])
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[WIDE_LOAD5]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
+; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[WIDE_LOAD5]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP15:%.*]] = sext <16 x i8> [[WIDE_LOAD6]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP14]], [[TMP15]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE7]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP16]])
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD9:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
+; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP18:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = mul nsw <16 x i32> [[TMP17]], [[TMP18]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE10]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP19]])
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD11:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[WIDE_LOAD11]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD12:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1
+; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[WIDE_LOAD11]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP21:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP22:%.*]] = mul nsw <16 x i32> [[TMP20]], [[TMP21]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE13]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP22]])
@@ -1092,58 +1128,58 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD8:%.*]] = load <16 x i8>, ptr [[TMP43]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD9:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD10:%.*]] = load <16 x i8>, ptr [[TMP14]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD10:%.*]] = load <16 x i8>, ptr [[TMP12]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[WIDE_LOAD9]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = mul nsw <16 x i32> [[TMP13]], [[TMP14]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI6]], <16 x i32> [[TMP15]])
; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = sext <16 x i8> [[WIDE_LOAD10]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = mul nsw <16 x i32> [[TMP15]], [[TMP12]]
-; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = mul nsw <16 x i32> [[TMP16]], [[TMP13]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI6]], <16 x i32> [[TMP17]])
+; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[WIDE_LOAD8]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = mul nsw <16 x i32> [[TMP16]], [[TMP17]]
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE11]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP18]])
; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD12:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD13:%.*]] = load <16 x i8>, ptr [[TMP19]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = sext <16 x i8> [[WIDE_LOAD13]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[TMP5]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD14:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD15:%.*]] = load <16 x i8>, ptr [[TMP22]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[WIDE_LOAD14]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = sext <16 x i8> [[WIDE_LOAD15]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = mul nsw <16 x i32> [[TMP20]], [[TMP23]]
-; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = mul nsw <16 x i32> [[TMP21]], [[TMP24]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP25]])
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD15:%.*]] = load <16 x i8>, ptr [[TMP20]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = sext <16 x i8> [[WIDE_LOAD12]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = sext <16 x i8> [[WIDE_LOAD14]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = mul nsw <16 x i32> [[TMP21]], [[TMP22]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP23]])
+; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = sext <16 x i8> [[WIDE_LOAD13]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = sext <16 x i8> [[WIDE_LOAD15]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = mul nsw <16 x i32> [[TMP24]], [[TMP25]]
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP26]])
; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD18:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD19:%.*]] = load <16 x i8>, ptr [[TMP27]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = sext <16 x i8> [[WIDE_LOAD18]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = sext <16 x i8> [[WIDE_LOAD19]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[TMP8]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD20:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD21:%.*]] = load <16 x i8>, ptr [[TMP30]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = sext <16 x i8> [[WIDE_LOAD20]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP48:%.*]] = sext <16 x i8> [[WIDE_LOAD21]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP33:%.*]] = mul nsw <16 x i32> [[TMP28]], [[TMP31]]
-; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = mul nsw <16 x i32> [[TMP29]], [[TMP48]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE22]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP33]])
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD21:%.*]] = load <16 x i8>, ptr [[TMP28]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = sext <16 x i8> [[WIDE_LOAD18]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = sext <16 x i8> [[WIDE_LOAD20]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = mul nsw <16 x i32> [[TMP29]], [[TMP30]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE22]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP31]])
+; CHECK-INTERLEAVED-NEXT: [[TMP48:%.*]] = sext <16 x i8> [[WIDE_LOAD19]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP33:%.*]] = sext <16 x i8> [[WIDE_LOAD21]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = mul nsw <16 x i32> [[TMP48]], [[TMP33]]
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE23]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP34]])
; CHECK-INTERLEAVED-NEXT: [[TMP35:%.*]] = getelementptr inbounds i8, ptr [[TMP10]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD24:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD25:%.*]] = load <16 x i8>, ptr [[TMP35]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = sext <16 x i8> [[WIDE_LOAD24]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = sext <16 x i8> [[WIDE_LOAD25]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD26:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD27:%.*]] = load <16 x i8>, ptr [[TMP38]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP39:%.*]] = sext <16 x i8> [[WIDE_LOAD26]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP40:%.*]] = sext <16 x i8> [[WIDE_LOAD27]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP41:%.*]] = mul nsw <16 x i32> [[TMP36]], [[TMP39]]
-; CHECK-INTERLEAVED-NEXT: [[TMP42:%.*]] = mul nsw <16 x i32> [[TMP37]], [[TMP40]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE28]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP41]])
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD27:%.*]] = load <16 x i8>, ptr [[TMP36]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = sext <16 x i8> [[WIDE_LOAD24]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = sext <16 x i8> [[WIDE_LOAD26]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP39:%.*]] = mul nsw <16 x i32> [[TMP37]], [[TMP38]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE28]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP39]])
+; CHECK-INTERLEAVED-NEXT: [[TMP40:%.*]] = sext <16 x i8> [[WIDE_LOAD25]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP41:%.*]] = sext <16 x i8> [[WIDE_LOAD27]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP42:%.*]] = mul nsw <16 x i32> [[TMP40]], [[TMP41]]
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE29]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP42]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -1165,21 +1201,21 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-MAXBW-SAME: i32 [[NUM_OUT:%.*]], i64 [[NUM_IN:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-MAXBW-NEXT: entry:
; CHECK-MAXBW-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
+; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
; CHECK-MAXBW-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUM_IN]], [[TMP1]]
; CHECK-MAXBW-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-MAXBW: vector.ph:
; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[NUM_IN]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[NUM_IN]], [[N_MOD_VF]]
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-MAXBW-NEXT: [[VEC_PHI4:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE16:%.*]], [[VECTOR_BODY]] ]
-; CHECK-MAXBW-NEXT: [[VEC_PHI5:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE17:%.*]], [[VECTOR_BODY]] ]
-; CHECK-MAXBW-NEXT: [[VEC_PHI6:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
-; CHECK-MAXBW-NEXT: [[VEC_PHI7:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE11:%.*]], [[VECTOR_BODY]] ]
+; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE13:%.*]], [[VECTOR_BODY]] ]
+; CHECK-MAXBW-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE10:%.*]], [[VECTOR_BODY]] ]
+; CHECK-MAXBW-NEXT: [[VEC_PHI2:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE7:%.*]], [[VECTOR_BODY]] ]
+; CHECK-MAXBW-NEXT: [[VEC_PHI3:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]]
; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = or disjoint i64 [[INDEX]], 1
@@ -1191,38 +1227,38 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = or disjoint i64 [[INDEX]], 3
; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[TMP15]]
; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[TMP15]]
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP7]], align 1
-; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 8 x i8>, ptr [[TMP8]], align 1
-; CHECK-MAXBW-NEXT: [[TMP29:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD9]] to <vscale x 8 x i32>
-; CHECK-MAXBW-NEXT: [[TMP31:%.*]] = mul nsw <vscale x 8 x i32> [[TMP29]], [[TMP23]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE11]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI7]], <vscale x 8 x i32> [[TMP31]])
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD12:%.*]] = load <vscale x 8 x i8>, ptr [[TMP10]], align 1
-; CHECK-MAXBW-NEXT: [[TMP37:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD12]] to <vscale x 8 x i32>
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD14:%.*]] = load <vscale x 8 x i8>, ptr [[TMP11]], align 1
-; CHECK-MAXBW-NEXT: [[TMP43:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD14]] to <vscale x 8 x i32>
-; CHECK-MAXBW-NEXT: [[TMP45:%.*]] = mul nsw <vscale x 8 x i32> [[TMP37]], [[TMP43]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI6]], <vscale x 8 x i32> [[TMP45]])
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD18:%.*]] = load <vscale x 8 x i8>, ptr [[TMP13]], align 1
-; CHECK-MAXBW-NEXT: [[TMP51:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD18]] to <vscale x 8 x i32>
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD20:%.*]] = load <vscale x 8 x i8>, ptr [[TMP14]], align 1
-; CHECK-MAXBW-NEXT: [[TMP57:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD20]] to <vscale x 8 x i32>
-; CHECK-MAXBW-NEXT: [[TMP59:%.*]] = mul nsw <vscale x 8 x i32> [[TMP51]], [[TMP57]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE17]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI5]], <vscale x 8 x i32> [[TMP59]])
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD24:%.*]] = load <vscale x 8 x i8>, ptr [[TMP16]], align 1
-; CHECK-MAXBW-NEXT: [[TMP65:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD24]] to <vscale x 8 x i32>
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD26:%.*]] = load <vscale x 8 x i8>, ptr [[TMP17]], align 1
-; CHECK-MAXBW-NEXT: [[TMP71:%.*]] = sext <vscale x 8 x i8> [[WIDE_LOAD26]] to <vscale x 8 x i32>
-; CHECK-MAXBW-NEXT: [[TMP73:%.*]] = mul nsw <vscale x 8 x i32> [[TMP65]], [[TMP71]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE16]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI4]], <vscale x 8 x i32> [[TMP73]])
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP7]], align 1
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 16 x i8>, ptr [[TMP8]], align 1
+; CHECK-MAXBW-NEXT: [[TMP27:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD4]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP32:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP33:%.*]] = mul nsw <vscale x 16 x i32> [[TMP27]], [[TMP32]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI3]], <vscale x 16 x i32> [[TMP33]])
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 16 x i8>, ptr [[TMP10]], align 1
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 16 x i8>, ptr [[TMP11]], align 1
+; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD5]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD6]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = mul nsw <vscale x 16 x i32> [[TMP18]], [[TMP19]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE7]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI2]], <vscale x 16 x i32> [[TMP20]])
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 16 x i8>, ptr [[TMP13]], align 1
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD9:%.*]] = load <vscale x 16 x i8>, ptr [[TMP14]], align 1
+; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD8]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP22:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD9]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = mul nsw <vscale x 16 x i32> [[TMP21]], [[TMP22]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE10]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI1]], <vscale x 16 x i32> [[TMP23]])
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD11:%.*]] = load <vscale x 16 x i8>, ptr [[TMP16]], align 1
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD12:%.*]] = load <vscale x 16 x i8>, ptr [[TMP17]], align 1
+; CHECK-MAXBW-NEXT: [[TMP24:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD11]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP25:%.*]] = sext <vscale x 16 x i8> [[WIDE_LOAD12]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP26:%.*]] = mul nsw <vscale x 16 x i32> [[TMP24]], [[TMP25]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE13]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP26]])
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP74:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP74]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP74]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK-MAXBW: middle.block:
-; CHECK-MAXBW-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE16]])
-; CHECK-MAXBW-NEXT: [[TMP40:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE17]])
-; CHECK-MAXBW-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]])
-; CHECK-MAXBW-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE11]])
+; CHECK-MAXBW-NEXT: [[TMP28:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE13]])
+; CHECK-MAXBW-NEXT: [[TMP29:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE10]])
+; CHECK-MAXBW-NEXT: [[TMP30:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE7]])
+; CHECK-MAXBW-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[NUM_IN]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK-MAXBW: scalar.ph:
@@ -1390,7 +1426,7 @@ define i32 @dotp_predicated(i64 %N, ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i64(i64 [[INDEX]], i64 [[TMP9]])
; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = extractelement <vscale x 16 x i1> [[ACTIVE_LANE_MASK_NEXT]], i32 0
; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = xor i1 [[TMP19]], true
-; CHECK-MAXBW-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT: br label [[EXIT:%.*]]
@@ -1525,7 +1561,7 @@ define i32 @not_dotp_extend_user(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: [[TMP24]] = add <vscale x 8 x i32> [[TMP22]], [[VEC_PHI1]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = call i32 @llvm.vector.reduce.add.nxv8i32(<vscale x 8 x i32> [[TMP24]])
; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32()
@@ -1565,110 +1601,93 @@ define i64 @dotp_cost_disagreement(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: entry:
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK-INTERLEAVE1: vector.ph:
-; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
-; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i64 41, [[TMP3]]
-; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i64 41, [[N_MOD_VF]]
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
-; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, ptr [[TMP7]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP9:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD]] to <vscale x 2 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = add nuw nsw i64 [[INDEX]], 1
; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 2 x i8>, ptr [[TMP11]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD1]] to <vscale x 2 x i64>
-; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP13]], [[TMP9]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP15]] = add <vscale x 2 x i64> [[VEC_PHI]], [[TMP14]]
-; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP11]], align 1
+; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = mul nuw nsw <16 x i64> [[TMP3]], [[TMP4]]
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP5]])
+; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32
+; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
-; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[TMP15]])
-; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]]
-; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH:%.*]]
+; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]])
+; CHECK-INTERLEAVE1-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK-INTERLEAVE1: scalar.ph:
;
; CHECK-INTERLEAVED-LABEL: define i64 @dotp_cost_disagreement(
; CHECK-INTERLEAVED-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-INTERLEAVED-NEXT: entry:
-; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 41, [[TMP1]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_PH:%.*]]
; CHECK-INTERLEAVED: vector.ph:
-; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
-; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 41, [[TMP3]]
-; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 41, [[N_MOD_VF]]
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP24:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP25:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE5:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
-; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = shl nuw i64 [[TMP9]], 1
-; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP7]], i64 [[TMP10]]
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i8>, ptr [[TMP7]], align 1
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 2 x i8>, ptr [[TMP11]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD]] to <vscale x 2 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD2]] to <vscale x 2 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP7]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = add nuw nsw i64 [[INDEX]], 1
; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[TMP14]]
-; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = shl nuw i64 [[TMP17]], 1
-; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP15]], i64 [[TMP18]]
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 2 x i8>, ptr [[TMP15]], align 1
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <vscale x 2 x i8>, ptr [[TMP19]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD3]] to <vscale x 2 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = zext <vscale x 2 x i8> [[WIDE_LOAD4]] to <vscale x 2 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP20]], [[TMP12]]
-; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = mul nuw nsw <vscale x 2 x i64> [[TMP21]], [[TMP13]]
-; CHECK-INTERLEAVED-NEXT: [[TMP24]] = add <vscale x 2 x i64> [[VEC_PHI]], [[TMP22]]
-; CHECK-INTERLEAVED-NEXT: [[TMP25]] = add <vscale x 2 x i64> [[VEC_PHI1]], [[TMP23]]
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
-; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP15]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP15]], align 1
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = mul nuw nsw <16 x i64> [[TMP5]], [[TMP6]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP13]])
+; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = mul nuw nsw <16 x i64> [[TMP8]], [[TMP9]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI1]], <16 x i64> [[TMP10]])
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
+; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32
+; CHECK-INTERLEAVED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
-; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <vscale x 2 x i64> [[TMP25]], [[TMP24]]
-; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[BIN_RDX]])
-; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE5]], [[PARTIAL_REDUCE]]
+; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]])
+; CHECK-INTERLEAVED-NEXT: br label [[SCALAR_PH:%.*]]
; CHECK-INTERLEAVED: scalar.ph:
;
; CHECK-MAXBW-LABEL: define i64 @dotp_cost_disagreement(
; CHECK-MAXBW-SAME: ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR0]] {
; CHECK-MAXBW-NEXT: entry:
; CHECK-MAXBW-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
+; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
; CHECK-MAXBW-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 41, [[TMP1]]
; CHECK-MAXBW-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-MAXBW: vector.ph:
; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 41, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 41, [[N_MOD_VF]]
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP14:%.*]], [[VECTOR_BODY]] ]
+; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP7]], align 1
-; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i64>
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP7]], align 1
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = add nuw nsw i64 [[INDEX]], 1
; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[TMP10]]
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x i8>, ptr [[TMP11]], align 1
-; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i64>
-; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = mul nuw nsw <vscale x 8 x i64> [[TMP8]], [[TMP9]]
-; CHECK-MAXBW-NEXT: [[TMP14]] = add <vscale x 8 x i64> [[VEC_PHI]], [[TMP13]]
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 16 x i8>, ptr [[TMP11]], align 1
+; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD1]] to <vscale x 16 x i64>
+; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i64>
+; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = mul nuw nsw <vscale x 16 x i64> [[TMP12]], [[TMP8]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i64> @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64(<vscale x 2 x i64> [[VEC_PHI]], <vscale x 16 x i64> [[TMP9]])
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK-MAXBW: middle.block:
-; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = call i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64> [[TMP14]])
+; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 41, [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK-MAXBW: scalar.ph:
@@ -1971,7 +1990,7 @@ define i64 @not_dotp_ext_outside_plan(ptr %a, i16 %b, i64 %n) #0 {
; CHECK-MAXBW-NEXT: [[TMP11]] = add <vscale x 4 x i64> [[TMP10]], [[VEC_PHI]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> [[TMP11]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
@@ -2104,7 +2123,7 @@ define i64 @not_dotp_ext_outside_plan2(ptr %a, i16 %b, i64 %n) #0 {
; CHECK-MAXBW-NEXT: [[TMP11]] = add <vscale x 4 x i64> [[TMP10]], [[VEC_PHI]]
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK-MAXBW: middle.block:
; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> [[TMP11]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
@@ -2160,10 +2179,10 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[NEXT_GEP1]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
-; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = mul nuw nsw <16 x i64> [[TMP6]], [[TMP5]]
+; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
+; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = mul nuw nsw <16 x i64> [[TMP5]], [[TMP6]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP10]])
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -2181,10 +2200,10 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-INTERLEAVED-NEXT: br i1 [[CMP]], label [[FOR_BODY_PREHEADER:%.*]], label [[EXIT:%.*]]
; CHECK-INTERLEAVED: for.body.preheader:
; CHECK-INTERLEAVED-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
-; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16
+; CHECK-INTERLEAVED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 32
; CHECK-INTERLEAVED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-INTERLEAVED: vector.ph:
-; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 16
+; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 32
; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]]
@@ -2194,19 +2213,29 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <2 x i64> [ [[TMP4]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi <2 x i64> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE6:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-INTERLEAVED-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[NEXT_GEP2]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = mul nuw nsw <16 x i64> [[TMP6]], [[TMP5]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP10]])
-; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[NEXT_GEP2]], align 1
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP6]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = mul nuw nsw <16 x i64> [[TMP13]], [[TMP15]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP16]])
+; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = zext <16 x i8> [[WIDE_LOAD5]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i64>
+; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = mul nuw nsw <16 x i64> [[TMP10]], [[TMP11]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE6]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI1]], <16 x i64> [[TMP12]])
+; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
-; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[PARTIAL_REDUCE]])
+; CHECK-INTERLEAVED-NEXT: [[BIN_RDX:%.*]] = add <2 x i64> [[PARTIAL_REDUCE6]], [[PARTIAL_REDUCE]]
+; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> [[BIN_RDX]])
; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[EXIT_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK-INTERLEAVED: scalar.ph:
@@ -2219,35 +2248,35 @@ define dso_local i32 @not_dotp_vscale1(ptr %a, ptr %b, i32 %n, i64 %cost) #0 {
; CHECK-MAXBW: for.body.preheader:
; CHECK-MAXBW-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
; CHECK-MAXBW-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
+; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
; CHECK-MAXBW-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]]
; CHECK-MAXBW-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK-MAXBW: vector.ph:
; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 8
+; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 16
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32
; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[A]], i64 [[N_VEC]]
; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[N_VEC]]
-; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = insertelement <vscale x 8 x i64> zeroinitializer, i64 [[COST]], i32 0
+; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = insertelement <vscale x 2 x i64> zeroinitializer, i64 [[COST]], i32 0
; CHECK-MAXBW-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 8 x i64> [ [[TMP10]], [[VECTOR_PH]] ], [ [[TMP20:%.*]], [[VECTOR_BODY]] ]
+; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i64> [ [[TMP12]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-MAXBW-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-MAXBW-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[NEXT_GEP]], align 1
-; CHECK-MAXBW-NEXT: [[TMP14:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i64>
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 8 x i8>, ptr [[NEXT_GEP1]], align 1
-; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD2]] to <vscale x 8 x i64>
-; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = mul nuw nsw <vscale x 8 x i64> [[TMP16]], [[TMP14]]
-; CHECK-MAXBW-NEXT: [[TMP20]] = add <vscale x 8 x i64> [[TMP17]], [[VEC_PHI]]
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[NEXT_GEP]], align 1
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD2:%.*]] = load <vscale x 16 x i8>, ptr [[NEXT_GEP1]], align 1
+; CHECK-MAXBW-NEXT: [[TMP14:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD2]] to <vscale x 16 x i64>
+; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i64>
+; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = mul nuw nsw <vscale x 16 x i64> [[TMP14]], [[TMP10]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i64> @llvm.vector.partial.reduce.add.nxv2i64.nxv16i64(<vscale x 2 x i64> [[VEC_PHI]], <vscale x 16 x i64> [[TMP11]])
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP4]]
; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]]
; CHECK-MAXBW: middle.block:
-; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = call i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64> [[TMP20]])
+; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label [[EXIT_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK-MAXBW: scalar.ph:
@@ -2319,17 +2348,16 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum,
; CHECK-INTERLEAVE1-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP0]], [[VECTOR_PH]] ], [ [[TMP36:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ [[TMP1]], [[VECTOR_PH]] ], [ [[TMP33:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ [[TMP2]], [[VECTOR_PH]] ], [ [[TMP30:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[TMP27:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI4:%.*]] = phi <4 x i32> [ [[TMP4]], [[VECTOR_PH]] ], [ [[TMP24:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI5:%.*]] = phi <4 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[TMP21:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI6:%.*]] = phi <4 x i32> [ [[TMP6]], [[VECTOR_PH]] ], [ [[TMP18:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI7:%.*]] = phi <4 x i32> [ [[TMP7]], [[VECTOR_PH]] ], [ [[TMP15:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP0]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE21:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ [[TMP1]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE20:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ [[TMP2]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE19:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE18:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI4:%.*]] = phi <4 x i32> [ [[TMP4]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE17:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI5:%.*]] = phi <4 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE16:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI6:%.*]] = phi <4 x i32> [ [[TMP6]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE15:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI7:%.*]] = phi <4 x i32> [ [[TMP7]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVE1-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP11:%.*]] = shl nsw i64 [[INDEX]], 3
; CHECK-INTERLEAVE1-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[TMP11]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_VEC:%.*]] = load <128 x i8>, ptr [[TMP12]], align 1
@@ -2341,42 +2369,43 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum,
; CHECK-INTERLEAVE1-NEXT: [[STRIDED_VEC12:%.*]] = shufflevector <128 x i8> [[WIDE_VEC]], <128 x i8> poison, <16 x i32> <i32 5, i32 13, i32 21, i32 29, i32 37, i32 45, i32 53, i32 61, i32 69, i32 77, i32 85, i32 93, i32 101, i32 109, i32 117, i32 125>
; CHECK-INTERLEAVE1-NEXT: [[STRIDED_VEC13:%.*]] = shufflevector <128 x i8> [[WIDE_VEC]], <128 x i8> poison, <16 x i32> <i32 6, i32 14, i32 22, i32 30, i32 38, i32 46, i32 54, i32 62, i32 70, i32 78, i32 86, i32 94, i32 102, i32 110, i32 118, i32 126>
; CHECK-INTERLEAVE1-NEXT: [[STRIDED_VEC14:%.*]] = shufflevector <128 x i8> [[WIDE_VEC]], <128 x i8> poison, <16 x i32> <i32 7, i32 15, i32 23, i32 31, i32 39, i32 47, i32 55, i32 63, i32 71, i32 79, i32 87, i32 95, i32 103, i32 111, i32 119, i32 127>
-; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[STRIDED_VEC]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = mul nsw <16 x i32> [[TMP13]], [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP15]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP14]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP29:%.*]] = sext <16 x i8> [[STRIDED_VEC8]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP29]], [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP18]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI6]], <16 x i32> [[TMP16]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP35:%.*]] = sext <16 x i8> [[STRIDED_VEC]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP44:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP13:%.*]] = mul nsw <16 x i32> [[TMP35]], [[TMP44]]
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP13]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[STRIDED_VEC8]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP14]], [[TMP44]]
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE15]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI6]], <16 x i32> [[TMP16]])
; CHECK-INTERLEAVE1-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[STRIDED_VEC9]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP31:%.*]] = mul nsw <16 x i32> [[TMP17]], [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP31]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = sext <16 x i8> [[STRIDED_VEC10]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = mul nsw <16 x i32> [[TMP19]], [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP24]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP20]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP32:%.*]] = sext <16 x i8> [[STRIDED_VEC11]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP22:%.*]] = mul nsw <16 x i32> [[TMP32]], [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP27]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP22]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[STRIDED_VEC12]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP34:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP30]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP34]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP25:%.*]] = sext <16 x i8> [[STRIDED_VEC13]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP26:%.*]] = mul nsw <16 x i32> [[TMP25]], [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP33]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP26]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP35:%.*]] = sext <16 x i8> [[STRIDED_VEC14]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP28:%.*]] = mul nsw <16 x i32> [[TMP35]], [[TMP10]]
-; CHECK-INTERLEAVE1-NEXT: [[TMP36]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP28]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP19:%.*]] = mul nsw <16 x i32> [[TMP17]], [[TMP44]]
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP19]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[STRIDED_VEC10]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP22:%.*]] = mul nsw <16 x i32> [[TMP20]], [[TMP44]]
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP22]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[STRIDED_VEC11]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP25:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP44]]
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE18]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP25]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP26:%.*]] = sext <16 x i8> [[STRIDED_VEC12]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP28:%.*]] = mul nsw <16 x i32> [[TMP26]], [[TMP44]]
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE19]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP28]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP29:%.*]] = sext <16 x i8> [[STRIDED_VEC13]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP31:%.*]] = mul nsw <16 x i32> [[TMP29]], [[TMP44]]
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE20]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP31]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP32:%.*]] = sext <16 x i8> [[STRIDED_VEC14]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP34:%.*]] = mul nsw <16 x i32> [[TMP32]], [[TMP44]]
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP34]])
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP37]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK-INTERLEAVE1: middle.block:
-; CHECK-INTERLEAVE1-NEXT: [[TMP38:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP36]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP33]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP40:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP30]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP27]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP24]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP43:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP21]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP44:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP18]])
-; CHECK-INTERLEAVE1-NEXT: [[TMP45:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP15]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP36:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE21]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP45:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE20]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP38:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE19]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE18]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP40:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE17]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE16]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE15]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP43:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-INTERLEAVE1-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[CMP_N]], label [[FOR_COND_FOR_COND_CLEANUP_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK-INTERLEAVE1: scalar.ph:
@@ -2429,7 +2458,6 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum,
; CHECK-INTERLEAVED-NEXT: [[VEC_PHI7:%.*]] = phi <4 x i32> [ [[TMP7]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = shl nsw i64 [[INDEX]], 3
; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[TMP11]]
; CHECK-INTERLEAVED-NEXT: [[WIDE_VEC:%.*]] = load <128 x i8>, ptr [[TMP12]], align 1
@@ -2441,42 +2469,43 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum,
; CHECK-INTERLEAVED-NEXT: [[STRIDED_VEC12:%.*]] = shufflevector <128 x i8> [[WIDE_VEC]], <128 x i8> poison, <16 x i32> <i32 5, i32 13, i32 21, i32 29, i32 37, i32 45, i32 53, i32 61, i32 69, i32 77, i32 85, i32 93, i32 101, i32 109, i32 117, i32 125>
; CHECK-INTERLEAVED-NEXT: [[STRIDED_VEC13:%.*]] = shufflevector <128 x i8> [[WIDE_VEC]], <128 x i8> poison, <16 x i32> <i32 6, i32 14, i32 22, i32 30, i32 38, i32 46, i32 54, i32 62, i32 70, i32 78, i32 86, i32 94, i32 102, i32 110, i32 118, i32 126>
; CHECK-INTERLEAVED-NEXT: [[STRIDED_VEC14:%.*]] = shufflevector <128 x i8> [[WIDE_VEC]], <128 x i8> poison, <16 x i32> <i32 7, i32 15, i32 23, i32 31, i32 39, i32 47, i32 55, i32 63, i32 71, i32 79, i32 87, i32 95, i32 103, i32 111, i32 119, i32 127>
-; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[STRIDED_VEC]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = mul nsw <16 x i32> [[TMP13]], [[TMP10]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP14]])
-; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = sext <16 x i8> [[STRIDED_VEC8]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP15]], [[TMP10]]
+; CHECK-INTERLEAVED-NEXT: [[TMP35:%.*]] = sext <16 x i8> [[STRIDED_VEC]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP44:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP13:%.*]] = mul nsw <16 x i32> [[TMP35]], [[TMP44]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP13]])
+; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[STRIDED_VEC8]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP14]], [[TMP44]]
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE15]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI6]], <16 x i32> [[TMP16]])
; CHECK-INTERLEAVED-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[STRIDED_VEC9]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP18:%.*]] = mul nsw <16 x i32> [[TMP17]], [[TMP10]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP18]])
-; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = sext <16 x i8> [[STRIDED_VEC10]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = mul nsw <16 x i32> [[TMP19]], [[TMP10]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP20]])
-; CHECK-INTERLEAVED-NEXT: [[TMP21:%.*]] = sext <16 x i8> [[STRIDED_VEC11]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = mul nsw <16 x i32> [[TMP21]], [[TMP10]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE18]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP22]])
-; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[STRIDED_VEC12]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP24:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP10]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE19]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP24]])
-; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = sext <16 x i8> [[STRIDED_VEC13]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = mul nsw <16 x i32> [[TMP25]], [[TMP10]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE20]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP26]])
-; CHECK-INTERLEAVED-NEXT: [[TMP27:%.*]] = sext <16 x i8> [[STRIDED_VEC14]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = mul nsw <16 x i32> [[TMP27]], [[TMP10]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP28]])
+; CHECK-INTERLEAVED-NEXT: [[TMP19:%.*]] = mul nsw <16 x i32> [[TMP17]], [[TMP44]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP19]])
+; CHECK-INTERLEAVED-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[STRIDED_VEC10]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP22:%.*]] = mul nsw <16 x i32> [[TMP20]], [[TMP44]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP22]])
+; CHECK-INTERLEAVED-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[STRIDED_VEC11]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP25:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP44]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE18]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP25]])
+; CHECK-INTERLEAVED-NEXT: [[TMP26:%.*]] = sext <16 x i8> [[STRIDED_VEC12]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP28:%.*]] = mul nsw <16 x i32> [[TMP26]], [[TMP44]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE19]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP28]])
+; CHECK-INTERLEAVED-NEXT: [[TMP45:%.*]] = sext <16 x i8> [[STRIDED_VEC13]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = mul nsw <16 x i32> [[TMP45]], [[TMP44]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE20]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP31]])
+; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = sext <16 x i8> [[STRIDED_VEC14]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = mul nsw <16 x i32> [[TMP32]], [[TMP44]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP34]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVED-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK-INTERLEAVED: middle.block:
-; CHECK-INTERLEAVED-NEXT: [[TMP30:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE21]])
-; CHECK-INTERLEAVED-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE20]])
-; CHECK-INTERLEAVED-NEXT: [[TMP32:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE19]])
-; CHECK-INTERLEAVED-NEXT: [[TMP33:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE18]])
-; CHECK-INTERLEAVED-NEXT: [[TMP34:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE17]])
-; CHECK-INTERLEAVED-NEXT: [[TMP35:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE16]])
-; CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE15]])
-; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
+; CHECK-INTERLEAVED-NEXT: [[TMP36:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE21]])
+; CHECK-INTERLEAVED-NEXT: [[TMP37:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE20]])
+; CHECK-INTERLEAVED-NEXT: [[TMP38:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE19]])
+; CHECK-INTERLEAVED-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE18]])
+; CHECK-INTERLEAVED-NEXT: [[TMP40:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE17]])
+; CHECK-INTERLEAVED-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE16]])
+; CHECK-INTERLEAVED-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE15]])
+; CHECK-INTERLEAVED-NEXT: [[TMP43:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-INTERLEAVED-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[CMP_N]], label [[FOR_COND_FOR_COND_CLEANUP_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK-INTERLEAVED: scalar.ph:
@@ -2529,7 +2558,6 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum,
; CHECK-MAXBW-NEXT: [[VEC_PHI7:%.*]] = phi <4 x i32> [ [[TMP7]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 [[INDEX]]
; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
-; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-MAXBW-NEXT: [[TMP11:%.*]] = shl nsw i64 [[INDEX]], 3
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i64 [[TMP11]]
; CHECK-MAXBW-NEXT: [[WIDE_VEC:%.*]] = load <128 x i8>, ptr [[TMP12]], align 1
@@ -2541,42 +2569,43 @@ define dso_local void @not_dotp_high_register_pressure(ptr %a, ptr %b, ptr %sum,
; CHECK-MAXBW-NEXT: [[STRIDED_VEC12:%.*]] = shufflevector <128 x i8> [[WIDE_VEC]], <128 x i8> poison, <16 x i32> <i32 5, i32 13, i32 21, i32 29, i32 37, i32 45, i32 53, i32 61, i32 69, i32 77, i32 85, i32 93, i32 101, i32 109, i32 117, i32 125>
; CHECK-MAXBW-NEXT: [[STRIDED_VEC13:%.*]] = shufflevector <128 x i8> [[WIDE_VEC]], <128 x i8> poison, <16 x i32> <i32 6, i32 14, i32 22, i32 30, i32 38, i32 46, i32 54, i32 62, i32 70, i32 78, i32 86, i32 94, i32 102, i32 110, i32 118, i32 126>
; CHECK-MAXBW-NEXT: [[STRIDED_VEC14:%.*]] = shufflevector <128 x i8> [[WIDE_VEC]], <128 x i8> poison, <16 x i32> <i32 7, i32 15, i32 23, i32 31, i32 39, i32 47, i32 55, i32 63, i32 71, i32 79, i32 87, i32 95, i32 103, i32 111, i32 119, i32 127>
-; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = sext <16 x i8> [[STRIDED_VEC]] to <16 x i32>
-; CHECK-MAXBW-NEXT: [[TMP14:%.*]] = mul nsw <16 x i32> [[TMP13]], [[TMP10]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP14]])
-; CHECK-MAXBW-NEXT: [[TMP15:%.*]] = sext <16 x i8> [[STRIDED_VEC8]] to <16 x i32>
-; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP15]], [[TMP10]]
+; CHECK-MAXBW-NEXT: [[TMP35:%.*]] = sext <16 x i8> [[STRIDED_VEC]] to <16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP44:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = mul nsw <16 x i32> [[TMP35]], [[TMP44]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI7]], <16 x i32> [[TMP13]])
+; CHECK-MAXBW-NEXT: [[TMP14:%.*]] = sext <16 x i8> [[STRIDED_VEC8]] to <16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = mul nsw <16 x i32> [[TMP14]], [[TMP44]]
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE15]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI6]], <16 x i32> [[TMP16]])
; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = sext <16 x i8> [[STRIDED_VEC9]] to <16 x i32>
-; CHECK-MAXBW-NEXT: [[TMP18:%.*]] = mul nsw <16 x i32> [[TMP17]], [[TMP10]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP18]])
-; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = sext <16 x i8> [[STRIDED_VEC10]] to <16 x i32>
-; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = mul nsw <16 x i32> [[TMP19]], [[TMP10]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP20]])
-; CHECK-MAXBW-NEXT: [[TMP21:%.*]] = sext <16 x i8> [[STRIDED_VEC11]] to <16 x i32>
-; CHECK-MAXBW-NEXT: [[TMP22:%.*]] = mul nsw <16 x i32> [[TMP21]], [[TMP10]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE18]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP22]])
-; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[STRIDED_VEC12]] to <16 x i32>
-; CHECK-MAXBW-NEXT: [[TMP24:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP10]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE19]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP24]])
-; CHECK-MAXBW-NEXT: [[TMP25:%.*]] = sext <16 x i8> [[STRIDED_VEC13]] to <16 x i32>
-; CHECK-MAXBW-NEXT: [[TMP26:%.*]] = mul nsw <16 x i32> [[TMP25]], [[TMP10]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE20]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP26]])
-; CHECK-MAXBW-NEXT: [[TMP27:%.*]] = sext <16 x i8> [[STRIDED_VEC14]] to <16 x i32>
-; CHECK-MAXBW-NEXT: [[TMP28:%.*]] = mul nsw <16 x i32> [[TMP27]], [[TMP10]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP28]])
+; CHECK-MAXBW-NEXT: [[TMP19:%.*]] = mul nsw <16 x i32> [[TMP17]], [[TMP44]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE16]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI5]], <16 x i32> [[TMP19]])
+; CHECK-MAXBW-NEXT: [[TMP20:%.*]] = sext <16 x i8> [[STRIDED_VEC10]] to <16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP22:%.*]] = mul nsw <16 x i32> [[TMP20]], [[TMP44]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE17]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI4]], <16 x i32> [[TMP22]])
+; CHECK-MAXBW-NEXT: [[TMP23:%.*]] = sext <16 x i8> [[STRIDED_VEC11]] to <16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP25:%.*]] = mul nsw <16 x i32> [[TMP23]], [[TMP44]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE18]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP25]])
+; CHECK-MAXBW-NEXT: [[TMP26:%.*]] = sext <16 x i8> [[STRIDED_VEC12]] to <16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP28:%.*]] = mul nsw <16 x i32> [[TMP26]], [[TMP44]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE19]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP28]])
+; CHECK-MAXBW-NEXT: [[TMP45:%.*]] = sext <16 x i8> [[STRIDED_VEC13]] to <16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP31:%.*]] = mul nsw <16 x i32> [[TMP45]], [[TMP44]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE20]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP31]])
+; CHECK-MAXBW-NEXT: [[TMP32:%.*]] = sext <16 x i8> [[STRIDED_VEC14]] to <16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP34:%.*]] = mul nsw <16 x i32> [[TMP32]], [[TMP44]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE21]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP34]])
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-MAXBW-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-MAXBW-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]]
+; CHECK-MAXBW-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]]
; CHECK-MAXBW: middle.block:
-; CHECK-MAXBW-NEXT: [[TMP30:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE21]])
-; CHECK-MAXBW-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE20]])
-; CHECK-MAXBW-NEXT: [[TMP32:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE19]])
-; CHECK-MAXBW-NEXT: [[TMP33:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE18]])
-; CHECK-MAXBW-NEXT: [[TMP34:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE17]])
-; CHECK-MAXBW-NEXT: [[TMP35:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE16]])
-; CHECK-MAXBW-NEXT: [[TMP36:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE15]])
-; CHECK-MAXBW-NEXT: [[TMP37:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
+; CHECK-MAXBW-NEXT: [[TMP36:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE21]])
+; CHECK-MAXBW-NEXT: [[TMP37:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE20]])
+; CHECK-MAXBW-NEXT: [[TMP38:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE19]])
+; CHECK-MAXBW-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE18]])
+; CHECK-MAXBW-NEXT: [[TMP40:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE17]])
+; CHECK-MAXBW-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE16]])
+; CHECK-MAXBW-NEXT: [[TMP42:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE15]])
+; CHECK-MAXBW-NEXT: [[TMP43:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label [[FOR_COND_FOR_COND_CLEANUP_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK-MAXBW: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-interleave.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-interleave.ll
index b308b92..bd9fae6 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-interleave.ll
@@ -23,12 +23,12 @@ define i32 @partial_reduce_with_non_constant_start_value(ptr %src, i32 %rdx.star
; IC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 16
; IC2-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
; IC2-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
-; IC2-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; IC2-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
-; IC2-NEXT: [[TMP6:%.*]] = mul nuw nsw <16 x i32> [[TMP4]], [[TMP4]]
+; IC2-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; IC2-NEXT: [[TMP7:%.*]] = mul nuw nsw <16 x i32> [[TMP5]], [[TMP5]]
-; IC2-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP6]])
-; IC2-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP7]])
+; IC2-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP7]])
+; IC2-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; IC2-NEXT: [[TMP6:%.*]] = mul nuw nsw <16 x i32> [[TMP8]], [[TMP8]]
+; IC2-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP6]])
; IC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; IC2-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; IC2-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
@@ -80,18 +80,18 @@ define i32 @partial_reduce_with_non_constant_start_value(ptr %src, i32 %rdx.star
; IC4-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; IC4-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1
; IC4-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
-; IC4-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; IC4-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; IC4-NEXT: [[TMP13:%.*]] = mul nuw nsw <16 x i32> [[TMP9]], [[TMP9]]
+; IC4-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP13]])
; IC4-NEXT: [[TMP7:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
-; IC4-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD5]] to <16 x i32>
-; IC4-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD6]] to <16 x i32>
-; IC4-NEXT: [[TMP10:%.*]] = mul nuw nsw <16 x i32> [[TMP6]], [[TMP6]]
; IC4-NEXT: [[TMP11:%.*]] = mul nuw nsw <16 x i32> [[TMP7]], [[TMP7]]
-; IC4-NEXT: [[TMP12:%.*]] = mul nuw nsw <16 x i32> [[TMP8]], [[TMP8]]
-; IC4-NEXT: [[TMP13:%.*]] = mul nuw nsw <16 x i32> [[TMP9]], [[TMP9]]
-; IC4-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP10]])
; IC4-NEXT: [[PARTIAL_REDUCE7]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP11]])
+; IC4-NEXT: [[TMP10:%.*]] = zext <16 x i8> [[WIDE_LOAD5]] to <16 x i32>
+; IC4-NEXT: [[TMP12:%.*]] = mul nuw nsw <16 x i32> [[TMP10]], [[TMP10]]
; IC4-NEXT: [[PARTIAL_REDUCE8]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP12]])
-; IC4-NEXT: [[PARTIAL_REDUCE9]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP13]])
+; IC4-NEXT: [[TMP14:%.*]] = zext <16 x i8> [[WIDE_LOAD6]] to <16 x i32>
+; IC4-NEXT: [[TMP16:%.*]] = mul nuw nsw <16 x i32> [[TMP14]], [[TMP14]]
+; IC4-NEXT: [[PARTIAL_REDUCE9]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP16]])
; IC4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 64
; IC4-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; IC4-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll
index 7bb4715..6dae09e 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce-sub.ll
@@ -18,11 +18,11 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVE1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-INTERLEAVE1-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP10]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
-; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP3]], [[TMP1]]
+; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = mul <16 x i32> [[TMP2]], [[TMP3]]
; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = sub <16 x i32> zeroinitializer, [[TMP4]]
; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]])
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
@@ -48,19 +48,19 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP7]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP7]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP2:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP14]], i32 16
+; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[TMP14]], i32 16
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD3:%.*]] = load <16 x i8>, ptr [[TMP14]], align 1
-; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
-; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = mul <16 x i32> [[TMP6]], [[TMP2]]
-; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = mul <16 x i32> [[TMP15]], [[TMP3]]
-; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = sub <16 x i32> zeroinitializer, [[TMP8]]
-; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = sub <16 x i32> zeroinitializer, [[TMP9]]
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP10]])
+; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD4:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
+; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD3]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = mul <16 x i32> [[TMP4]], [[TMP5]]
+; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = sub <16 x i32> zeroinitializer, [[TMP6]]
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP15]])
+; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = zext <16 x i8> [[WIDE_LOAD4]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = mul <16 x i32> [[TMP8]], [[TMP9]]
+; CHECK-INTERLEAVED-NEXT: [[TMP11:%.*]] = sub <16 x i32> zeroinitializer, [[TMP10]]
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE5]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP11]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -78,27 +78,27 @@ define i32 @dotp(ptr %a, ptr %b) #0 {
; CHECK-MAXBW-NEXT: br label [[ENTRY:%.*]]
; CHECK-MAXBW: vector.ph:
; CHECK-MAXBW-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
-; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 8
+; CHECK-MAXBW-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-MAXBW-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
-; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[ENTRY]] ], [ [[PARTIAL_REDUCE:%.*]], [[FOR_BODY]] ]
+; CHECK-MAXBW-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, [[ENTRY]] ], [ [[PARTIAL_REDUCE:%.*]], [[FOR_BODY]] ]
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]]
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i8>, ptr [[TMP7]], align 1
-; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD]] to <vscale x 8 x i32>
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP7]], align 1
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[B]], i64 [[IV]]
-; CHECK-MAXBW-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x i8>, ptr [[TMP10]], align 1
-; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = zext <vscale x 8 x i8> [[WIDE_LOAD1]] to <vscale x 8 x i32>
-; CHECK-MAXBW-NEXT: [[TMP13:%.*]] = mul <vscale x 8 x i32> [[TMP12]], [[TMP9]]
-; CHECK-MAXBW-NEXT: [[TMP14:%.*]] = sub <vscale x 8 x i32> zeroinitializer, [[TMP13]]
-; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 2 x i32> @llvm.vector.partial.reduce.add.nxv2i32.nxv8i32(<vscale x 2 x i32> [[VEC_PHI]], <vscale x 8 x i32> [[TMP14]])
+; CHECK-MAXBW-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 16 x i8>, ptr [[TMP10]], align 1
+; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD1]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP5:%.*]] = zext <vscale x 16 x i8> [[WIDE_LOAD]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[TMP6:%.*]] = mul <vscale x 16 x i32> [[TMP4]], [[TMP5]]
+; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = sub <vscale x 16 x i32> zeroinitializer, [[TMP6]]
+; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP8]])
; CHECK-MAXBW-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP3]]
; CHECK-MAXBW-NEXT: [[TMP16:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK-MAXBW: middle.block:
-; CHECK-MAXBW-NEXT: [[TMP17:%.*]] = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> [[PARTIAL_REDUCE]])
+; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[PARTIAL_REDUCE]])
; CHECK-MAXBW-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-MAXBW-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH:%.*]]
; CHECK-MAXBW: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll
index 70532ad..46ec858 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/partial-reduce.ll
@@ -45,8 +45,8 @@ define i32 @zext_add_reduc_i8_i32_sve(ptr %a) #0 {
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]])
+; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP4]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -138,8 +138,8 @@ define i32 @zext_add_reduc_i8_i32_neon(ptr %a) #2 {
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]])
+; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP4]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -227,8 +227,8 @@ define i64 @zext_add_reduc_i8_i64(ptr %a) #0 {
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI]], <16 x i64> [[TMP4]])
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i64>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v16i64(<2 x i64> [[VEC_PHI1]], <16 x i64> [[TMP5]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -321,8 +321,8 @@ define i64 @zext_add_reduc_i16_i64(ptr %a) #0 {
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[TMP1]], align 2
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i16>, ptr [[TMP3]], align 2
; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <8 x i16> [[WIDE_LOAD]] to <8 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <8 x i16> [[WIDE_LOAD2]] to <8 x i64>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI]], <8 x i64> [[TMP4]])
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <8 x i16> [[WIDE_LOAD2]] to <8 x i64>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v8i64(<2 x i64> [[VEC_PHI1]], <8 x i64> [[TMP5]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -421,12 +421,12 @@ define i32 @zext_add_reduc_i8_i32_has_neon_dotprod(ptr %a) #1 {
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load <16 x i8>, ptr [[TMP8]], align 1
; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = zext <16 x i8> [[WIDE_LOAD5]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext <16 x i8> [[WIDE_LOAD6]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]])
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP5]])
+; CHECK-INTERLEAVED-NEXT: [[TMP10:%.*]] = zext <16 x i8> [[WIDE_LOAD5]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE8]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP10]])
+; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = zext <16 x i8> [[WIDE_LOAD6]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE9]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI3]], <16 x i32> [[TMP7]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 64
; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -811,8 +811,8 @@ define i32 @sext_add_reduc_i8_i32(ptr %a) #0 {
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP1]], align 1
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD2:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1
; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP4]])
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = sext <16 x i8> [[WIDE_LOAD2]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE3]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI1]], <16 x i32> [[TMP5]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -1000,11 +1000,10 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 {
; CHECK-INTERLEAVE1: vector.ph:
; CHECK-INTERLEAVE1-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], 16
; CHECK-INTERLEAVE1-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[N_MOD_VF]]
-; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[C]], i64 0
-; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
; CHECK-INTERLEAVE1-NEXT: [[TMP1:%.*]] = add i32 [[D]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: [[TMP2:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[A]], i32 0
-; CHECK-INTERLEAVE1-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[C]], i64 0
+; CHECK-INTERLEAVE1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
; CHECK-INTERLEAVE1-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-INTERLEAVE1: vector.body:
; CHECK-INTERLEAVE1-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ]
@@ -1012,7 +1011,8 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 {
; CHECK-INTERLEAVE1-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[D]], [[INDEX]]
; CHECK-INTERLEAVE1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]]
; CHECK-INTERLEAVE1-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP4]], align 1
-; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]])
+; CHECK-INTERLEAVE1-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32>
+; CHECK-INTERLEAVE1-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]])
; CHECK-INTERLEAVE1-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-INTERLEAVE1-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-INTERLEAVE1-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
@@ -1031,23 +1031,23 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 {
; CHECK-INTERLEAVED: vector.ph:
; CHECK-INTERLEAVED-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP2]], 32
; CHECK-INTERLEAVED-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP2]], [[N_MOD_VF]]
+; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = add i32 [[D]], [[N_VEC]]
+; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[A]], i32 0
; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[C]], i64 0
; CHECK-INTERLEAVED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
-; CHECK-INTERLEAVED-NEXT: [[TMP1:%.*]] = add i32 [[D]], [[N_VEC]]
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = insertelement <4 x i32> zeroinitializer, i32 [[A]], i32 0
-; CHECK-INTERLEAVED-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32>
; CHECK-INTERLEAVED-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK-INTERLEAVED: vector.body:
; CHECK-INTERLEAVED-NEXT: [[VEC_PHI1:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[TMP22:%.*]], [[VECTOR_BODY]] ]
-; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP5]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
+; CHECK-INTERLEAVED-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ [[TMP3]], [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE2:%.*]], [[VECTOR_BODY]] ]
; CHECK-INTERLEAVED-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[D]], [[VEC_PHI1]]
; CHECK-INTERLEAVED-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]]
; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 16
; CHECK-INTERLEAVED-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP4]], align 1
; CHECK-INTERLEAVED-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP6]], align 1
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]])
-; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE2]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP3]])
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i32>
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP5]])
+; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE2]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI2]], <16 x i32> [[TMP5]])
; CHECK-INTERLEAVED-NEXT: [[TMP22]] = add nuw i32 [[VEC_PHI1]], 32
; CHECK-INTERLEAVED-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP22]], [[N_VEC]]
; CHECK-INTERLEAVED-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
@@ -1071,11 +1071,10 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 {
; CHECK-MAXBW-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP3]], 16
; CHECK-MAXBW-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[TMP0]], [[TMP4]]
; CHECK-MAXBW-NEXT: [[N_VEC:%.*]] = sub i32 [[TMP0]], [[N_MOD_VF]]
-; CHECK-MAXBW-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[C]], i64 0
-; CHECK-MAXBW-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
; CHECK-MAXBW-NEXT: [[TMP7:%.*]] = add i32 [[D]], [[N_VEC]]
; CHECK-MAXBW-NEXT: [[TMP8:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[A]], i32 0
-; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = zext <vscale x 16 x i8> [[BROADCAST_SPLAT]] to <vscale x 16 x i32>
+; CHECK-MAXBW-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[C]], i64 0
+; CHECK-MAXBW-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[BROADCAST_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
; CHECK-MAXBW-NEXT: br label [[FOR_BODY:%.*]]
; CHECK-MAXBW: vector.body:
; CHECK-MAXBW-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ]
@@ -1083,6 +1082,7 @@ define i32 @add_of_loop_invariant_zext(i32 %a, ptr %b, i8 %c, i32 %d) #0 {
; CHECK-MAXBW-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[D]], [[INDEX]]
; CHECK-MAXBW-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[B]], i32 [[OFFSET_IDX]]
; CHECK-MAXBW-NEXT: store <vscale x 16 x i8> zeroinitializer, ptr [[TMP10]], align 1
+; CHECK-MAXBW-NEXT: [[TMP9:%.*]] = zext <vscale x 16 x i8> [[BROADCAST_SPLAT]] to <vscale x 16 x i32>
; CHECK-MAXBW-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 4 x i32> @llvm.vector.partial.reduce.add.nxv4i32.nxv16i32(<vscale x 4 x i32> [[VEC_PHI]], <vscale x 16 x i32> [[TMP9]])
; CHECK-MAXBW-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP4]]
; CHECK-MAXBW-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
@@ -1164,12 +1164,12 @@ define i64 @sext_reduction_i32_to_i64(ptr %arr, i64 %n) #1 {
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i32>, ptr [[TMP14]], align 4
; CHECK-INTERLEAVED-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4
; CHECK-INTERLEAVED-NEXT: [[TMP15:%.*]] = sext <4 x i32> [[WIDE_LOAD]] to <4 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = sext <4 x i32> [[WIDE_LOAD4]] to <4 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = sext <4 x i32> [[WIDE_LOAD5]] to <4 x i64>
-; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = sext <4 x i32> [[WIDE_LOAD6]] to <4 x i64>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v4i64(<2 x i64> [[VEC_PHI]], <4 x i64> [[TMP15]])
+; CHECK-INTERLEAVED-NEXT: [[TMP5:%.*]] = sext <4 x i32> [[WIDE_LOAD4]] to <4 x i64>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE7]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v4i64(<2 x i64> [[VEC_PHI1]], <4 x i64> [[TMP5]])
+; CHECK-INTERLEAVED-NEXT: [[TMP6:%.*]] = sext <4 x i32> [[WIDE_LOAD5]] to <4 x i64>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE8]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v4i64(<2 x i64> [[VEC_PHI2]], <4 x i64> [[TMP6]])
+; CHECK-INTERLEAVED-NEXT: [[TMP7:%.*]] = sext <4 x i32> [[WIDE_LOAD6]] to <4 x i64>
; CHECK-INTERLEAVED-NEXT: [[PARTIAL_REDUCE9]] = call <2 x i64> @llvm.vector.partial.reduce.add.v2i64.v4i64(<2 x i64> [[VEC_PHI3]], <4 x i64> [[TMP7]])
; CHECK-INTERLEAVED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-INTERLEAVED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr33053.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr33053.ll
index ebf4a4f..ab1486d 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/pr33053.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr33053.ll
@@ -37,7 +37,7 @@ for.end: ; preds = %for.body
ret i32 %conv27
}
-attributes #0 = { norecurse nounwind readonly "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { norecurse nounwind readonly "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+neon" "use-soft-float"="false" }
!llvm.ident = !{!0}
!0 = !{!"clang"}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/reg-usage.ll b/llvm/test/Transforms/LoopVectorize/AArch64/reg-usage.ll
index 25ee100..70685c1 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/reg-usage.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/reg-usage.ll
@@ -192,7 +192,7 @@ define i32 @dotp_unrolled(i32 %num_out, i64 %num_in, ptr %a, ptr %b) {
; CHECK: LV(REG): VF = 16
; CHECK-NEXT: LV(REG): Found max usage: 2 item
; CHECK-NEXT: LV(REG): RegisterClass: Generic::ScalarRC, 9 registers
-; CHECK-NEXT: LV(REG): RegisterClass: Generic::VectorRC, 12 registers
+; CHECK-NEXT: LV(REG): RegisterClass: Generic::VectorRC, 6 registers
; CHECK-NEXT: LV(REG): Found invariant usage: 1 item
entry:
br label %for.body
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
index 786a2aa..28d2a27 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
@@ -1630,4 +1630,4 @@ for.body: ; preds = %for.body, %entry
}
attributes #1 = { "target-features"="+sve" vscale_range(1, 16) }
-attributes #0 = { "unsafe-fp-math"="true" "target-features"="+sve" vscale_range(1, 16) }
+attributes #0 = { "target-features"="+sve" vscale_range(1, 16) }
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll b/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll
index d4e5dea..49f663f 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/vplan-printing.ll
@@ -23,18 +23,15 @@ define i32 @print_partial_reduction(ptr %a, ptr %b) {
; CHECK-NEXT: <x1> vector loop: {
; CHECK-NEXT: vector.body:
; CHECK-NEXT: EMIT vp<[[CAN_IV:%.+]]> = CANONICAL-INDUCTION ir<0>, vp<[[CAN_IV_NEXT:%.+]]>
-; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<[[ACC:%.+]]> = phi vp<[[RDX_START]]>, ir<[[REDUCE:%.+]]> (VF scaled by 1/4)
+; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<[[ACC:%.+]]> = phi vp<[[RDX_START]]>, vp<[[REDUCE:%.+]]> (VF scaled by 1/4)
; CHECK-NEXT: vp<[[STEPS:%.+]]> = SCALAR-STEPS vp<[[CAN_IV]]>, ir<1>, vp<[[VF]]>
; CHECK-NEXT: CLONE ir<%gep.a> = getelementptr ir<%a>, vp<[[STEPS]]>
; CHECK-NEXT: vp<[[PTR_A:%.+]]> = vector-pointer ir<%gep.a>
; CHECK-NEXT: WIDEN ir<%load.a> = load vp<[[PTR_A]]>
-; CHECK-NEXT: WIDEN-CAST ir<%ext.a> = zext ir<%load.a> to i32
; CHECK-NEXT: CLONE ir<%gep.b> = getelementptr ir<%b>, vp<[[STEPS]]>
; CHECK-NEXT: vp<[[PTR_B:%.+]]> = vector-pointer ir<%gep.b>
; CHECK-NEXT: WIDEN ir<%load.b> = load vp<[[PTR_B]]>
-; CHECK-NEXT: WIDEN-CAST ir<%ext.b> = zext ir<%load.b> to i32
-; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%ext.b>, ir<%ext.a>
-; CHECK-NEXT: PARTIAL-REDUCE ir<[[REDUCE]]> = add ir<[[ACC]]>, ir<%mul>
+; CHECK-NEXT: EXPRESSION vp<[[REDUCE]]> = ir<[[ACC]]> + partial.reduce.add (mul (ir<%load.b> zext to i32), (ir<%load.a> zext to i32))
; CHECK-NEXT: EMIT vp<[[CAN_IV_NEXT]]> = add nuw vp<[[CAN_IV]]>, vp<[[VFxUF]]>
; CHECK-NEXT: EMIT branch-on-count vp<[[CAN_IV_NEXT]]>, vp<[[VEC_TC]]>
; CHECK-NEXT: No successors
@@ -42,7 +39,7 @@ define i32 @print_partial_reduction(ptr %a, ptr %b) {
; CHECK-NEXT: Successor(s): middle.block
; CHECK-EMPTY:
; CHECK-NEXT: middle.block:
-; CHECK-NEXT: EMIT vp<[[RED_RESULT:%.+]]> = compute-reduction-result ir<[[ACC]]>, ir<[[REDUCE]]>
+; CHECK-NEXT: EMIT vp<[[RED_RESULT:%.+]]> = compute-reduction-result ir<[[ACC]]>, vp<[[REDUCE]]>
; CHECK-NEXT: EMIT vp<[[CMP:%.+]]> = icmp eq ir<1024>, vp<[[VEC_TC]]>
; CHECK-NEXT: EMIT branch-on-cond vp<[[CMP]]>
; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph
@@ -89,10 +86,10 @@ define i32 @print_partial_reduction(ptr %a, ptr %b) {
; CHECK-NEXT: WIDEN-REDUCTION-PHI ir<%accum> = phi vp<[[RDX_START]]>, ir<%add> (VF scaled by 1/4)
; CHECK-NEXT: CLONE ir<%gep.a> = getelementptr ir<%a>, vp<[[EP_IV]]>
; CHECK-NEXT: WIDEN ir<%load.a> = load ir<%gep.a>
-; CHECK-NEXT: WIDEN-CAST ir<%ext.a> = zext ir<%load.a> to i32
; CHECK-NEXT: CLONE ir<%gep.b> = getelementptr ir<%b>, vp<[[EP_IV]]>
; CHECK-NEXT: WIDEN ir<%load.b> = load ir<%gep.b>
; CHECK-NEXT: WIDEN-CAST ir<%ext.b> = zext ir<%load.b> to i32
+; CHECK-NEXT: WIDEN-CAST ir<%ext.a> = zext ir<%load.a> to i32
; CHECK-NEXT: WIDEN ir<%mul> = mul ir<%ext.b>, ir<%ext.a>
; CHECK-NEXT: PARTIAL-REDUCE ir<%add> = add ir<%accum>, ir<%mul>
; CHECK-NEXT: EMIT vp<[[EP_IV_NEXT:%.+]]> = add nuw vp<[[EP_IV]]>, ir<16>
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/arm-ieee-vectorize.ll b/llvm/test/Transforms/LoopVectorize/ARM/arm-ieee-vectorize.ll
index 0f398a6..2579918 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/arm-ieee-vectorize.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/arm-ieee-vectorize.ll
@@ -327,5 +327,5 @@ for.end: ; preds = %for.body, %entry
declare float @fabsf(float)
-attributes #1 = { nounwind readnone "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a8" "target-features"="+dsp,+neon,+vfp3" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind readnone "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a8" "target-features"="+dsp,+neon,+vfp3" "unsafe-fp-math"="true" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a8" "target-features"="+dsp,+neon,+vfp3" "use-soft-float"="false" }
+attributes #2 = { nounwind readnone "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-a8" "target-features"="+dsp,+neon,+vfp3" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll b/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll
index 0a9b1e0..61e3a18 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/partial-reduce-dot-product.ll
@@ -60,10 +60,10 @@ define i32 @vqdot(ptr %a, ptr %b) #0 {
; ZVQDOTQ-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 1 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; ZVQDOTQ-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP6]], align 1
-; ZVQDOTQ-NEXT: [[TMP8:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
; ZVQDOTQ-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; ZVQDOTQ-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1
; ZVQDOTQ-NEXT: [[TMP11:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
+; ZVQDOTQ-NEXT: [[TMP8:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
; ZVQDOTQ-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]]
; ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 1 x i32> @llvm.vector.partial.reduce.add.nxv1i32.nxv4i32(<vscale x 1 x i32> [[VEC_PHI]], <vscale x 4 x i32> [[TMP12]])
; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
@@ -125,17 +125,17 @@ define i32 @vqdot(ptr %a, ptr %b) #0 {
; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1
-; FIXED-ZVQDOTQ-NEXT: [[TMP3:%.*]] = sext <8 x i8> [[WIDE_LOAD]] to <8 x i32>
-; FIXED-ZVQDOTQ-NEXT: [[TMP4:%.*]] = sext <8 x i8> [[WIDE_LOAD2]] to <8 x i32>
; FIXED-ZVQDOTQ-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1
; FIXED-ZVQDOTQ-NEXT: [[TMP8:%.*]] = sext <8 x i8> [[WIDE_LOAD3]] to <8 x i32>
-; FIXED-ZVQDOTQ-NEXT: [[TMP9:%.*]] = sext <8 x i8> [[WIDE_LOAD4]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP3:%.*]] = sext <8 x i8> [[WIDE_LOAD]] to <8 x i32>
; FIXED-ZVQDOTQ-NEXT: [[TMP10:%.*]] = mul <8 x i32> [[TMP8]], [[TMP3]]
-; FIXED-ZVQDOTQ-NEXT: [[TMP11:%.*]] = mul <8 x i32> [[TMP9]], [[TMP4]]
; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI]], <8 x i32> [[TMP10]])
+; FIXED-ZVQDOTQ-NEXT: [[TMP9:%.*]] = sext <8 x i8> [[WIDE_LOAD4]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP12:%.*]] = sext <8 x i8> [[WIDE_LOAD2]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP11:%.*]] = mul <8 x i32> [[TMP9]], [[TMP12]]
; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI1]], <8 x i32> [[TMP11]])
; FIXED-ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; FIXED-ZVQDOTQ-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -222,10 +222,10 @@ define i32 @vqdotu(ptr %a, ptr %b) #0 {
; ZVQDOTQ-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 1 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; ZVQDOTQ-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP6]], align 1
-; ZVQDOTQ-NEXT: [[TMP8:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
; ZVQDOTQ-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; ZVQDOTQ-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1
; ZVQDOTQ-NEXT: [[TMP11:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
+; ZVQDOTQ-NEXT: [[TMP8:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
; ZVQDOTQ-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]]
; ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 1 x i32> @llvm.vector.partial.reduce.add.nxv1i32.nxv4i32(<vscale x 1 x i32> [[VEC_PHI]], <vscale x 4 x i32> [[TMP12]])
; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
@@ -287,17 +287,17 @@ define i32 @vqdotu(ptr %a, ptr %b) #0 {
; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1
-; FIXED-ZVQDOTQ-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[WIDE_LOAD]] to <8 x i32>
-; FIXED-ZVQDOTQ-NEXT: [[TMP4:%.*]] = zext <8 x i8> [[WIDE_LOAD2]] to <8 x i32>
; FIXED-ZVQDOTQ-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1
; FIXED-ZVQDOTQ-NEXT: [[TMP8:%.*]] = zext <8 x i8> [[WIDE_LOAD3]] to <8 x i32>
-; FIXED-ZVQDOTQ-NEXT: [[TMP9:%.*]] = zext <8 x i8> [[WIDE_LOAD4]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[WIDE_LOAD]] to <8 x i32>
; FIXED-ZVQDOTQ-NEXT: [[TMP10:%.*]] = mul <8 x i32> [[TMP8]], [[TMP3]]
-; FIXED-ZVQDOTQ-NEXT: [[TMP11:%.*]] = mul <8 x i32> [[TMP9]], [[TMP4]]
; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI]], <8 x i32> [[TMP10]])
+; FIXED-ZVQDOTQ-NEXT: [[TMP9:%.*]] = zext <8 x i8> [[WIDE_LOAD4]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP12:%.*]] = zext <8 x i8> [[WIDE_LOAD2]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP11:%.*]] = mul <8 x i32> [[TMP9]], [[TMP12]]
; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI1]], <8 x i32> [[TMP11]])
; FIXED-ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; FIXED-ZVQDOTQ-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
@@ -384,10 +384,10 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 {
; ZVQDOTQ-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 1 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; ZVQDOTQ-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP6]], align 1
-; ZVQDOTQ-NEXT: [[TMP8:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
; ZVQDOTQ-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; ZVQDOTQ-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1
; ZVQDOTQ-NEXT: [[TMP11:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
+; ZVQDOTQ-NEXT: [[TMP8:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
; ZVQDOTQ-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]]
; ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 1 x i32> @llvm.vector.partial.reduce.add.nxv1i32.nxv4i32(<vscale x 1 x i32> [[VEC_PHI]], <vscale x 4 x i32> [[TMP12]])
; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
@@ -449,18 +449,18 @@ define i32 @vqdotsu(ptr %a, ptr %b) #0 {
; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1
-; FIXED-ZVQDOTQ-NEXT: [[TMP3:%.*]] = zext <8 x i8> [[WIDE_LOAD]] to <8 x i32>
-; FIXED-ZVQDOTQ-NEXT: [[TMP4:%.*]] = zext <8 x i8> [[WIDE_LOAD2]] to <8 x i32>
; FIXED-ZVQDOTQ-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1
-; FIXED-ZVQDOTQ-NEXT: [[TMP8:%.*]] = sext <8 x i8> [[WIDE_LOAD3]] to <8 x i32>
-; FIXED-ZVQDOTQ-NEXT: [[TMP9:%.*]] = sext <8 x i8> [[WIDE_LOAD4]] to <8 x i32>
-; FIXED-ZVQDOTQ-NEXT: [[TMP10:%.*]] = mul <8 x i32> [[TMP8]], [[TMP3]]
+; FIXED-ZVQDOTQ-NEXT: [[TMP9:%.*]] = sext <8 x i8> [[WIDE_LOAD3]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP4:%.*]] = zext <8 x i8> [[WIDE_LOAD]] to <8 x i32>
; FIXED-ZVQDOTQ-NEXT: [[TMP11:%.*]] = mul <8 x i32> [[TMP9]], [[TMP4]]
-; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI]], <8 x i32> [[TMP10]])
-; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI1]], <8 x i32> [[TMP11]])
+; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI]], <8 x i32> [[TMP11]])
+; FIXED-ZVQDOTQ-NEXT: [[TMP10:%.*]] = sext <8 x i8> [[WIDE_LOAD4]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP8:%.*]] = zext <8 x i8> [[WIDE_LOAD2]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP14:%.*]] = mul <8 x i32> [[TMP10]], [[TMP8]]
+; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI1]], <8 x i32> [[TMP14]])
; FIXED-ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; FIXED-ZVQDOTQ-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
@@ -545,10 +545,10 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 {
; ZVQDOTQ-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 1 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PARTIAL_REDUCE:%.*]], [[VECTOR_BODY]] ]
; ZVQDOTQ-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[A]], i64 [[INDEX]]
; ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP6]], align 1
-; ZVQDOTQ-NEXT: [[TMP8:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
; ZVQDOTQ-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; ZVQDOTQ-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i8>, ptr [[TMP9]], align 1
; ZVQDOTQ-NEXT: [[TMP11:%.*]] = zext <vscale x 4 x i8> [[WIDE_LOAD1]] to <vscale x 4 x i32>
+; ZVQDOTQ-NEXT: [[TMP8:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
; ZVQDOTQ-NEXT: [[TMP12:%.*]] = mul <vscale x 4 x i32> [[TMP11]], [[TMP8]]
; ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <vscale x 1 x i32> @llvm.vector.partial.reduce.add.nxv1i32.nxv4i32(<vscale x 1 x i32> [[VEC_PHI]], <vscale x 4 x i32> [[TMP12]])
; ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
@@ -610,18 +610,18 @@ define i32 @vqdotsu2(ptr %a, ptr %b) #0 {
; FIXED-ZVQDOTQ-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP0]], i32 8
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i8>, ptr [[TMP0]], align 1
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD2:%.*]] = load <8 x i8>, ptr [[TMP2]], align 1
-; FIXED-ZVQDOTQ-NEXT: [[TMP3:%.*]] = sext <8 x i8> [[WIDE_LOAD]] to <8 x i32>
-; FIXED-ZVQDOTQ-NEXT: [[TMP4:%.*]] = sext <8 x i8> [[WIDE_LOAD2]] to <8 x i32>
; FIXED-ZVQDOTQ-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[INDEX]]
; FIXED-ZVQDOTQ-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[TMP5]], i32 8
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD3:%.*]] = load <8 x i8>, ptr [[TMP5]], align 1
; FIXED-ZVQDOTQ-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i8>, ptr [[TMP7]], align 1
-; FIXED-ZVQDOTQ-NEXT: [[TMP8:%.*]] = zext <8 x i8> [[WIDE_LOAD3]] to <8 x i32>
-; FIXED-ZVQDOTQ-NEXT: [[TMP9:%.*]] = zext <8 x i8> [[WIDE_LOAD4]] to <8 x i32>
-; FIXED-ZVQDOTQ-NEXT: [[TMP10:%.*]] = mul <8 x i32> [[TMP8]], [[TMP3]]
+; FIXED-ZVQDOTQ-NEXT: [[TMP9:%.*]] = zext <8 x i8> [[WIDE_LOAD3]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP4:%.*]] = sext <8 x i8> [[WIDE_LOAD]] to <8 x i32>
; FIXED-ZVQDOTQ-NEXT: [[TMP11:%.*]] = mul <8 x i32> [[TMP9]], [[TMP4]]
-; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI]], <8 x i32> [[TMP10]])
-; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI1]], <8 x i32> [[TMP11]])
+; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI]], <8 x i32> [[TMP11]])
+; FIXED-ZVQDOTQ-NEXT: [[TMP10:%.*]] = zext <8 x i8> [[WIDE_LOAD4]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP8:%.*]] = sext <8 x i8> [[WIDE_LOAD2]] to <8 x i32>
+; FIXED-ZVQDOTQ-NEXT: [[TMP14:%.*]] = mul <8 x i32> [[TMP10]], [[TMP8]]
+; FIXED-ZVQDOTQ-NEXT: [[PARTIAL_REDUCE5]] = call <2 x i32> @llvm.vector.partial.reduce.add.v2i32.v8i32(<2 x i32> [[VEC_PHI1]], <8 x i32> [[TMP14]])
; FIXED-ZVQDOTQ-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; FIXED-ZVQDOTQ-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; FIXED-ZVQDOTQ-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/WebAssembly/partial-reduce-accumulate.ll b/llvm/test/Transforms/LoopVectorize/WebAssembly/partial-reduce-accumulate.ll
new file mode 100644
index 0000000..2338da5
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/WebAssembly/partial-reduce-accumulate.ll
@@ -0,0 +1,126 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -mattr=+simd128 -passes=loop-vectorize %s -S | FileCheck %s
+; RUN: opt -mattr=+simd128 -passes=loop-vectorize -vectorizer-maximize-bandwidth %s -S | FileCheck %s --check-prefix=CHECK-MAX-BANDWIDTH
+
+target triple = "wasm32"
+
+define hidden i32 @accumulate_add_u8_u8(ptr noundef readonly %a, ptr noundef readonly %b, i32 noundef %N) {
+; CHECK-LABEL: define hidden i32 @accumulate_add_u8_u8(
+; CHECK-SAME: ptr noundef readonly [[A:%.*]], ptr noundef readonly [[B:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N]], 4
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i32 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP2]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = zext <4 x i8> [[WIDE_LOAD]] to <4 x i32>
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i32 [[INDEX]]
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i8>, ptr [[TMP5]], align 1
+; CHECK-NEXT: [[TMP6:%.*]] = zext <4 x i8> [[WIDE_LOAD1]] to <4 x i32>
+; CHECK-NEXT: [[TMP7:%.*]] = add <4 x i32> [[VEC_PHI]], [[TMP3]]
+; CHECK-NEXT: [[TMP8]] = add <4 x i32> [[TMP7]], [[TMP6]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP10:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP8]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP10]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_COND_CLEANUP]]:
+; CHECK-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[ADD3:%.*]], %[[FOR_BODY]] ], [ [[TMP10]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[RESULT_0_LCSSA]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i32 [ [[INC:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[ADD3]], %[[FOR_BODY]] ], [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i32 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP11]] to i32
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i32 [[IV]]
+; CHECK-NEXT: [[TMP12:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
+; CHECK-NEXT: [[CONV2:%.*]] = zext i8 [[TMP12]] to i32
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[RED]], [[CONV]]
+; CHECK-NEXT: [[ADD3]] = add i32 [[ADD]], [[CONV2]]
+; CHECK-NEXT: [[INC]] = add nuw i32 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+;
+; CHECK-MAX-BANDWIDTH-LABEL: define hidden i32 @accumulate_add_u8_u8(
+; CHECK-MAX-BANDWIDTH-SAME: ptr noundef readonly [[A:%.*]], ptr noundef readonly [[B:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-MAX-BANDWIDTH-NEXT: [[ENTRY:.*]]:
+; CHECK-MAX-BANDWIDTH-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 16
+; CHECK-MAX-BANDWIDTH-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-MAX-BANDWIDTH: [[VECTOR_PH]]:
+; CHECK-MAX-BANDWIDTH-NEXT: [[N_MOD_VF:%.*]] = urem i32 [[N]], 16
+; CHECK-MAX-BANDWIDTH-NEXT: [[N_VEC:%.*]] = sub i32 [[N]], [[N_MOD_VF]]
+; CHECK-MAX-BANDWIDTH-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK-MAX-BANDWIDTH: [[VECTOR_BODY]]:
+; CHECK-MAX-BANDWIDTH-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-MAX-BANDWIDTH-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE2:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-MAX-BANDWIDTH-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i32 [[INDEX]]
+; CHECK-MAX-BANDWIDTH-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
+; CHECK-MAX-BANDWIDTH-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i32 [[INDEX]]
+; CHECK-MAX-BANDWIDTH-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP5]], align 1
+; CHECK-MAX-BANDWIDTH-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
+; CHECK-MAX-BANDWIDTH-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP3]])
+; CHECK-MAX-BANDWIDTH-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i32>
+; CHECK-MAX-BANDWIDTH-NEXT: [[PARTIAL_REDUCE2]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP6]])
+; CHECK-MAX-BANDWIDTH-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
+; CHECK-MAX-BANDWIDTH-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-MAX-BANDWIDTH-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-MAX-BANDWIDTH: [[MIDDLE_BLOCK]]:
+; CHECK-MAX-BANDWIDTH-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE2]])
+; CHECK-MAX-BANDWIDTH-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
+; CHECK-MAX-BANDWIDTH-NEXT: br i1 [[CMP_N]], label %[[FOR_COND_CLEANUP:.*]], label %[[SCALAR_PH]]
+; CHECK-MAX-BANDWIDTH: [[SCALAR_PH]]:
+; CHECK-MAX-BANDWIDTH-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-MAX-BANDWIDTH-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-MAX-BANDWIDTH-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK-MAX-BANDWIDTH: [[FOR_COND_CLEANUP]]:
+; CHECK-MAX-BANDWIDTH-NEXT: [[RESULT_0_LCSSA:%.*]] = phi i32 [ [[ADD3:%.*]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
+; CHECK-MAX-BANDWIDTH-NEXT: ret i32 [[RESULT_0_LCSSA]]
+; CHECK-MAX-BANDWIDTH: [[FOR_BODY]]:
+; CHECK-MAX-BANDWIDTH-NEXT: [[IV:%.*]] = phi i32 [ [[INC:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-MAX-BANDWIDTH-NEXT: [[RED:%.*]] = phi i32 [ [[ADD3]], %[[FOR_BODY]] ], [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ]
+; CHECK-MAX-BANDWIDTH-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i32 [[IV]]
+; CHECK-MAX-BANDWIDTH-NEXT: [[TMP11:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-MAX-BANDWIDTH-NEXT: [[CONV:%.*]] = zext i8 [[TMP11]] to i32
+; CHECK-MAX-BANDWIDTH-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i8, ptr [[B]], i32 [[IV]]
+; CHECK-MAX-BANDWIDTH-NEXT: [[TMP12:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
+; CHECK-MAX-BANDWIDTH-NEXT: [[CONV2:%.*]] = zext i8 [[TMP12]] to i32
+; CHECK-MAX-BANDWIDTH-NEXT: [[ADD:%.*]] = add i32 [[RED]], [[CONV]]
+; CHECK-MAX-BANDWIDTH-NEXT: [[ADD3]] = add i32 [[ADD]], [[CONV2]]
+; CHECK-MAX-BANDWIDTH-NEXT: [[INC]] = add nuw i32 [[IV]], 1
+; CHECK-MAX-BANDWIDTH-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
+; CHECK-MAX-BANDWIDTH-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_COND_CLEANUP]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+;
+entry:
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ ret i32 %add3
+
+for.body: ; preds = %entry, %for.body
+ %iv = phi i32 [ %inc, %for.body ], [ 0, %entry ]
+ %red = phi i32 [ %add3, %for.body ], [ 0, %entry ]
+ %arrayidx = getelementptr inbounds nuw i8, ptr %a, i32 %iv
+ %0 = load i8, ptr %arrayidx, align 1
+ %conv = zext i8 %0 to i32
+ %arrayidx1 = getelementptr inbounds nuw i8, ptr %b, i32 %iv
+ %1 = load i8, ptr %arrayidx1, align 1
+ %conv2 = zext i8 %1 to i32
+ %add = add i32 %red, %conv
+ %add3 = add i32 %add, %conv2
+ %inc = add nuw i32 %iv, 1
+ %exitcond.not = icmp eq i32 %inc, %N
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
diff --git a/llvm/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll b/llvm/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll
index 9168ebf..08d39ea 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll
@@ -214,7 +214,7 @@ for.end15: ; preds = %for.end.us, %entry
ret void
}
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "use-soft-float"="false" }
!3 = !{!4, !5}
!4 = !{!4}
diff --git a/llvm/test/Transforms/LoopVectorize/X86/int128_no_gather.ll b/llvm/test/Transforms/LoopVectorize/X86/int128_no_gather.ll
index 44e9d3e..b7f8be1 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/int128_no_gather.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/int128_no_gather.ll
@@ -71,6 +71,6 @@ declare i32 @printf(ptr, ...) #1
; Function Attrs: nounwind
declare i32 @puts(ptr nocapture readonly) #2
-attributes #0 = { noinline nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="skylake-avx512" "target-features"="+adx,+aes,+avx,+avx2,+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl,+bmi,+bmi2,+clflushopt,+clwb,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+mpx,+pclmul,+pku,+popcnt,+rdrnd,+rdseed,+rtm,+sgx,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsavec,+xsaveopt,+xsaves" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="skylake-avx512" "target-features"="+adx,+aes,+avx,+avx2,+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl,+bmi,+bmi2,+clflushopt,+clwb,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+mpx,+pclmul,+pku,+popcnt,+rdrnd,+rdseed,+rtm,+sgx,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsavec,+xsaveopt,+xsaves" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { noinline nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="skylake-avx512" "target-features"="+adx,+aes,+avx,+avx2,+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl,+bmi,+bmi2,+clflushopt,+clwb,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+mpx,+pclmul,+pku,+popcnt,+rdrnd,+rdseed,+rtm,+sgx,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsavec,+xsaveopt,+xsaves" "use-soft-float"="false" }
+attributes #1 = { nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="skylake-avx512" "target-features"="+adx,+aes,+avx,+avx2,+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl,+bmi,+bmi2,+clflushopt,+clwb,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+lzcnt,+mmx,+movbe,+mpx,+pclmul,+pku,+popcnt,+rdrnd,+rdseed,+rtm,+sgx,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsavec,+xsaveopt,+xsaves" "use-soft-float"="false" }
attributes #2 = { nounwind }
diff --git a/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll b/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll
index f066000..52e90e4 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/reduction-fastmath.ll
@@ -362,4 +362,4 @@ for.body:
br i1 %exitcond, label %for.cond.cleanup, label %for.body
}
-attributes #0 = { "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "unsafe-fp-math"="false" }
+attributes #0 = { "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" }
diff --git a/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll b/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll
index 005696a..e405fe7 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll
@@ -335,4 +335,4 @@ for.body: ; preds = %for.body.preheader,
br i1 %cmp, label %for.body, label %for.cond.cleanup.loopexit99
}
-attributes #0 = { norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="broadwell" "target-features"="+adx,+aes,+avx,+avx2,+avx512cd,+avx512f,+bmi,+bmi2,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+evex512,+lzcnt,+mmx,+movbe,+pclmul,+popcnt,+rdrnd,+rdseed,+rtm,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt,-vzeroupper" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="broadwell" "target-features"="+adx,+aes,+avx,+avx2,+avx512cd,+avx512f,+bmi,+bmi2,+cx16,+f16c,+fma,+fsgsbase,+fxsr,+evex512,+lzcnt,+mmx,+movbe,+pclmul,+popcnt,+rdrnd,+rdseed,+rtm,+sse,+sse2,+sse3,+sse4.1,+sse4.2,+ssse3,+x87,+xsave,+xsaveopt,-vzeroupper" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/LoopVectorize/X86/tail_folding_and_assume_safety.ll b/llvm/test/Transforms/LoopVectorize/X86/tail_folding_and_assume_safety.ll
index b98a2ea..c7550ca 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/tail_folding_and_assume_safety.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/tail_folding_and_assume_safety.ll
@@ -143,7 +143,7 @@ for.inc:
br i1 %exitcond, label %for.cond.cleanup, label %for.body, !llvm.loop !11
}
-attributes #0 = { norecurse nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { norecurse nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "use-soft-float"="false" }
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-profitable.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-profitable.ll
index 2c187ca..9471e4a 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-profitable.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-profitable.ll
@@ -72,7 +72,7 @@ for.end: ; preds = %for.body, %entry
ret void, !dbg !27
}
-attributes #0 = { nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+mmx,+sse,+sse2" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+mmx,+sse,+sse2" "use-soft-float"="false" }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!7, !8}
diff --git a/llvm/test/Transforms/LoopVectorize/diag-missing-instr-debug-loc.ll b/llvm/test/Transforms/LoopVectorize/diag-missing-instr-debug-loc.ll
index 8ce2a97..0656de4 100644
--- a/llvm/test/Transforms/LoopVectorize/diag-missing-instr-debug-loc.ll
+++ b/llvm/test/Transforms/LoopVectorize/diag-missing-instr-debug-loc.ll
@@ -50,7 +50,7 @@ for.body: ; preds = %for.body.preheader,
br i1 %exitcond, label %for.cond.cleanup, label %for.body, !dbg !9, !llvm.loop !18
}
-attributes #0 = { norecurse nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { norecurse nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3,+x87" "use-soft-float"="false" }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4}
diff --git a/llvm/test/Transforms/LoopVectorize/diag-with-hotness-info-2.ll b/llvm/test/Transforms/LoopVectorize/diag-with-hotness-info-2.ll
index 65bd36c..5e51624f 100644
--- a/llvm/test/Transforms/LoopVectorize/diag-with-hotness-info-2.ll
+++ b/llvm/test/Transforms/LoopVectorize/diag-with-hotness-info-2.ll
@@ -134,7 +134,7 @@ for.cond.cleanup:
ret void, !dbg !44
}
-attributes #0 = { norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3,+x87" "use-soft-float"="false" }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4, !5}
diff --git a/llvm/test/Transforms/LoopVectorize/diag-with-hotness-info.ll b/llvm/test/Transforms/LoopVectorize/diag-with-hotness-info.ll
index 4b7b714..77ec95a 100644
--- a/llvm/test/Transforms/LoopVectorize/diag-with-hotness-info.ll
+++ b/llvm/test/Transforms/LoopVectorize/diag-with-hotness-info.ll
@@ -145,7 +145,7 @@ for.body: ; preds = %entry, %for.body
br i1 %exitcond, label %for.cond.cleanup, label %for.body, !dbg !43, !llvm.loop !55
}
-attributes #0 = { norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { norecurse nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="core2" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+ssse3,+x87" "use-soft-float"="false" }
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4, !5}
diff --git a/llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll b/llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll
index 5cf99b8..3487331 100644
--- a/llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll
+++ b/llvm/test/Transforms/LoopVectorize/incorrect-dom-info.ll
@@ -135,7 +135,7 @@ thread-pre-split5: ; preds = %.lr.ph
ret void
}
-attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
!llvm.ident = !{!0}
diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
index 8efe29a..fc2e233 100644
--- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll
@@ -783,7 +783,7 @@ for.body: ; preds = %for.body, %entry
@SA = common global i32 0, align 4
@SB = common global float 0.000000e+00, align 4
-define void @int_float_struct(ptr nocapture readonly %A) #0 {
+define void @int_float_struct(ptr nocapture readonly %A) {
; CHECK-LABEL: @int_float_struct(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[VECTOR_PH:%.*]]
@@ -1546,5 +1546,3 @@ loop:
end:
ret void
}
-
-attributes #0 = { "unsafe-fp-math"="true" }
diff --git a/llvm/test/Transforms/LoopVectorize/metadata-width.ll b/llvm/test/Transforms/LoopVectorize/metadata-width.ll
index ddf9029..22243d9 100644
--- a/llvm/test/Transforms/LoopVectorize/metadata-width.ll
+++ b/llvm/test/Transforms/LoopVectorize/metadata-width.ll
@@ -89,7 +89,7 @@ for.end: ; preds = %for.body, %entry
ret void
}
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "use-soft-float"="false" }
!0 = !{!0, !1, !5}
!1 = !{!"llvm.loop.vectorize.width", i32 8}
diff --git a/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll b/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll
index a1fc1b8..87b5a0b 100644
--- a/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll
+++ b/llvm/test/Transforms/LoopVectorize/multiple-address-spaces.ll
@@ -56,4 +56,4 @@ for.end: ; preds = %for.body
ret i32 0
}
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/LoopVectorize/runtime-check-address-space.ll b/llvm/test/Transforms/LoopVectorize/runtime-check-address-space.ll
index 705bbab..64ae730 100644
--- a/llvm/test/Transforms/LoopVectorize/runtime-check-address-space.ll
+++ b/llvm/test/Transforms/LoopVectorize/runtime-check-address-space.ll
@@ -218,4 +218,4 @@ for.end: ; preds = %for.body, %entry
ret void
}
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/LoopVectorize/runtime-check-readonly-address-space.ll b/llvm/test/Transforms/LoopVectorize/runtime-check-readonly-address-space.ll
index 1effb10..872a3929 100644
--- a/llvm/test/Transforms/LoopVectorize/runtime-check-readonly-address-space.ll
+++ b/llvm/test/Transforms/LoopVectorize/runtime-check-readonly-address-space.ll
@@ -129,4 +129,4 @@ for.end: ; preds = %for.body
ret void
}
-attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "use-soft-float"="false" }
diff --git a/llvm/test/Transforms/LoopVectorize/scev-exitlim-crash.ll b/llvm/test/Transforms/LoopVectorize/scev-exitlim-crash.ll
index 8224d6b..137e098 100644
--- a/llvm/test/Transforms/LoopVectorize/scev-exitlim-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/scev-exitlim-crash.ll
@@ -104,8 +104,8 @@ for.end26: ; preds = %for.cond4.for.end26
}
declare i32 @fn2(double) #1
-attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "use-soft-float"="false" }
!0 = !{!"int", !1}
!1 = !{!"omnipotent char", !2}
diff --git a/llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll b/llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll
index e2f5007..cc187de 100644
--- a/llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll
+++ b/llvm/test/Transforms/MergeICmps/X86/int64-and-ptr.ll
@@ -35,11 +35,7 @@ if.end5: ; preds = %if.then, %entry
ret i1 %rez.0
}
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture)
; Function Attrs: argmemonly nounwind
-declare void @llvm.lifetime.end.p0(ptr nocapture) #1
-
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { argmemonly nounwind }
-attributes #2 = { nounwind }
+declare void @llvm.lifetime.end.p0(ptr nocapture)
diff --git a/llvm/test/Transforms/MergeICmps/X86/pr41917.ll b/llvm/test/Transforms/MergeICmps/X86/pr41917.ll
index d4fd34a..d8065e8 100644
--- a/llvm/test/Transforms/MergeICmps/X86/pr41917.ll
+++ b/llvm/test/Transforms/MergeICmps/X86/pr41917.ll
@@ -7,13 +7,13 @@ target triple = "i386-pc-windows-msvc19.11.0"
%class.a = type { i32, i32, i32, i32, i32 }
; Function Attrs: nounwind optsize
-define dso_local zeroext i1 @pr41917(ptr byval(%class.a) nocapture readonly align 4 %g, ptr byval(%class.a) nocapture readonly align 4 %p2) local_unnamed_addr #0 {
+define dso_local zeroext i1 @pr41917(ptr byval(%class.a) nocapture readonly align 4 %g, ptr byval(%class.a) nocapture readonly align 4 %p2) local_unnamed_addr {
; CHECK-LABEL: @pr41917(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call zeroext i1 @f2() #[[ATTR3:[0-9]+]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call zeroext i1 @f2()
; CHECK-NEXT: br i1 [[CALL]], label [[LAND_RHS:%.*]], label %"land.end+land.rhs3"
; CHECK: land.rhs:
-; CHECK-NEXT: [[CALL1:%.*]] = tail call zeroext i1 @f2() #[[ATTR3]]
+; CHECK-NEXT: [[CALL1:%.*]] = tail call zeroext i1 @f2()
; CHECK-NEXT: br label %"land.end+land.rhs3"
; CHECK: "land.end+land.rhs3":
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_A:%.*]], ptr [[G:%.*]], i32 0, i32 1
@@ -25,11 +25,11 @@ define dso_local zeroext i1 @pr41917(ptr byval(%class.a) nocapture readonly alig
; CHECK-NEXT: ret i1 [[TMP2]]
;
entry:
- %call = tail call zeroext i1 @f2() #2
+ %call = tail call zeroext i1 @f2()
br i1 %call, label %land.rhs, label %land.end
land.rhs: ; preds = %entry
- %call1 = tail call zeroext i1 @f2() #2
+ %call1 = tail call zeroext i1 @f2()
br label %land.end
land.end: ; preds = %land.rhs, %entry
@@ -53,11 +53,7 @@ land.end6: ; preds = %land.rhs3, %land.en
ret i1 %4
}
-declare dso_local zeroext i1 @f2() local_unnamed_addr #1
-
-attributes #0 = { nounwind optsize "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { optsize "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind optsize }
+declare dso_local zeroext i1 @f2() local_unnamed_addr
!llvm.module.flags = !{!0, !1}
!llvm.ident = !{!2}
diff --git a/llvm/test/Transforms/NewGVN/basic-cyclic-opt.ll b/llvm/test/Transforms/NewGVN/basic-cyclic-opt.ll
index 1cf9fd9..12b4184 100644
--- a/llvm/test/Transforms/NewGVN/basic-cyclic-opt.ll
+++ b/llvm/test/Transforms/NewGVN/basic-cyclic-opt.ll
@@ -4,7 +4,7 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
;; Function Attrs: nounwind ssp uwtable
;; We should eliminate the sub, and one of the phi nodes
-define void @vnum_test1(ptr %data) #0 {
+define void @vnum_test1(ptr %data) {
; CHECK-LABEL: @vnum_test1(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i32, ptr [[DATA:%.*]], i64 3
@@ -74,7 +74,7 @@ bb19: ; preds = %bb4
;; We should eliminate the sub, one of the phi nodes, prove the store of the sub
;; and the load of data are equivalent, that the load always produces constant 0, and
;; delete the load replacing it with constant 0.
-define i32 @vnum_test2(ptr %data) #0 {
+define i32 @vnum_test2(ptr %data) {
; CHECK-LABEL: @vnum_test2(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i32, ptr [[DATA:%.*]], i64 3
@@ -142,11 +142,10 @@ bb21: ; preds = %bb4
ret i32 %p.0
}
-
; Function Attrs: nounwind ssp uwtable
;; Same as test 2, with a conditional store of m-n, so it has to also discover
;; that data ends up with the same value no matter what branch is taken.
-define i32 @vnum_test3(ptr %data) #0 {
+define i32 @vnum_test3(ptr %data) {
; CHECK-LABEL: @vnum_test3(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds i32, ptr [[DATA:%.*]], i64 3
@@ -305,7 +304,6 @@ bb3: ; preds = %bb2
%tmp3 = sub i32 %tmp, %phi2
ret i32 %tmp3
}
-attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
!llvm.ident = !{!0, !0, !0}
diff --git a/llvm/test/Transforms/NewGVN/cond_br2-xfail.ll b/llvm/test/Transforms/NewGVN/cond_br2-xfail.ll
index 8b2d662..b6da86b 100644
--- a/llvm/test/Transforms/NewGVN/cond_br2-xfail.ll
+++ b/llvm/test/Transforms/NewGVN/cond_br2-xfail.ll
@@ -10,7 +10,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
%"union.llvm::SmallVectorBase::U" = type { x86_fp80 }
; Function Attrs: ssp uwtable
-define void @_Z4testv() #0 personality ptr @__gxx_personality_v0 {
+define void @_Z4testv() personality ptr @__gxx_personality_v0 {
; CHECK: @_Z4testv()
; CHECK: invoke.cont:
; CHECK: br i1 true, label %new.notnull.i11, label %if.end.i14
@@ -18,7 +18,7 @@ define void @_Z4testv() #0 personality ptr @__gxx_personality_v0 {
entry:
%sv = alloca %"class.llvm::SmallVector", align 16
- call void @llvm.lifetime.start.p0(ptr %sv) #1
+ call void @llvm.lifetime.start.p0(ptr %sv)
%FirstEl.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", ptr %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 3
store ptr %FirstEl.i.i.i.i.i.i, ptr %sv, align 16, !tbaa !4
%EndX.i.i.i.i.i.i = getelementptr inbounds %"class.llvm::SmallVector", ptr %sv, i64 0, i32 0, i32 0, i32 0, i32 0, i32 1
@@ -83,11 +83,11 @@ invoke.cont3: ; preds = %invoke.cont2
br i1 %cmp.i.i.i.i19, label %_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21, label %if.then.i.i.i20
if.then.i.i.i20: ; preds = %invoke.cont3
- call void @free(ptr %5) #1
+ call void @free(ptr %5)
br label %_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21
_ZN4llvm11SmallVectorIiLj8EED1Ev.exit21: ; preds = %invoke.cont3, %if.then.i.i.i20
- call void @llvm.lifetime.end.p0(ptr %sv) #1
+ call void @llvm.lifetime.end.p0(ptr %sv)
ret void
lpad: ; preds = %if.end.i14, %if.end.i, %invoke.cont2
@@ -98,7 +98,7 @@ lpad: ; preds = %if.end.i14, %if.end
br i1 %cmp.i.i.i.i, label %eh.resume, label %if.then.i.i.i
if.then.i.i.i: ; preds = %lpad
- call void @free(ptr %7) #1
+ call void @free(ptr %7)
br label %eh.resume
eh.resume: ; preds = %if.then.i.i.i, %lpad
@@ -106,24 +106,19 @@ eh.resume: ; preds = %if.then.i.i.i, %lpa
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture)
declare i32 @__gxx_personality_v0(...)
-declare void @_Z1gRN4llvm11SmallVectorIiLj8EEE(ptr) #2
+declare void @_Z1gRN4llvm11SmallVectorIiLj8EEE(ptr)
; Function Attrs: nounwind
-declare void @llvm.lifetime.end.p0(ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture)
-declare void @_ZN4llvm15SmallVectorBase8grow_podEmm(ptr, i64, i64) #2
+declare void @_ZN4llvm15SmallVectorBase8grow_podEmm(ptr, i64, i64)
; Function Attrs: nounwind
-declare void @free(ptr nocapture) #3
-
-attributes #0 = { ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind }
-attributes #2 = { "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+declare void @free(ptr nocapture)
!0 = !{!"any pointer", !1}
!1 = !{!"omnipotent char", !2}
diff --git a/llvm/test/Transforms/NewGVN/equivalent-phi.ll b/llvm/test/Transforms/NewGVN/equivalent-phi.ll
index ba4fc14..388b941 100644
--- a/llvm/test/Transforms/NewGVN/equivalent-phi.ll
+++ b/llvm/test/Transforms/NewGVN/equivalent-phi.ll
@@ -8,7 +8,7 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
;; one set of indexing calculations and a load
; Function Attrs: nounwind ssp uwtable
-define i32 @bar(i32 %arg, i32 %arg1, i32 %arg2) #0 {
+define i32 @bar(i32 %arg, i32 %arg1, i32 %arg2) {
; CHECK-LABEL: @bar(
; CHECK-NEXT: bb:
; CHECK-NEXT: br label [[BB3:%.*]]
@@ -59,8 +59,6 @@ bb20: ; preds = %bb17
ret i32 %tmp14
}
-attributes #0 = { nounwind ssp uwtable "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Transforms/NewGVN/memory-handling.ll b/llvm/test/Transforms/NewGVN/memory-handling.ll
index f83d145..71e9041 100644
--- a/llvm/test/Transforms/NewGVN/memory-handling.ll
+++ b/llvm/test/Transforms/NewGVN/memory-handling.ll
@@ -282,10 +282,10 @@ declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1) #2
; Function Attrs: inlinehint nounwind readonly uwtable
declare i32 @tolower(i32) local_unnamed_addr #3
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #0 = { nounwind uwtable }
+attributes #1 = { nounwind readnone }
attributes #2 = { argmemonly nounwind }
-attributes #3 = { inlinehint nounwind readonly uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #3 = { inlinehint nounwind readonly uwtable }
attributes #4 = { nounwind readnone }
attributes #5 = { nounwind readonly }
diff --git a/llvm/test/Transforms/NewGVN/pr31483.ll b/llvm/test/Transforms/NewGVN/pr31483.ll
index 82e9a2a..c1fb836 100644
--- a/llvm/test/Transforms/NewGVN/pr31483.ll
+++ b/llvm/test/Transforms/NewGVN/pr31483.ll
@@ -6,7 +6,7 @@ target datalayout = "E-m:e-i64:64-n32:64"
;; Ensure we do not believe the indexing increments are unreachable due to incorrect memory
;; equivalence detection. In PR31483, we were deleting those blocks as unreachable
; Function Attrs: nounwind
-define signext i32 @ham(ptr %arg, ptr %arg1) #0 {
+define signext i32 @ham(ptr %arg, ptr %arg1) {
; CHECK-LABEL: @ham(
; CHECK-NEXT: bb:
; CHECK-NEXT: [[TMP:%.*]] = alloca ptr, align 8
@@ -89,12 +89,7 @@ bb23: ; preds = %bb2
ret i32 undef
}
-declare signext i32 @zot(ptr, ...) #1
+declare signext i32 @zot(ptr, ...)
; Function Attrs: nounwind
-declare void @llvm.va_end(ptr) #2
-
-attributes #0 = { nounwind "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64" "target-features"="+altivec,-bpermd,-crypto,-direct-move,-extdiv,-power8-vector,-vsx" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64" "target-features"="+altivec,-bpermd,-crypto,-direct-move,-extdiv,-power8-vector,-vsx" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind }
-
+declare void @llvm.va_end(ptr)
diff --git a/llvm/test/Transforms/NewGVN/pr31501.ll b/llvm/test/Transforms/NewGVN/pr31501.ll
index 353c693..b2ba42b 100644
--- a/llvm/test/Transforms/NewGVN/pr31501.ll
+++ b/llvm/test/Transforms/NewGVN/pr31501.ll
@@ -49,9 +49,9 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
%struct.wombat.28 = type <{ ptr, i8, i8, [6 x i8] }>
; Function Attrs: norecurse nounwind ssp uwtable
-define weak_odr hidden ptr @quux(ptr %arg, ptr %arg1) local_unnamed_addr #0 align 2 {
+define weak_odr hidden ptr @quux(ptr %arg, ptr %arg1) local_unnamed_addr align 2 {
; CHECK-LABEL: define weak_odr hidden ptr @quux(
-; CHECK-SAME: ptr [[ARG:%.*]], ptr [[ARG1:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] align 2 {
+; CHECK-SAME: ptr [[ARG:%.*]], ptr [[ARG1:%.*]]) local_unnamed_addr align 2 {
; CHECK-NEXT: [[BB:.*]]:
; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds [[STRUCT_BARNEY:%.*]], ptr [[ARG]], i64 0, i32 3, i32 0, i32 0, i32 0
; CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[TMP]], align 8, !tbaa [[ANYPTR_TBAA2:![0-9]+]]
@@ -112,8 +112,6 @@ bb21: ; preds = %bb19, %bb
ret ptr %tmp22
}
-attributes #0 = { norecurse nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Transforms/NewGVN/pr33187.ll b/llvm/test/Transforms/NewGVN/pr33187.ll
index 969f172..61eb7a5 100644
--- a/llvm/test/Transforms/NewGVN/pr33187.ll
+++ b/llvm/test/Transforms/NewGVN/pr33187.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
;; Ensure we don't change after value numbering by accidentally deleting the wrong expression.
; RUN: opt -passes=newgvn -S %s | FileCheck %s
-define void @fn1(i1 %arg) local_unnamed_addr #0 {
+define void @fn1(i1 %arg) local_unnamed_addr {
; CHECK-LABEL: @fn1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_COND_PREHEADER:%.*]]
@@ -89,8 +89,7 @@ if.end18: ; preds = %L, %while.body12
br label %while.cond10
}
-
-define void @hoge() local_unnamed_addr #0 {
+define void @hoge() local_unnamed_addr {
; CHECK-LABEL: @hoge(
; CHECK-NEXT: bb:
; CHECK-NEXT: br label [[BB1:%.*]]
@@ -108,9 +107,6 @@ bb1: ; preds = %bb1, %bb
br label %bb1
}
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
-
define void @a(i1 %arg) {
; CHECK-LABEL: @a(
; CHECK-NEXT: b:
@@ -143,4 +139,3 @@ f: ; preds = %d
%e = getelementptr i8, ptr %i, i64 1
br label %d
}
-
diff --git a/llvm/test/Transforms/NewGVN/pr33305.ll b/llvm/test/Transforms/NewGVN/pr33305.ll
index e742f14..ff645f8 100644
--- a/llvm/test/Transforms/NewGVN/pr33305.ll
+++ b/llvm/test/Transforms/NewGVN/pr33305.ll
@@ -16,9 +16,9 @@ target triple = "x86_64-apple-macosx10.12.0"
@str.2 = private unnamed_addr constant [8 x i8] c"Screwed\00"
; Function Attrs: nounwind optsize ssp uwtable
-define i32 @main() local_unnamed_addr #0 {
+define i32 @main() local_unnamed_addr {
; CHECK-LABEL: define i32 @main(
-; CHECK-SAME: ) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: ) local_unnamed_addr {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[DOTPR_I:%.*]] = load i32, ptr @c, align 4, !tbaa [[INT_TBAA3:![0-9]+]]
; CHECK-NEXT: [[CMP13_I:%.*]] = icmp slt i32 [[DOTPR_I]], 1
@@ -77,7 +77,7 @@ define i32 @main() local_unnamed_addr #0 {
; CHECK-NEXT: br i1 [[TOBOOL]], label %[[IF_END:.*]], label %[[IF_THEN:.*]]
; CHECK: [[IF_THEN]]:
; CHECK-NEXT: [[PUTS2:%.*]] = tail call i32 @puts(ptr @str.2)
-; CHECK-NEXT: tail call void @abort() #[[ATTR3:[0-9]+]]
+; CHECK-NEXT: tail call void @abort()
; CHECK-NEXT: unreachable
; CHECK: [[IF_END]]:
; CHECK-NEXT: [[PUTS:%.*]] = tail call i32 @puts(ptr @str)
@@ -153,7 +153,7 @@ fn1.exit: ; preds = %if.then.i, %for.end
if.then: ; preds = %fn1.exit
%puts2 = tail call i32 @puts(ptr @str.2)
- tail call void @abort() #3
+ tail call void @abort()
unreachable
if.end: ; preds = %fn1.exit
@@ -162,15 +162,10 @@ if.end: ; preds = %fn1.exit
}
; Function Attrs: noreturn nounwind optsize
-declare void @abort() local_unnamed_addr #1
+declare void @abort() local_unnamed_addr
; Function Attrs: nounwind
-declare i32 @puts(ptr nocapture readonly) local_unnamed_addr #2
-
-attributes #0 = { nounwind optsize ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { noreturn nounwind optsize "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind }
-attributes #3 = { noreturn nounwind optsize }
+declare i32 @puts(ptr nocapture readonly) local_unnamed_addr
!llvm.module.flags = !{!0, !1}
!llvm.ident = !{!2}
diff --git a/llvm/test/Transforms/NewGVN/pr34430.ll b/llvm/test/Transforms/NewGVN/pr34430.ll
index 490ba433..62d5770 100644
--- a/llvm/test/Transforms/NewGVN/pr34430.ll
+++ b/llvm/test/Transforms/NewGVN/pr34430.ll
@@ -4,7 +4,7 @@
source_filename = "bugpoint-output-e4c7d0f.bc"
; Make sure we still properly resolve phi cycles when they involve predicateinfo copies of phis.
-define void @hoge(i1 %arg) local_unnamed_addr #0 {
+define void @hoge(i1 %arg) local_unnamed_addr {
; CHECK-LABEL: @hoge(
; CHECK-NEXT: bb:
; CHECK-NEXT: br i1 %arg, label [[BB6:%.*]], label [[BB1:%.*]]
@@ -41,8 +41,6 @@ bb6: ; preds = %bb4, %bb2, %bb1, %b
br label %bb4
}
-attributes #0 = { norecurse noreturn nounwind ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
!llvm.ident = !{!0}
!0 = !{!"clang version 6.0.0"}
diff --git a/llvm/test/Transforms/NewGVN/pr34452.ll b/llvm/test/Transforms/NewGVN/pr34452.ll
index 48bdd88..7c38147 100644
--- a/llvm/test/Transforms/NewGVN/pr34452.ll
+++ b/llvm/test/Transforms/NewGVN/pr34452.ll
@@ -6,9 +6,9 @@ source_filename = "bugpoint-output-09f7a24.bc"
@WHOLELINE = external local_unnamed_addr global i32, align 4
; Function Attrs: nounwind uwtable
-define void @sgrep() local_unnamed_addr #0 {
+define void @sgrep() local_unnamed_addr {
; CHECK-LABEL: define void @sgrep(
-; CHECK-SAME: ) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: ) local_unnamed_addr {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr @WHOLELINE, align 4, !tbaa [[INT_TBAA1:![0-9]+]]
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[TMP0]], 0
@@ -36,10 +36,7 @@ while.body.us: ; preds = %while.body.us, %ent
}
; Function Attrs: nounwind readnone speculatable
-declare { i64, i1 } @llvm.sadd.with.overflow.i64(i64, i64) #1
-
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "polly-optimized" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone speculatable }
+declare { i64, i1 } @llvm.sadd.with.overflow.i64(i64, i64)
!llvm.ident = !{!0}
diff --git a/llvm/test/Transforms/OpenMP/dead_use.ll b/llvm/test/Transforms/OpenMP/dead_use.ll
index 1c4b2c6..ad0f91c 100644
--- a/llvm/test/Transforms/OpenMP/dead_use.ll
+++ b/llvm/test/Transforms/OpenMP/dead_use.ll
@@ -6,9 +6,9 @@
@0 = private unnamed_addr global %struct.ident_t { i32 0, i32 2, i32 0, i32 0, ptr @.str }, align 8
; Function Attrs: nounwind uwtable
-define dso_local i32 @b() #0 {
+define dso_local i32 @b() {
; CHECK-LABEL: define dso_local i32 @b(
-; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
+; CHECK-SAME: ) {
; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @a()
; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP1]], align 4
@@ -21,9 +21,9 @@ define dso_local i32 @b() #0 {
}
; Function Attrs: nounwind uwtable
-define internal i32 @a() #0 {
+define internal i32 @a() {
; CHECK-LABEL: define internal i32 @a(
-; CHECK-SAME: ) #[[ATTR0]] {
+; CHECK-SAME: ) {
; CHECK-NEXT: [[TMP1:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @b()
; CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB0:[0-9]+]], i32 0, ptr @.omp_outlined.)
@@ -38,9 +38,9 @@ define internal i32 @a() #0 {
}
; Function Attrs: norecurse nounwind uwtable
-define internal void @.omp_outlined.(ptr noalias %0, ptr noalias %1) #1 {
+define internal void @.omp_outlined.(ptr noalias %0, ptr noalias %1) {
; CHECK-LABEL: define internal void @.omp_outlined.(
-; CHECK-SAME: ptr noalias [[TMP0:%.*]], ptr noalias [[TMP1:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-SAME: ptr noalias [[TMP0:%.*]], ptr noalias [[TMP1:%.*]]) {
; CHECK-NEXT: [[TMP3:%.*]] = alloca ptr, align 8
; CHECK-NEXT: [[TMP4:%.*]] = alloca ptr, align 8
; CHECK-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 8, !tbaa [[ANYPTR_TBAA2:![0-9]+]]
@@ -55,11 +55,7 @@ define internal void @.omp_outlined.(ptr noalias %0, ptr noalias %1) #1 {
}
; Function Attrs: nounwind
-declare !callback !6 void @__kmpc_fork_call(ptr, i32, ptr, ...) #2
-
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { norecurse nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #2 = { nounwind }
+declare !callback !6 void @__kmpc_fork_call(ptr, i32, ptr, ...)
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Transforms/OpenMP/icv_remarks.ll b/llvm/test/Transforms/OpenMP/icv_remarks.ll
index f76d487..3caa56f 100644
--- a/llvm/test/Transforms/OpenMP/icv_remarks.ll
+++ b/llvm/test/Transforms/OpenMP/icv_remarks.ll
@@ -14,55 +14,48 @@ target triple = "x86_64-unknown-linux-gnu"
; CHECK-DAG: remark: icv_remarks.c:12:0: OpenMP ICV nthreads Value: IMPLEMENTATION_DEFINED
; CHECK-DAG: remark: icv_remarks.c:12:0: OpenMP ICV active_levels Value: 0
; CHECK-DAG: remark: icv_remarks.c:12:0: OpenMP ICV cancel Value: 0
-define dso_local void @foo(i32 %a) local_unnamed_addr #0 !dbg !17 {
+define dso_local void @foo(i32 %a) local_unnamed_addr !dbg !17 {
entry:
%.kmpc_loc.addr = alloca %struct.ident_t, align 8
call void @llvm.memcpy.p0.p0.i64(ptr nonnull align 8 dereferenceable(24) %.kmpc_loc.addr, ptr nonnull align 8 dereferenceable(24) @0, i64 16, i1 false)
call void @llvm.dbg.value(metadata i32 %a, metadata !19, metadata !DIExpression()), !dbg !21
- tail call void @omp_set_num_threads(i32 %a) #1, !dbg !22
- %call = tail call i32 @omp_get_max_threads() #1, !dbg !23
+ tail call void @omp_set_num_threads(i32 %a), !dbg !22
+ %call = tail call i32 @omp_get_max_threads(), !dbg !23
call void @llvm.dbg.value(metadata i32 %call, metadata !20, metadata !DIExpression()), !dbg !21
- tail call void @use(i32 %call) #1, !dbg !24
+ tail call void @use(i32 %call), !dbg !24
%0 = getelementptr inbounds %struct.ident_t, ptr %.kmpc_loc.addr, i64 0, i32 4, !dbg !25
store ptr @1, ptr %0, align 8, !dbg !25, !tbaa !26
- call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull %.kmpc_loc.addr, i32 0, ptr @.omp_outlined.) #1, !dbg !25
+ call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull %.kmpc_loc.addr, i32 0, ptr @.omp_outlined.), !dbg !25
ret void, !dbg !32
}
-declare !dbg !4 dso_local void @omp_set_num_threads(i32) local_unnamed_addr #1
+declare !dbg !4 dso_local void @omp_set_num_threads(i32) local_unnamed_addr
-declare !dbg !9 dso_local i32 @omp_get_max_threads() local_unnamed_addr #1
+declare !dbg !9 dso_local i32 @omp_get_max_threads() local_unnamed_addr
-declare !dbg !12 dso_local void @use(i32) local_unnamed_addr #2
+declare !dbg !12 dso_local void @use(i32) local_unnamed_addr
; CHECK-DAG: remark: icv_remarks.c:18:0: OpenMP ICV nthreads Value: IMPLEMENTATION_DEFINED
; CHECK-DAG: remark: icv_remarks.c:18:0: OpenMP ICV active_levels Value: 0
; CHECK-DAG: remark: icv_remarks.c:18:0: OpenMP ICV cancel Value: 0
-define internal void @.omp_outlined.(ptr noalias nocapture readnone %.global_tid., ptr noalias nocapture readnone %.bound_tid.) #3 !dbg !33 {
+define internal void @.omp_outlined.(ptr noalias nocapture readnone %.global_tid., ptr noalias nocapture readnone %.bound_tid.) !dbg !33 {
entry:
call void @llvm.dbg.value(metadata ptr %.global_tid., metadata !41, metadata !DIExpression()), !dbg !43
call void @llvm.dbg.value(metadata ptr %.bound_tid., metadata !42, metadata !DIExpression()), !dbg !43
- call void @llvm.dbg.value(metadata ptr undef, metadata !44, metadata !DIExpression()) #1, !dbg !50
- call void @llvm.dbg.value(metadata ptr undef, metadata !47, metadata !DIExpression()) #1, !dbg !50
- tail call void @omp_set_num_threads(i32 10) #1, !dbg !52
- %call.i = tail call i32 @omp_get_max_threads() #1, !dbg !53
- call void @llvm.dbg.value(metadata i32 %call.i, metadata !48, metadata !DIExpression()) #1, !dbg !54
- tail call void @use(i32 %call.i) #1, !dbg !55
+ call void @llvm.dbg.value(metadata ptr undef, metadata !44, metadata !DIExpression()), !dbg !50
+ call void @llvm.dbg.value(metadata ptr undef, metadata !47, metadata !DIExpression()), !dbg !50
+ tail call void @omp_set_num_threads(i32 10), !dbg !52
+ %call.i = tail call i32 @omp_get_max_threads(), !dbg !53
+ call void @llvm.dbg.value(metadata i32 %call.i, metadata !48, metadata !DIExpression()), !dbg !54
+ tail call void @use(i32 %call.i), !dbg !55
ret void, !dbg !56
}
-declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg) #4
+declare void @llvm.memcpy.p0.p0.i64(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i64, i1 immarg)
-declare !callback !57 dso_local void @__kmpc_fork_call(ptr, i32, ptr, ...) local_unnamed_addr #1
+declare !callback !57 dso_local void @__kmpc_fork_call(ptr, i32, ptr, ...) local_unnamed_addr
-declare void @llvm.dbg.value(metadata, metadata, metadata) #5
-
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind }
-attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { norecurse nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #4 = { argmemonly nounwind willreturn }
-attributes #5 = { nounwind readnone speculatable willreturn }
+declare void @llvm.dbg.value(metadata, metadata, metadata)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!13, !14, !15, !59}
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll
index 0a1efda..eb1cc5e 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch-correct.ll
@@ -9,7 +9,6 @@
; CHECK-NOT: remark: {{.*}}
; CHECK: !{!"branch_weights", i32 0, i32 200000}
-
; ModuleID = 'misexpect-branch-correct.c'
source_filename = "misexpect-branch-correct.c"
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -19,14 +18,14 @@ target triple = "x86_64-unknown-linux-gnu"
@outer_loop = constant i32 2000, align 4
; Function Attrs: nounwind
-define i32 @bar() #0 {
+define i32 @bar() {
entry:
%rando = alloca i32, align 4
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(ptr %rando) #4
+ call void @llvm.lifetime.start.p0(ptr %rando)
%call = call i32 (...) @buzz()
store i32 %call, ptr %rando, align 4, !tbaa !3
- call void @llvm.lifetime.start.p0(ptr %x) #4
+ call void @llvm.lifetime.start.p0(ptr %x)
store i32 0, ptr %x, align 4, !tbaa !3
%0 = load i32, ptr %rando, align 4, !tbaa !3
%rem = srem i32 %0, 200000
@@ -52,31 +51,25 @@ if.else: ; preds = %entry
if.end: ; preds = %if.else, %if.then
%2 = load i32, ptr %x, align 4, !tbaa !3
- call void @llvm.lifetime.end.p0(ptr %x) #4
- call void @llvm.lifetime.end.p0(ptr %rando) #4
+ call void @llvm.lifetime.end.p0(ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %rando)
ret i32 %2
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare i32 @buzz(...) #2
+declare i32 @buzz(...)
; Function Attrs: nounwind readnone willreturn
-declare i64 @llvm.expect.i64(i64, i64) #3
+declare i64 @llvm.expect.i64(i64, i64)
-declare i32 @baz(i32) #2
+declare i32 @baz(i32)
-declare i32 @foo(i32) #2
+declare i32 @foo(i32)
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(ptr nocapture) #1
-
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { argmemonly nounwind willreturn }
-attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { nounwind readnone willreturn }
-attributes #4 = { nounwind }
+declare void @llvm.lifetime.end.p0(ptr nocapture)
!llvm.module.flags = !{!0, !1}
!llvm.ident = !{!2}
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll
index 68b233e..3390825 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch-overflow.ll
@@ -16,14 +16,14 @@ target triple = "x86_64-unknown-linux-gnu"
@outer_loop = constant i32 2000, align 4
; Function Attrs: nounwind
-define i32 @bar() #0 !dbg !6 {
+define i32 @bar() !dbg !6 {
entry:
%rando = alloca i32, align 4
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(ptr %rando) #4, !dbg !9
+ call void @llvm.lifetime.start.p0(ptr %rando), !dbg !9
%call = call i32 (...) @buzz(), !dbg !9
store i32 %call, ptr %rando, align 4, !dbg !9, !tbaa !10
- call void @llvm.lifetime.start.p0(ptr %x) #4, !dbg !14
+ call void @llvm.lifetime.start.p0(ptr %x), !dbg !14
store i32 0, ptr %x, align 4, !dbg !14, !tbaa !10
%0 = load i32, ptr %rando, align 4, !dbg !15, !tbaa !10
%rem = srem i32 %0, 200000, !dbg !15
@@ -49,31 +49,25 @@ if.else: ; preds = %entry
if.end: ; preds = %if.else, %if.then
%2 = load i32, ptr %x, align 4, !dbg !19, !tbaa !10
- call void @llvm.lifetime.end.p0(ptr %x) #4, !dbg !20
- call void @llvm.lifetime.end.p0(ptr %rando) #4, !dbg !20
+ call void @llvm.lifetime.end.p0(ptr %x), !dbg !20
+ call void @llvm.lifetime.end.p0(ptr %rando), !dbg !20
ret i32 %2, !dbg !19
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare i32 @buzz(...) #2
+declare i32 @buzz(...)
; Function Attrs: nounwind readnone willreturn
-declare i64 @llvm.expect.i64(i64, i64) #3
+declare i64 @llvm.expect.i64(i64, i64)
-declare i32 @baz(i32) #2
+declare i32 @baz(i32)
-declare i32 @foo(i32) #2
+declare i32 @foo(i32)
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(ptr nocapture) #1
-
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { argmemonly nounwind willreturn }
-attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { nounwind readnone willreturn }
-attributes #4 = { nounwind }
+declare void @llvm.lifetime.end.p0(ptr nocapture)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4}
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll
index 2f188f5..d34708c 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch-stripped.ll
@@ -19,7 +19,6 @@
; DISABLED-NOT: warning: <unknown>:0:0: 19.98%
; DISABLED-NOT: remark: <unknown>:0:0: Potential performance regression from use of the llvm.expect intrinsic: Annotation was correct on 19.98% (399668 / 2000000) of profiled executions.
-
; ModuleID = 'misexpect-branch.c'
source_filename = "misexpect-branch.c"
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -29,14 +28,14 @@ target triple = "x86_64-unknown-linux-gnu"
@outer_loop = constant i32 2000, align 4
; Function Attrs: nounwind
-define i32 @bar() #0 {
+define i32 @bar() {
entry:
%rando = alloca i32, align 4
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(ptr %rando) #4
+ call void @llvm.lifetime.start.p0(ptr %rando)
%call = call i32 (...) @buzz()
store i32 %call, ptr %rando, align 4, !tbaa !3
- call void @llvm.lifetime.start.p0(ptr %x) #4
+ call void @llvm.lifetime.start.p0(ptr %x)
store i32 0, ptr %x, align 4, !tbaa !3
%0 = load i32, ptr %rando, align 4, !tbaa !3
%rem = srem i32 %0, 200000
@@ -62,31 +61,25 @@ if.else: ; preds = %entry
if.end: ; preds = %if.else, %if.then
%2 = load i32, ptr %x, align 4, !tbaa !3
- call void @llvm.lifetime.end.p0(ptr %x) #4
- call void @llvm.lifetime.end.p0(ptr %rando) #4
+ call void @llvm.lifetime.end.p0(ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %rando)
ret i32 %2
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare i32 @buzz(...) #2
+declare i32 @buzz(...)
; Function Attrs: nounwind readnone willreturn
-declare i64 @llvm.expect.i64(i64, i64) #3
+declare i64 @llvm.expect.i64(i64, i64)
-declare i32 @baz(i32) #2
+declare i32 @baz(i32)
-declare i32 @foo(i32) #2
+declare i32 @foo(i32)
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(ptr nocapture) #1
-
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { argmemonly nounwind willreturn }
-attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { nounwind readnone willreturn }
-attributes #4 = { nounwind }
+declare void @llvm.lifetime.end.p0(ptr nocapture)
!llvm.module.flags = !{!0, !1}
!llvm.ident = !{!2}
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll
index 4add781..a43e632 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch-unpredictable.ll
@@ -7,7 +7,6 @@
; CHECK-NOT: warning: {{.*}}
; CHECK-NOT: remark: {{.*}}
-
; ModuleID = 'misexpect-branch-unpredictable.c'
source_filename = "clang/test/Profile/misexpect-branch-unpredictable.c"
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -17,14 +16,14 @@ target triple = "x86_64-unknown-linux-gnu"
@outer_loop = constant i32 2000, align 4
; Function Attrs: nounwind
-define i32 @bar() #0 {
+define i32 @bar() {
entry:
%rando = alloca i32, align 4
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(ptr %rando) #3
+ call void @llvm.lifetime.start.p0(ptr %rando)
%call = call i32 (...) @buzz()
store i32 %call, ptr %rando, align 4, !tbaa !2
- call void @llvm.lifetime.start.p0(ptr %x) #3
+ call void @llvm.lifetime.start.p0(ptr %x)
store i32 0, ptr %x, align 4, !tbaa !2
%0 = load i32, ptr %rando, align 4, !tbaa !2
%rem = srem i32 %0, 200000
@@ -49,27 +48,22 @@ if.else: ; preds = %entry
if.end: ; preds = %if.else, %if.then
%2 = load i32, ptr %x, align 4, !tbaa !2
- call void @llvm.lifetime.end.p0(ptr %x) #3
- call void @llvm.lifetime.end.p0(ptr %rando) #3
+ call void @llvm.lifetime.end.p0(ptr %x)
+ call void @llvm.lifetime.end.p0(ptr %rando)
ret i32 %2
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare i32 @buzz(...) #2
+declare i32 @buzz(...)
-declare i32 @baz(i32) #2
+declare i32 @baz(i32)
-declare i32 @foo(i32) #2
+declare i32 @foo(i32)
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(ptr nocapture) #1
-
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { argmemonly nounwind willreturn }
-attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { nounwind }
+declare void @llvm.lifetime.end.p0(ptr nocapture)
!llvm.module.flags = !{!0}
!llvm.ident = !{!1}
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-branch.ll b/llvm/test/Transforms/PGOProfile/misexpect-branch.ll
index 5a7731b..7e01f7a 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-branch.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-branch.ll
@@ -33,14 +33,14 @@ target triple = "x86_64-unknown-linux-gnu"
@outer_loop = constant i32 2000, align 4
; Function Attrs: nounwind
-define i32 @bar() #0 !dbg !6 {
+define i32 @bar() !dbg !6 {
entry:
%rando = alloca i32, align 4
%x = alloca i32, align 4
- call void @llvm.lifetime.start.p0(ptr %rando) #4, !dbg !9
+ call void @llvm.lifetime.start.p0(ptr %rando), !dbg !9
%call = call i32 (...) @buzz(), !dbg !9
store i32 %call, ptr %rando, align 4, !dbg !9, !tbaa !10
- call void @llvm.lifetime.start.p0(ptr %x) #4, !dbg !14
+ call void @llvm.lifetime.start.p0(ptr %x), !dbg !14
store i32 0, ptr %x, align 4, !dbg !14, !tbaa !10
%0 = load i32, ptr %rando, align 4, !dbg !15, !tbaa !10
%rem = srem i32 %0, 200000, !dbg !15
@@ -66,31 +66,25 @@ if.else: ; preds = %entry
if.end: ; preds = %if.else, %if.then
%2 = load i32, ptr %x, align 4, !dbg !19, !tbaa !10
- call void @llvm.lifetime.end.p0(ptr %x) #4, !dbg !20
- call void @llvm.lifetime.end.p0(ptr %rando) #4, !dbg !20
+ call void @llvm.lifetime.end.p0(ptr %x), !dbg !20
+ call void @llvm.lifetime.end.p0(ptr %rando), !dbg !20
ret i32 %2, !dbg !19
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture)
-declare i32 @buzz(...) #2
+declare i32 @buzz(...)
; Function Attrs: nounwind readnone willreturn
-declare i64 @llvm.expect.i64(i64, i64) #3
+declare i64 @llvm.expect.i64(i64, i64)
-declare i32 @baz(i32) #2
+declare i32 @baz(i32)
-declare i32 @foo(i32) #2
+declare i32 @foo(i32)
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(ptr nocapture) #1
-
-attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { argmemonly nounwind willreturn }
-attributes #2 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-features"="+cx8,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #3 = { nounwind readnone willreturn }
-attributes #4 = { nounwind }
+declare void @llvm.lifetime.end.p0(ptr nocapture)
!llvm.dbg.cu = !{!0}
!llvm.module.flags = !{!3, !4}
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll b/llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll
index 859ba72..38c27b9 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-switch-default.ll
@@ -25,7 +25,6 @@
; CORRECT-NOT: warning: {{.*}}
; CORRECT-NOT: remark: {{.*}}
-
; ModuleID = 'misexpect-switch.c'
source_filename = "misexpect-switch.c"
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -37,10 +36,10 @@ target triple = "x86_64-unknown-linux-gnu"
@arry = dso_local global [25 x i32] zeroinitializer, align 16
; Function Attrs: nounwind uwtable
-define dso_local void @init_arry() #0 {
+define dso_local void @init_arry() {
entry:
%i = alloca i32, align 4
- call void @llvm.lifetime.start.p0(ptr %i) #6
+ call void @llvm.lifetime.start.p0(ptr %i)
store i32 0, ptr %i, align 4, !tbaa !4
br label %for.cond
@@ -50,7 +49,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %call = call i32 @rand() #6
+ %call = call i32 @rand()
%rem = srem i32 %call, 10
%1 = load i32, ptr %i, align 4, !tbaa !4
%idxprom = sext i32 %1 to i64
@@ -65,24 +64,24 @@ for.inc: ; preds = %for.body
br label %for.cond
for.end: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(ptr %i) #6
+ call void @llvm.lifetime.end.p0(ptr %i)
ret void
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture)
; Function Attrs: nounwind readnone speculatable willreturn
-declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
; Function Attrs: nounwind
-declare dso_local i32 @rand() #3
+declare dso_local i32 @rand()
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; Function Attrs: nounwind uwtable
-define dso_local i32 @main() #0 {
+define dso_local i32 @main() {
entry:
%retval = alloca i32, align 4
%val = alloca i32, align 4
@@ -90,9 +89,9 @@ entry:
%condition = alloca i32, align 4
store i32 0, ptr %retval, align 4
call void @init_arry()
- call void @llvm.lifetime.start.p0(ptr %val) #6
+ call void @llvm.lifetime.start.p0(ptr %val)
store i32 0, ptr %val, align 4, !tbaa !4
- call void @llvm.lifetime.start.p0(ptr %j) #6
+ call void @llvm.lifetime.start.p0(ptr %j)
store i32 0, ptr %j, align 4, !tbaa !4
br label %for.cond
@@ -102,8 +101,8 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(ptr %condition) #6
- %call = call i32 @rand() #6
+ call void @llvm.lifetime.start.p0(ptr %condition)
+ %call = call i32 @rand()
%rem = srem i32 %call, 5
store i32 %rem, ptr %condition, align 4, !tbaa !4
%1 = load i32, ptr %condition, align 4, !tbaa !4
@@ -138,7 +137,7 @@ sw.default: ; preds = %for.body
unreachable
sw.epilog: ; preds = %sw.bb3, %sw.bb2, %sw.bb
- call void @llvm.lifetime.end.p0(ptr %condition) #6
+ call void @llvm.lifetime.end.p0(ptr %condition)
br label %for.inc
for.inc: ; preds = %sw.epilog
@@ -148,25 +147,17 @@ for.inc: ; preds = %sw.epilog
br label %for.cond
for.end: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(ptr %j) #6
- call void @llvm.lifetime.end.p0(ptr %val) #6
+ call void @llvm.lifetime.end.p0(ptr %j)
+ call void @llvm.lifetime.end.p0(ptr %val)
ret i32 0
}
; Function Attrs: nounwind readnone willreturn
-declare i64 @llvm.expect.i64(i64, i64) #4
-
-declare dso_local i32 @sum(ptr, i32) #5
+declare i64 @llvm.expect.i64(i64, i64)
-declare dso_local i32 @random_sample(ptr, i32) #5
+declare dso_local i32 @sum(ptr, i32)
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { argmemonly nounwind willreturn }
-attributes #2 = { nounwind readnone speculatable willreturn }
-attributes #3 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #4 = { nounwind readnone willreturn }
-attributes #5 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #6 = { nounwind }
+declare dso_local i32 @random_sample(ptr, i32)
!llvm.module.flags = !{!0, !1, !2}
!llvm.ident = !{!3}
diff --git a/llvm/test/Transforms/PGOProfile/misexpect-switch.ll b/llvm/test/Transforms/PGOProfile/misexpect-switch.ll
index 242d5b8..9665559 100644
--- a/llvm/test/Transforms/PGOProfile/misexpect-switch.ll
+++ b/llvm/test/Transforms/PGOProfile/misexpect-switch.ll
@@ -28,7 +28,6 @@
; CORRECT-NOT: warning: {{.*}}
; CORRECT-NOT: remark: {{.*}}
-
; ModuleID = 'misexpect-switch.c'
source_filename = "misexpect-switch.c"
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -40,10 +39,10 @@ target triple = "x86_64-unknown-linux-gnu"
@arry = dso_local global [25 x i32] zeroinitializer, align 16, !dbg !12
; Function Attrs: nounwind uwtable
-define dso_local void @init_arry() #0 !dbg !21 {
+define dso_local void @init_arry() !dbg !21 {
entry:
%i = alloca i32, align 4
- call void @llvm.lifetime.start.p0(ptr %i) #6, !dbg !26
+ call void @llvm.lifetime.start.p0(ptr %i), !dbg !26
call void @llvm.dbg.declare(metadata ptr %i, metadata !25, metadata !DIExpression()), !dbg !27
store i32 0, ptr %i, align 4, !dbg !28, !tbaa !30
br label %for.cond, !dbg !34
@@ -54,7 +53,7 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.end, !dbg !38
for.body: ; preds = %for.cond
- %call = call i32 @rand() #6, !dbg !39
+ %call = call i32 @rand(), !dbg !39
%rem = srem i32 %call, 10, !dbg !41
%1 = load i32, ptr %i, align 4, !dbg !42, !tbaa !30
%idxprom = sext i32 %1 to i64, !dbg !43
@@ -69,24 +68,24 @@ for.inc: ; preds = %for.body
br label %for.cond, !dbg !47, !llvm.loop !48
for.end: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(ptr %i) #6, !dbg !50
+ call void @llvm.lifetime.end.p0(ptr %i), !dbg !50
ret void, !dbg !50
}
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.start.p0(ptr nocapture) #1
+declare void @llvm.lifetime.start.p0(ptr nocapture)
; Function Attrs: nounwind readnone speculatable willreturn
-declare void @llvm.dbg.declare(metadata, metadata, metadata) #2
+declare void @llvm.dbg.declare(metadata, metadata, metadata)
; Function Attrs: nounwind
-declare dso_local i32 @rand() #3
+declare dso_local i32 @rand()
; Function Attrs: argmemonly nounwind willreturn
-declare void @llvm.lifetime.end.p0(ptr nocapture) #1
+declare void @llvm.lifetime.end.p0(ptr nocapture)
; Function Attrs: nounwind uwtable
-define dso_local i32 @main() #0 !dbg !51 {
+define dso_local i32 @main() !dbg !51 {
entry:
%retval = alloca i32, align 4
%val = alloca i32, align 4
@@ -94,10 +93,10 @@ entry:
%condition = alloca i32, align 4
store i32 0, ptr %retval, align 4
call void @init_arry(), !dbg !62
- call void @llvm.lifetime.start.p0(ptr %val) #6, !dbg !63
+ call void @llvm.lifetime.start.p0(ptr %val), !dbg !63
call void @llvm.dbg.declare(metadata ptr %val, metadata !55, metadata !DIExpression()), !dbg !64
store i32 0, ptr %val, align 4, !dbg !64, !tbaa !30
- call void @llvm.lifetime.start.p0(ptr %j) #6, !dbg !65
+ call void @llvm.lifetime.start.p0(ptr %j), !dbg !65
call void @llvm.dbg.declare(metadata ptr %j, metadata !56, metadata !DIExpression()), !dbg !66
store i32 0, ptr %j, align 4, !dbg !67, !tbaa !30
br label %for.cond, !dbg !68
@@ -108,9 +107,9 @@ for.cond: ; preds = %for.inc, %entry
br i1 %cmp, label %for.body, label %for.end, !dbg !71
for.body: ; preds = %for.cond
- call void @llvm.lifetime.start.p0(ptr %condition) #6, !dbg !72
+ call void @llvm.lifetime.start.p0(ptr %condition), !dbg !72
call void @llvm.dbg.declare(metadata ptr %condition, metadata !57, metadata !DIExpression()), !dbg !73
- %call = call i32 @rand() #6, !dbg !74
+ %call = call i32 @rand(), !dbg !74
%rem = srem i32 %call, 5, !dbg !75
store i32 %rem, ptr %condition, align 4, !dbg !73, !tbaa !30
%1 = load i32, ptr %condition, align 4, !dbg !76, !tbaa !30
@@ -145,7 +144,7 @@ sw.default: ; preds = %for.body
unreachable, !dbg !87
sw.epilog: ; preds = %sw.bb3, %sw.bb2, %sw.bb
- call void @llvm.lifetime.end.p0(ptr %condition) #6, !dbg !88
+ call void @llvm.lifetime.end.p0(ptr %condition), !dbg !88
br label %for.inc, !dbg !89
for.inc: ; preds = %sw.epilog
@@ -155,25 +154,17 @@ for.inc: ; preds = %sw.epilog
br label %for.cond, !dbg !91, !llvm.loop !92
for.end: ; preds = %for.cond
- call void @llvm.lifetime.end.p0(ptr %j) #6, !dbg !94
- call void @llvm.lifetime.end.p0(ptr %val) #6, !dbg !94
+ call void @llvm.lifetime.end.p0(ptr %j), !dbg !94
+ call void @llvm.lifetime.end.p0(ptr %val), !dbg !94
ret i32 0, !dbg !95
}
; Function Attrs: nounwind readnone willreturn
-declare i64 @llvm.expect.i64(i64, i64) #4
-
-declare dso_local i32 @sum(ptr, i32) #5
+declare i64 @llvm.expect.i64(i64, i64)
-declare dso_local i32 @random_sample(ptr, i32) #5
+declare dso_local i32 @sum(ptr, i32)
-attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { argmemonly nounwind willreturn }
-attributes #2 = { nounwind readnone speculatable willreturn }
-attributes #3 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #4 = { nounwind readnone willreturn }
-attributes #5 = { "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #6 = { nounwind }
+declare dso_local i32 @random_sample(ptr, i32)
!llvm.dbg.cu = !{!2}
!llvm.module.flags = !{!17, !18, !19}
diff --git a/llvm/test/tools/llvm-ir2vec/entities.mir b/llvm/test/tools/llvm-ir2vec/entities.mir
new file mode 100644
index 0000000..60d9c7a
--- /dev/null
+++ b/llvm/test/tools/llvm-ir2vec/entities.mir
@@ -0,0 +1,28 @@
+# REQUIRES: x86_64-linux
+# RUN: llvm-ir2vec entities --mode=mir %s -o 2>&1 %t1.log
+# RUN: diff %S/output/reference_x86_entities.txt %t1.log
+
+--- |
+ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+ target triple = "x86_64-unknown-linux-gnu"
+
+ define dso_local noundef i32 @test_function(i32 noundef %a) {
+ entry:
+ ret i32 %a
+ }
+...
+---
+name: test_function
+alignment: 16
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr32 }
+liveins:
+ - { reg: '$edi', virtual-reg: '%0' }
+body: |
+ bb.0.entry:
+ liveins: $edi
+
+ %0:gr32 = COPY $edi
+ $eax = COPY %0
+ RET 0, $eax
diff --git a/llvm/test/tools/llvm-ir2vec/output/lit.local.cfg b/llvm/test/tools/llvm-ir2vec/output/lit.local.cfg
new file mode 100644
index 0000000..2406f19
--- /dev/null
+++ b/llvm/test/tools/llvm-ir2vec/output/lit.local.cfg
@@ -0,0 +1,3 @@
+# Don't treat files in this directory as tests
+# These are reference data files, not test scripts
+config.suffixes = []
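
The lit.local.cfg added above stops lit from discovering the reference data files in this directory as tests by clearing config.suffixes. For contrast, a minimal sketch of the usual non-empty form is shown below; the suffix list here is hypothetical and simply illustrates the standard lit behavior of only treating files with a listed extension as tests.

# Hypothetical lit.local.cfg for a directory that *should* be scanned:
# lit only picks up files whose extension appears in config.suffixes.
config.suffixes = ['.ll', '.mir']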
diff --git a/llvm/test/tools/llvm-ir2vec/output/reference_triplets.txt b/llvm/test/tools/llvm-ir2vec/output/reference_triplets.txt
new file mode 100644
index 0000000..dfbac4c
--- /dev/null
+++ b/llvm/test/tools/llvm-ir2vec/output/reference_triplets.txt
@@ -0,0 +1,33 @@
+MAX_RELATION=4
+187 7072 1
+187 6968 2
+187 187 0
+187 7072 1
+187 6969 2
+187 10 0
+10 7072 1
+10 7072 2
+10 7072 3
+10 6961 4
+10 187 0
+187 6952 1
+187 7072 2
+187 1555 0
+1555 6882 1
+1555 6952 2
+187 7072 1
+187 6968 2
+187 187 0
+187 7072 1
+187 6969 2
+187 601 0
+601 7072 1
+601 7072 2
+601 7072 3
+601 6961 4
+601 187 0
+187 6952 1
+187 7072 2
+187 1555 0
+1555 6882 1
+1555 6952 2
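
The reference_triplets.txt file added above uses a simple layout: one MAX_RELATION=<n> header line followed by rows of three whitespace-separated integer IDs. The sketch below is a hypothetical, minimal Python reader for that layout only; it treats the three columns as opaque IDs and makes no assumption about which column is an entity or a relation, nor about how llvm-ir2vec consumes the file.

# Hypothetical reader for the triplet reference layout shown above:
# a "MAX_RELATION=<n>" header, then rows of three integer IDs whose
# semantics are deliberately left opaque here.
from typing import List, Tuple

def read_triplets(path: str) -> Tuple[int, List[Tuple[int, int, int]]]:
    with open(path) as f:
        header = f.readline().strip()
        assert header.startswith("MAX_RELATION="), "unexpected header"
        max_relation = int(header.split("=", 1)[1])
        rows: List[Tuple[int, int, int]] = []
        for line in f:
            parts = line.split()
            if not parts:
                continue  # skip blank lines, if any
            a, b, c = (int(p) for p in parts)
            rows.append((a, b, c))
    return max_relation, rows

if __name__ == "__main__":
    max_rel, rows = read_triplets("reference_triplets.txt")
    print(max_rel, len(rows))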
diff --git a/llvm/test/tools/llvm-ir2vec/output/reference_x86_entities.txt b/llvm/test/tools/llvm-ir2vec/output/reference_x86_entities.txt
new file mode 100644
index 0000000..dc436d1
--- /dev/null
+++ b/llvm/test/tools/llvm-ir2vec/output/reference_x86_entities.txt
@@ -0,0 +1,7174 @@
+7173
+AAA 0
+AAD 1
+AADD 2
+AAM 3
+AAND 4
+AAS 5
+ABS_F 6
+ABS_Fp 7
+ADC 8
+ADCX 9
+ADD 10
+ADDPDrm 11
+ADDPDrr 12
+ADDPSrm 13
+ADDPSrr 14
+ADDR 15
+ADDSDrm 16
+ADDSDrm_Int 17
+ADDSDrr 18
+ADDSDrr_Int 19
+ADDSSrm 20
+ADDSSrm_Int 21
+ADDSSrr 22
+ADDSSrr_Int 23
+ADDSUBPDrm 24
+ADDSUBPDrr 25
+ADDSUBPSrm 26
+ADDSUBPSrr 27
+ADD_F 28
+ADD_FI 29
+ADD_FPrST 30
+ADD_FST 31
+ADD_Fp 32
+ADD_FpI 33
+ADD_FrST 34
+ADJCALLSTACKDOWN 35
+ADJCALLSTACKUP 36
+ADOX 37
+AESDEC 38
+AESDECLASTrm 39
+AESDECLASTrr 40
+AESDECWIDE 41
+AESDECrm 42
+AESDECrr 43
+AESENC 44
+AESENCLASTrm 45
+AESENCLASTrr 46
+AESENCWIDE 47
+AESENCrm 48
+AESENCrr 49
+AESIMCrm 50
+AESIMCrr 51
+AESKEYGENASSISTrmi 52
+AESKEYGENASSISTrri 53
+AND 54
+ANDN 55
+ANDNPDrm 56
+ANDNPDrr 57
+ANDNPSrm 58
+ANDNPSrr 59
+ANDPDrm 60
+ANDPDrr 61
+ANDPSrm 62
+ANDPSrr 63
+ANNOTATION_LABEL 64
+AOR 65
+ARITH_FENCE 66
+ARPL 67
+ASAN_CHECK_MEMACCESS 68
+AVX 69
+AVX_SET 70
+AXOR 71
+BEXTR 72
+BEXTRI 73
+BLCFILL 74
+BLCI 75
+BLCIC 76
+BLCMSK 77
+BLCS 78
+BLENDPDrmi 79
+BLENDPDrri 80
+BLENDPSrmi 81
+BLENDPSrri 82
+BLENDVPDrm 83
+BLENDVPDrr 84
+BLENDVPSrm 85
+BLENDVPSrr 86
+BLSFILL 87
+BLSI 88
+BLSIC 89
+BLSMSK 90
+BLSR 91
+BOUNDS 92
+BSF 93
+BSR 94
+BSWAP 95
+BT 96
+BTC 97
+BTR 98
+BTS 99
+BUNDLE 100
+BZHI 101
+CALL 102
+CALLpcrel 103
+CATCHRET 104
+CBW 105
+CCMP 106
+CDQ 107
+CDQE 108
+CFCMOV 109
+CFI_INSTRUCTION 110
+CHS_F 111
+CHS_Fp 112
+CLAC 113
+CLC 114
+CLD 115
+CLDEMOTE 116
+CLEANUPRET 117
+CLFLUSH 118
+CLFLUSHOPT 119
+CLGI 120
+CLI 121
+CLRSSBSY 122
+CLTS 123
+CLUI 124
+CLWB 125
+CLZERO 126
+CMC 127
+CMOV 128
+CMOVBE_F 129
+CMOVBE_Fp 130
+CMOVB_F 131
+CMOVB_Fp 132
+CMOVE_F 133
+CMOVE_Fp 134
+CMOVNBE_F 135
+CMOVNBE_Fp 136
+CMOVNB_F 137
+CMOVNB_Fp 138
+CMOVNE_F 139
+CMOVNE_Fp 140
+CMOVNP_F 141
+CMOVNP_Fp 142
+CMOVP_F 143
+CMOVP_Fp 144
+CMOV_FR 145
+CMOV_GR 146
+CMOV_RFP 147
+CMOV_VK 148
+CMOV_VR 149
+CMP 150
+CMPCCXADDmr 151
+CMPPDrmi 152
+CMPPDrri 153
+CMPPSrmi 154
+CMPPSrri 155
+CMPSB 156
+CMPSDrmi 157
+CMPSDrmi_Int 158
+CMPSDrri 159
+CMPSDrri_Int 160
+CMPSL 161
+CMPSQ 162
+CMPSSrmi 163
+CMPSSrmi_Int 164
+CMPSSrri 165
+CMPSSrri_Int 166
+CMPSW 167
+CMPXCHG 168
+COMISDrm 169
+COMISDrm_Int 170
+COMISDrr 171
+COMISDrr_Int 172
+COMISSrm 173
+COMISSrm_Int 174
+COMISSrr 175
+COMISSrr_Int 176
+COMP_FST 177
+COM_FIPr 178
+COM_FIr 179
+COM_FST 180
+COM_FpIr 181
+COM_Fpr 182
+CONVERGENCECTRL_ANCHOR 183
+CONVERGENCECTRL_ENTRY 184
+CONVERGENCECTRL_GLUE 185
+CONVERGENCECTRL_LOOP 186
+COPY 187
+COPY_TO_REGCLASS 188
+CPUID 189
+CQO 190
+CRC 191
+CS_PREFIX 192
+CTEST 193
+CVTDQ 194
+CVTPD 195
+CVTPS 196
+CVTSD 197
+CVTSI 198
+CVTSS 199
+CVTTPD 200
+CVTTPS 201
+CVTTSD 202
+CVTTSS 203
+CWD 204
+CWDE 205
+DAA 206
+DAS 207
+DATA 208
+DBG_INSTR_REF 209
+DBG_LABEL 210
+DBG_PHI 211
+DBG_VALUE 212
+DBG_VALUE_LIST 213
+DEC 214
+DIV 215
+DIVPDrm 216
+DIVPDrr 217
+DIVPSrm 218
+DIVPSrr 219
+DIVR_F 220
+DIVR_FI 221
+DIVR_FPrST 222
+DIVR_FST 223
+DIVR_Fp 224
+DIVR_FpI 225
+DIVR_FrST 226
+DIVSDrm 227
+DIVSDrm_Int 228
+DIVSDrr 229
+DIVSDrr_Int 230
+DIVSSrm 231
+DIVSSrm_Int 232
+DIVSSrr 233
+DIVSSrr_Int 234
+DIV_F 235
+DIV_FI 236
+DIV_FPrST 237
+DIV_FST 238
+DIV_Fp 239
+DIV_FpI 240
+DIV_FrST 241
+DPPDrmi 242
+DPPDrri 243
+DPPSrmi 244
+DPPSrri 245
+DS_PREFIX 246
+DYN_ALLOCA 247
+EH_LABEL 248
+EH_RETURN 249
+EH_SjLj_LongJmp 250
+EH_SjLj_SetJmp 251
+EH_SjLj_Setup 252
+ENCLS 253
+ENCLU 254
+ENCLV 255
+ENCODEKEY 256
+ENDBR 257
+ENQCMD 258
+ENQCMDS 259
+ENTER 260
+ERETS 261
+ERETU 262
+ES_PREFIX 263
+EXTRACTPSmri 264
+EXTRACTPSrri 265
+EXTRACT_SUBREG 266
+EXTRQ 267
+EXTRQI 268
+F 269
+FAKE_USE 270
+FARCALL 271
+FARJMP 272
+FAULTING_OP 273
+FBLDm 274
+FBSTPm 275
+FCOM 276
+FCOMP 277
+FCOMPP 278
+FCOS 279
+FDECSTP 280
+FEMMS 281
+FENTRY_CALL 282
+FFREE 283
+FFREEP 284
+FICOM 285
+FICOMP 286
+FINCSTP 287
+FLDCW 288
+FLDENVm 289
+FLDL 290
+FLDLG 291
+FLDLN 292
+FLDPI 293
+FNCLEX 294
+FNINIT 295
+FNOP 296
+FNSTCW 297
+FNSTSW 298
+FNSTSWm 299
+FP 300
+FPATAN 301
+FPREM 302
+FPTAN 303
+FRNDINT 304
+FRSTORm 305
+FSAVEm 306
+FSCALE 307
+FSIN 308
+FSINCOS 309
+FSTENVm 310
+FS_PREFIX 311
+FXRSTOR 312
+FXSAVE 313
+FXTRACT 314
+FYL 315
+FsFLD 316
+GC_LABEL 317
+GETSEC 318
+GF 319
+GS_PREFIX 320
+G_ABDS 321
+G_ABDU 322
+G_ABS 323
+G_ADD 324
+G_ADDRSPACE_CAST 325
+G_AND 326
+G_ANYEXT 327
+G_ASHR 328
+G_ASSERT_ALIGN 329
+G_ASSERT_SEXT 330
+G_ASSERT_ZEXT 331
+G_ATOMICRMW_ADD 332
+G_ATOMICRMW_AND 333
+G_ATOMICRMW_FADD 334
+G_ATOMICRMW_FMAX 335
+G_ATOMICRMW_FMAXIMUM 336
+G_ATOMICRMW_FMIN 337
+G_ATOMICRMW_FMINIMUM 338
+G_ATOMICRMW_FSUB 339
+G_ATOMICRMW_MAX 340
+G_ATOMICRMW_MIN 341
+G_ATOMICRMW_NAND 342
+G_ATOMICRMW_OR 343
+G_ATOMICRMW_SUB 344
+G_ATOMICRMW_UDEC_WRAP 345
+G_ATOMICRMW_UINC_WRAP 346
+G_ATOMICRMW_UMAX 347
+G_ATOMICRMW_UMIN 348
+G_ATOMICRMW_USUB_COND 349
+G_ATOMICRMW_USUB_SAT 350
+G_ATOMICRMW_XCHG 351
+G_ATOMICRMW_XOR 352
+G_ATOMIC_CMPXCHG 353
+G_ATOMIC_CMPXCHG_WITH_SUCCESS 354
+G_BITCAST 355
+G_BITREVERSE 356
+G_BLOCK_ADDR 357
+G_BR 358
+G_BRCOND 359
+G_BRINDIRECT 360
+G_BRJT 361
+G_BSWAP 362
+G_BUILD_VECTOR 363
+G_BUILD_VECTOR_TRUNC 364
+G_BZERO 365
+G_CONCAT_VECTORS 366
+G_CONSTANT 367
+G_CONSTANT_FOLD_BARRIER 368
+G_CONSTANT_POOL 369
+G_CTLZ 370
+G_CTLZ_ZERO_UNDEF 371
+G_CTPOP 372
+G_CTTZ 373
+G_CTTZ_ZERO_UNDEF 374
+G_DEBUGTRAP 375
+G_DYN_STACKALLOC 376
+G_EXTRACT 377
+G_EXTRACT_SUBVECTOR 378
+G_EXTRACT_VECTOR_ELT 379
+G_FABS 380
+G_FACOS 381
+G_FADD 382
+G_FASIN 383
+G_FATAN 384
+G_FCANONICALIZE 385
+G_FCEIL 386
+G_FCMP 387
+G_FCONSTANT 388
+G_FCOPYSIGN 389
+G_FCOS 390
+G_FCOSH 391
+G_FDIV 392
+G_FENCE 393
+G_FEXP 394
+G_FFLOOR 395
+G_FFREXP 396
+G_FILD 397
+G_FIST 398
+G_FLDCW 399
+G_FLDEXP 400
+G_FLOG 401
+G_FMA 402
+G_FMAD 403
+G_FMAXIMUM 404
+G_FMAXIMUMNUM 405
+G_FMAXNUM 406
+G_FMAXNUM_IEEE 407
+G_FMINIMUM 408
+G_FMINIMUMNUM 409
+G_FMINNUM 410
+G_FMINNUM_IEEE 411
+G_FMODF 412
+G_FMUL 413
+G_FNEARBYINT 414
+G_FNEG 415
+G_FNSTCW 416
+G_FPEXT 417
+G_FPOW 418
+G_FPOWI 419
+G_FPTOSI 420
+G_FPTOSI_SAT 421
+G_FPTOUI 422
+G_FPTOUI_SAT 423
+G_FPTRUNC 424
+G_FRAME_INDEX 425
+G_FREEZE 426
+G_FREM 427
+G_FRINT 428
+G_FSHL 429
+G_FSHR 430
+G_FSIN 431
+G_FSINCOS 432
+G_FSINH 433
+G_FSQRT 434
+G_FSUB 435
+G_FTAN 436
+G_FTANH 437
+G_GET_FPENV 438
+G_GET_FPMODE 439
+G_GET_ROUNDING 440
+G_GLOBAL_VALUE 441
+G_ICMP 442
+G_IMPLICIT_DEF 443
+G_INDEXED_LOAD 444
+G_INDEXED_SEXTLOAD 445
+G_INDEXED_STORE 446
+G_INDEXED_ZEXTLOAD 447
+G_INSERT 448
+G_INSERT_SUBVECTOR 449
+G_INSERT_VECTOR_ELT 450
+G_INTRINSIC 451
+G_INTRINSIC_CONVERGENT 452
+G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS 453
+G_INTRINSIC_FPTRUNC_ROUND 454
+G_INTRINSIC_LLRINT 455
+G_INTRINSIC_LRINT 456
+G_INTRINSIC_ROUND 457
+G_INTRINSIC_ROUNDEVEN 458
+G_INTRINSIC_TRUNC 459
+G_INTRINSIC_W_SIDE_EFFECTS 460
+G_INTTOPTR 461
+G_INVOKE_REGION_START 462
+G_IS_FPCLASS 463
+G_JUMP_TABLE 464
+G_LLROUND 465
+G_LOAD 466
+G_LROUND 467
+G_LSHR 468
+G_MEMCPY 469
+G_MEMCPY_INLINE 470
+G_MEMMOVE 471
+G_MEMSET 472
+G_MERGE_VALUES 473
+G_MUL 474
+G_OR 475
+G_PHI 476
+G_PREFETCH 477
+G_PTRAUTH_GLOBAL_VALUE 478
+G_PTRMASK 479
+G_PTRTOINT 480
+G_PTR_ADD 481
+G_READCYCLECOUNTER 482
+G_READSTEADYCOUNTER 483
+G_READ_REGISTER 484
+G_RESET_FPENV 485
+G_RESET_FPMODE 486
+G_ROTL 487
+G_ROTR 488
+G_SADDE 489
+G_SADDO 490
+G_SADDSAT 491
+G_SBFX 492
+G_SCMP 493
+G_SDIV 494
+G_SDIVFIX 495
+G_SDIVFIXSAT 496
+G_SDIVREM 497
+G_SELECT 498
+G_SET_FPENV 499
+G_SET_FPMODE 500
+G_SET_ROUNDING 501
+G_SEXT 502
+G_SEXTLOAD 503
+G_SEXT_INREG 504
+G_SHL 505
+G_SHUFFLE_VECTOR 506
+G_SITOFP 507
+G_SMAX 508
+G_SMIN 509
+G_SMULFIX 510
+G_SMULFIXSAT 511
+G_SMULH 512
+G_SMULO 513
+G_SPLAT_VECTOR 514
+G_SREM 515
+G_SSHLSAT 516
+G_SSUBE 517
+G_SSUBO 518
+G_SSUBSAT 519
+G_STACKRESTORE 520
+G_STACKSAVE 521
+G_STEP_VECTOR 522
+G_STORE 523
+G_STRICT_FADD 524
+G_STRICT_FDIV 525
+G_STRICT_FLDEXP 526
+G_STRICT_FMA 527
+G_STRICT_FMUL 528
+G_STRICT_FREM 529
+G_STRICT_FSQRT 530
+G_STRICT_FSUB 531
+G_SUB 532
+G_TRAP 533
+G_TRUNC 534
+G_TRUNC_SSAT_S 535
+G_TRUNC_SSAT_U 536
+G_TRUNC_USAT_U 537
+G_UADDE 538
+G_UADDO 539
+G_UADDSAT 540
+G_UBFX 541
+G_UBSANTRAP 542
+G_UCMP 543
+G_UDIV 544
+G_UDIVFIX 545
+G_UDIVFIXSAT 546
+G_UDIVREM 547
+G_UITOFP 548
+G_UMAX 549
+G_UMIN 550
+G_UMULFIX 551
+G_UMULFIXSAT 552
+G_UMULH 553
+G_UMULO 554
+G_UNMERGE_VALUES 555
+G_UREM 556
+G_USHLSAT 557
+G_USUBE 558
+G_USUBO 559
+G_USUBSAT 560
+G_VAARG 561
+G_VASTART 562
+G_VECREDUCE_ADD 563
+G_VECREDUCE_AND 564
+G_VECREDUCE_FADD 565
+G_VECREDUCE_FMAX 566
+G_VECREDUCE_FMAXIMUM 567
+G_VECREDUCE_FMIN 568
+G_VECREDUCE_FMINIMUM 569
+G_VECREDUCE_FMUL 570
+G_VECREDUCE_MUL 571
+G_VECREDUCE_OR 572
+G_VECREDUCE_SEQ_FADD 573
+G_VECREDUCE_SEQ_FMUL 574
+G_VECREDUCE_SMAX 575
+G_VECREDUCE_SMIN 576
+G_VECREDUCE_UMAX 577
+G_VECREDUCE_UMIN 578
+G_VECREDUCE_XOR 579
+G_VECTOR_COMPRESS 580
+G_VSCALE 581
+G_WRITE_REGISTER 582
+G_XOR 583
+G_ZEXT 584
+G_ZEXTLOAD 585
+HADDPDrm 586
+HADDPDrr 587
+HADDPSrm 588
+HADDPSrr 589
+HLT 590
+HRESET 591
+HSUBPDrm 592
+HSUBPDrr 593
+HSUBPSrm 594
+HSUBPSrr 595
+ICALL_BRANCH_FUNNEL 596
+IDIV 597
+ILD_F 598
+ILD_Fp 599
+IMPLICIT_DEF 600
+IMUL 601
+IMULZU 602
+IN 603
+INC 604
+INCSSPD 605
+INCSSPQ 606
+INDIRECT_THUNK_CALL 607
+INDIRECT_THUNK_TCRETURN 608
+INIT_UNDEF 609
+INLINEASM 610
+INLINEASM_BR 611
+INSB 612
+INSERTPSrmi 613
+INSERTPSrri 614
+INSERTQ 615
+INSERTQI 616
+INSERT_SUBREG 617
+INSL 618
+INSW 619
+INT 620
+INTO 621
+INVD 622
+INVEPT 623
+INVLPG 624
+INVLPGA 625
+INVLPGB 626
+INVPCID 627
+INVVPID 628
+IRET 629
+ISTT_FP 630
+ISTT_Fp 631
+IST_F 632
+IST_FP 633
+IST_Fp 634
+Int_eh_sjlj_setup_dispatch 635
+JCC 636
+JCXZ 637
+JECXZ 638
+JMP 639
+JMPABS 640
+JRCXZ 641
+JUMP_TABLE_DEBUG_INFO 642
+KADDBkk 643
+KADDDkk 644
+KADDQkk 645
+KADDWkk 646
+KANDBkk 647
+KANDDkk 648
+KANDNBkk 649
+KANDNDkk 650
+KANDNQkk 651
+KANDNWkk 652
+KANDQkk 653
+KANDWkk 654
+KCFI_CHECK 655
+KILL 656
+KMOVBkk 657
+KMOVBkk_EVEX 658
+KMOVBkm 659
+KMOVBkm_EVEX 660
+KMOVBkr 661
+KMOVBkr_EVEX 662
+KMOVBmk 663
+KMOVBmk_EVEX 664
+KMOVBrk 665
+KMOVBrk_EVEX 666
+KMOVDkk 667
+KMOVDkk_EVEX 668
+KMOVDkm 669
+KMOVDkm_EVEX 670
+KMOVDkr 671
+KMOVDkr_EVEX 672
+KMOVDmk 673
+KMOVDmk_EVEX 674
+KMOVDrk 675
+KMOVDrk_EVEX 676
+KMOVQkk 677
+KMOVQkk_EVEX 678
+KMOVQkm 679
+KMOVQkm_EVEX 680
+KMOVQkr 681
+KMOVQkr_EVEX 682
+KMOVQmk 683
+KMOVQmk_EVEX 684
+KMOVQrk 685
+KMOVQrk_EVEX 686
+KMOVWkk 687
+KMOVWkk_EVEX 688
+KMOVWkm 689
+KMOVWkm_EVEX 690
+KMOVWkr 691
+KMOVWkr_EVEX 692
+KMOVWmk 693
+KMOVWmk_EVEX 694
+KMOVWrk 695
+KMOVWrk_EVEX 696
+KNOTBkk 697
+KNOTDkk 698
+KNOTQkk 699
+KNOTWkk 700
+KORBkk 701
+KORDkk 702
+KORQkk 703
+KORTESTBkk 704
+KORTESTDkk 705
+KORTESTQkk 706
+KORTESTWkk 707
+KORWkk 708
+KSET 709
+KSHIFTLBki 710
+KSHIFTLDki 711
+KSHIFTLQki 712
+KSHIFTLWki 713
+KSHIFTRBki 714
+KSHIFTRDki 715
+KSHIFTRQki 716
+KSHIFTRWki 717
+KTESTBkk 718
+KTESTDkk 719
+KTESTQkk 720
+KTESTWkk 721
+KUNPCKBWkk 722
+KUNPCKDQkk 723
+KUNPCKWDkk 724
+KXNORBkk 725
+KXNORDkk 726
+KXNORQkk 727
+KXNORWkk 728
+KXORBkk 729
+KXORDkk 730
+KXORQkk 731
+KXORWkk 732
+LAHF 733
+LAR 734
+LCMPXCHG 735
+LDDQUrm 736
+LDMXCSR 737
+LDS 738
+LDTILECFG 739
+LDTILECFG_EVEX 740
+LD_F 741
+LD_Fp 742
+LD_Frr 743
+LEA 744
+LEAVE 745
+LES 746
+LFENCE 747
+LFS 748
+LGDT 749
+LGS 750
+LIDT 751
+LIFETIME_END 752
+LIFETIME_START 753
+LKGS 754
+LLDT 755
+LLWPCB 756
+LMSW 757
+LOADIWKEY 758
+LOAD_STACK_GUARD 759
+LOCAL_ESCAPE 760
+LOCK_ADD 761
+LOCK_AND 762
+LOCK_BTC 763
+LOCK_BTC_RM 764
+LOCK_BTR 765
+LOCK_BTR_RM 766
+LOCK_BTS 767
+LOCK_BTS_RM 768
+LOCK_DEC 769
+LOCK_INC 770
+LOCK_OR 771
+LOCK_PREFIX 772
+LOCK_SUB 773
+LOCK_XOR 774
+LODSB 775
+LODSL 776
+LODSQ 777
+LODSW 778
+LOOP 779
+LOOPE 780
+LOOPNE 781
+LRET 782
+LRETI 783
+LSL 784
+LSS 785
+LTRm 786
+LTRr 787
+LWPINS 788
+LWPVAL 789
+LXADD 790
+LZCNT 791
+MASKMOVDQU 792
+MASKPAIR 793
+MAXCPDrm 794
+MAXCPDrr 795
+MAXCPSrm 796
+MAXCPSrr 797
+MAXCSDrm 798
+MAXCSDrr 799
+MAXCSSrm 800
+MAXCSSrr 801
+MAXPDrm 802
+MAXPDrr 803
+MAXPSrm 804
+MAXPSrr 805
+MAXSDrm 806
+MAXSDrm_Int 807
+MAXSDrr 808
+MAXSDrr_Int 809
+MAXSSrm 810
+MAXSSrm_Int 811
+MAXSSrr 812
+MAXSSrr_Int 813
+MEMBARRIER 814
+MFENCE 815
+MINCPDrm 816
+MINCPDrr 817
+MINCPSrm 818
+MINCPSrr 819
+MINCSDrm 820
+MINCSDrr 821
+MINCSSrm 822
+MINCSSrr 823
+MINPDrm 824
+MINPDrr 825
+MINPSrm 826
+MINPSrr 827
+MINSDrm 828
+MINSDrm_Int 829
+MINSDrr 830
+MINSDrr_Int 831
+MINSSrm 832
+MINSSrm_Int 833
+MINSSrr 834
+MINSSrr_Int 835
+MMX_CVTPD 836
+MMX_CVTPI 837
+MMX_CVTPS 838
+MMX_CVTTPD 839
+MMX_CVTTPS 840
+MMX_EMMS 841
+MMX_MASKMOVQ 842
+MMX_MOVD 843
+MMX_MOVDQ 844
+MMX_MOVFR 845
+MMX_MOVNTQmr 846
+MMX_MOVQ 847
+MMX_PABSBrm 848
+MMX_PABSBrr 849
+MMX_PABSDrm 850
+MMX_PABSDrr 851
+MMX_PABSWrm 852
+MMX_PABSWrr 853
+MMX_PACKSSDWrm 854
+MMX_PACKSSDWrr 855
+MMX_PACKSSWBrm 856
+MMX_PACKSSWBrr 857
+MMX_PACKUSWBrm 858
+MMX_PACKUSWBrr 859
+MMX_PADDBrm 860
+MMX_PADDBrr 861
+MMX_PADDDrm 862
+MMX_PADDDrr 863
+MMX_PADDQrm 864
+MMX_PADDQrr 865
+MMX_PADDSBrm 866
+MMX_PADDSBrr 867
+MMX_PADDSWrm 868
+MMX_PADDSWrr 869
+MMX_PADDUSBrm 870
+MMX_PADDUSBrr 871
+MMX_PADDUSWrm 872
+MMX_PADDUSWrr 873
+MMX_PADDWrm 874
+MMX_PADDWrr 875
+MMX_PALIGNRrmi 876
+MMX_PALIGNRrri 877
+MMX_PANDNrm 878
+MMX_PANDNrr 879
+MMX_PANDrm 880
+MMX_PANDrr 881
+MMX_PAVGBrm 882
+MMX_PAVGBrr 883
+MMX_PAVGWrm 884
+MMX_PAVGWrr 885
+MMX_PCMPEQBrm 886
+MMX_PCMPEQBrr 887
+MMX_PCMPEQDrm 888
+MMX_PCMPEQDrr 889
+MMX_PCMPEQWrm 890
+MMX_PCMPEQWrr 891
+MMX_PCMPGTBrm 892
+MMX_PCMPGTBrr 893
+MMX_PCMPGTDrm 894
+MMX_PCMPGTDrr 895
+MMX_PCMPGTWrm 896
+MMX_PCMPGTWrr 897
+MMX_PEXTRWrri 898
+MMX_PHADDDrm 899
+MMX_PHADDDrr 900
+MMX_PHADDSWrm 901
+MMX_PHADDSWrr 902
+MMX_PHADDWrm 903
+MMX_PHADDWrr 904
+MMX_PHSUBDrm 905
+MMX_PHSUBDrr 906
+MMX_PHSUBSWrm 907
+MMX_PHSUBSWrr 908
+MMX_PHSUBWrm 909
+MMX_PHSUBWrr 910
+MMX_PINSRWrmi 911
+MMX_PINSRWrri 912
+MMX_PMADDUBSWrm 913
+MMX_PMADDUBSWrr 914
+MMX_PMADDWDrm 915
+MMX_PMADDWDrr 916
+MMX_PMAXSWrm 917
+MMX_PMAXSWrr 918
+MMX_PMAXUBrm 919
+MMX_PMAXUBrr 920
+MMX_PMINSWrm 921
+MMX_PMINSWrr 922
+MMX_PMINUBrm 923
+MMX_PMINUBrr 924
+MMX_PMOVMSKBrr 925
+MMX_PMULHRSWrm 926
+MMX_PMULHRSWrr 927
+MMX_PMULHUWrm 928
+MMX_PMULHUWrr 929
+MMX_PMULHWrm 930
+MMX_PMULHWrr 931
+MMX_PMULLWrm 932
+MMX_PMULLWrr 933
+MMX_PMULUDQrm 934
+MMX_PMULUDQrr 935
+MMX_PORrm 936
+MMX_PORrr 937
+MMX_PSADBWrm 938
+MMX_PSADBWrr 939
+MMX_PSHUFBrm 940
+MMX_PSHUFBrr 941
+MMX_PSHUFWmi 942
+MMX_PSHUFWri 943
+MMX_PSIGNBrm 944
+MMX_PSIGNBrr 945
+MMX_PSIGNDrm 946
+MMX_PSIGNDrr 947
+MMX_PSIGNWrm 948
+MMX_PSIGNWrr 949
+MMX_PSLLDri 950
+MMX_PSLLDrm 951
+MMX_PSLLDrr 952
+MMX_PSLLQri 953
+MMX_PSLLQrm 954
+MMX_PSLLQrr 955
+MMX_PSLLWri 956
+MMX_PSLLWrm 957
+MMX_PSLLWrr 958
+MMX_PSRADri 959
+MMX_PSRADrm 960
+MMX_PSRADrr 961
+MMX_PSRAWri 962
+MMX_PSRAWrm 963
+MMX_PSRAWrr 964
+MMX_PSRLDri 965
+MMX_PSRLDrm 966
+MMX_PSRLDrr 967
+MMX_PSRLQri 968
+MMX_PSRLQrm 969
+MMX_PSRLQrr 970
+MMX_PSRLWri 971
+MMX_PSRLWrm 972
+MMX_PSRLWrr 973
+MMX_PSUBBrm 974
+MMX_PSUBBrr 975
+MMX_PSUBDrm 976
+MMX_PSUBDrr 977
+MMX_PSUBQrm 978
+MMX_PSUBQrr 979
+MMX_PSUBSBrm 980
+MMX_PSUBSBrr 981
+MMX_PSUBSWrm 982
+MMX_PSUBSWrr 983
+MMX_PSUBUSBrm 984
+MMX_PSUBUSBrr 985
+MMX_PSUBUSWrm 986
+MMX_PSUBUSWrr 987
+MMX_PSUBWrm 988
+MMX_PSUBWrr 989
+MMX_PUNPCKHBWrm 990
+MMX_PUNPCKHBWrr 991
+MMX_PUNPCKHDQrm 992
+MMX_PUNPCKHDQrr 993
+MMX_PUNPCKHWDrm 994
+MMX_PUNPCKHWDrr 995
+MMX_PUNPCKLBWrm 996
+MMX_PUNPCKLBWrr 997
+MMX_PUNPCKLDQrm 998
+MMX_PUNPCKLDQrr 999
+MMX_PUNPCKLWDrm 1000
+MMX_PUNPCKLWDrr 1001
+MMX_PXORrm 1002
+MMX_PXORrr 1003
+MMX_SET 1004
+MONITOR 1005
+MONITORX 1006
+MONTMUL 1007
+MORESTACK_RET 1008
+MORESTACK_RET_RESTORE_R 1009
+MOV 1010
+MOVAPDmr 1011
+MOVAPDrm 1012
+MOVAPDrr 1013
+MOVAPDrr_REV 1014
+MOVAPSmr 1015
+MOVAPSrm 1016
+MOVAPSrr 1017
+MOVAPSrr_REV 1018
+MOVBE 1019
+MOVDDUPrm 1020
+MOVDDUPrr 1021
+MOVDI 1022
+MOVDIR 1023
+MOVDIRI 1024
+MOVDQAmr 1025
+MOVDQArm 1026
+MOVDQArr 1027
+MOVDQArr_REV 1028
+MOVDQUmr 1029
+MOVDQUrm 1030
+MOVDQUrr 1031
+MOVDQUrr_REV 1032
+MOVHLPSrr 1033
+MOVHPDmr 1034
+MOVHPDrm 1035
+MOVHPSmr 1036
+MOVHPSrm 1037
+MOVLHPSrr 1038
+MOVLPDmr 1039
+MOVLPDrm 1040
+MOVLPSmr 1041
+MOVLPSrm 1042
+MOVMSKPDrr 1043
+MOVMSKPSrr 1044
+MOVNTDQArm 1045
+MOVNTDQmr 1046
+MOVNTI 1047
+MOVNTImr 1048
+MOVNTPDmr 1049
+MOVNTPSmr 1050
+MOVNTSD 1051
+MOVNTSS 1052
+MOVPC 1053
+MOVPDI 1054
+MOVPQI 1055
+MOVPQIto 1056
+MOVQI 1057
+MOVRS 1058
+MOVSB 1059
+MOVSDmr 1060
+MOVSDrm 1061
+MOVSDrm_alt 1062
+MOVSDrr 1063
+MOVSDrr_REV 1064
+MOVSDto 1065
+MOVSHDUPrm 1066
+MOVSHDUPrr 1067
+MOVSHPmr 1068
+MOVSHPrm 1069
+MOVSL 1070
+MOVSLDUPrm 1071
+MOVSLDUPrr 1072
+MOVSQ 1073
+MOVSS 1074
+MOVSSmr 1075
+MOVSSrm 1076
+MOVSSrm_alt 1077
+MOVSSrr 1078
+MOVSSrr_REV 1079
+MOVSW 1080
+MOVSX 1081
+MOVUPDmr 1082
+MOVUPDrm 1083
+MOVUPDrr 1084
+MOVUPDrr_REV 1085
+MOVUPSmr 1086
+MOVUPSrm 1087
+MOVUPSrr 1088
+MOVUPSrr_REV 1089
+MOVZPQILo 1090
+MOVZX 1091
+MPSADBWrmi 1092
+MPSADBWrri 1093
+MUL 1094
+MULPDrm 1095
+MULPDrr 1096
+MULPSrm 1097
+MULPSrr 1098
+MULSDrm 1099
+MULSDrm_Int 1100
+MULSDrr 1101
+MULSDrr_Int 1102
+MULSSrm 1103
+MULSSrm_Int 1104
+MULSSrr 1105
+MULSSrr_Int 1106
+MULX 1107
+MUL_F 1108
+MUL_FI 1109
+MUL_FPrST 1110
+MUL_FST 1111
+MUL_Fp 1112
+MUL_FpI 1113
+MUL_FrST 1114
+MWAITX 1115
+MWAITX_SAVE_RBX 1116
+MWAITXrrr 1117
+MWAITrr 1118
+NEG 1119
+NOOP 1120
+NOOPL 1121
+NOOPLr 1122
+NOOPQ 1123
+NOOPQr 1124
+NOOPW 1125
+NOOPWr 1126
+NOT 1127
+OR 1128
+ORPDrm 1129
+ORPDrr 1130
+ORPSrm 1131
+ORPSrr 1132
+OUT 1133
+OUTSB 1134
+OUTSL 1135
+OUTSW 1136
+PABSBrm 1137
+PABSBrr 1138
+PABSDrm 1139
+PABSDrr 1140
+PABSWrm 1141
+PABSWrr 1142
+PACKSSDWrm 1143
+PACKSSDWrr 1144
+PACKSSWBrm 1145
+PACKSSWBrr 1146
+PACKUSDWrm 1147
+PACKUSDWrr 1148
+PACKUSWBrm 1149
+PACKUSWBrr 1150
+PADDBrm 1151
+PADDBrr 1152
+PADDDrm 1153
+PADDDrr 1154
+PADDQrm 1155
+PADDQrr 1156
+PADDSBrm 1157
+PADDSBrr 1158
+PADDSWrm 1159
+PADDSWrr 1160
+PADDUSBrm 1161
+PADDUSBrr 1162
+PADDUSWrm 1163
+PADDUSWrr 1164
+PADDWrm 1165
+PADDWrr 1166
+PALIGNRrmi 1167
+PALIGNRrri 1168
+PANDNrm 1169
+PANDNrr 1170
+PANDrm 1171
+PANDrr 1172
+PATCHABLE_EVENT_CALL 1173
+PATCHABLE_FUNCTION_ENTER 1174
+PATCHABLE_FUNCTION_EXIT 1175
+PATCHABLE_OP 1176
+PATCHABLE_RET 1177
+PATCHABLE_TAIL_CALL 1178
+PATCHABLE_TYPED_EVENT_CALL 1179
+PATCHPOINT 1180
+PAUSE 1181
+PAVGBrm 1182
+PAVGBrr 1183
+PAVGUSBrm 1184
+PAVGUSBrr 1185
+PAVGWrm 1186
+PAVGWrr 1187
+PBLENDVBrm 1188
+PBLENDVBrr 1189
+PBLENDWrmi 1190
+PBLENDWrri 1191
+PBNDKB 1192
+PCLMULQDQrmi 1193
+PCLMULQDQrri 1194
+PCMPEQBrm 1195
+PCMPEQBrr 1196
+PCMPEQDrm 1197
+PCMPEQDrr 1198
+PCMPEQQrm 1199
+PCMPEQQrr 1200
+PCMPEQWrm 1201
+PCMPEQWrr 1202
+PCMPESTRIrmi 1203
+PCMPESTRIrri 1204
+PCMPESTRMrmi 1205
+PCMPESTRMrri 1206
+PCMPGTBrm 1207
+PCMPGTBrr 1208
+PCMPGTDrm 1209
+PCMPGTDrr 1210
+PCMPGTQrm 1211
+PCMPGTQrr 1212
+PCMPGTWrm 1213
+PCMPGTWrr 1214
+PCMPISTRIrmi 1215
+PCMPISTRIrri 1216
+PCMPISTRMrmi 1217
+PCMPISTRMrri 1218
+PCONFIG 1219
+PDEP 1220
+PEXT 1221
+PEXTRBmri 1222
+PEXTRBrri 1223
+PEXTRDmri 1224
+PEXTRDrri 1225
+PEXTRQmri 1226
+PEXTRQrri 1227
+PEXTRWmri 1228
+PEXTRWrri 1229
+PEXTRWrri_REV 1230
+PF 1231
+PFACCrm 1232
+PFACCrr 1233
+PFADDrm 1234
+PFADDrr 1235
+PFCMPEQrm 1236
+PFCMPEQrr 1237
+PFCMPGErm 1238
+PFCMPGErr 1239
+PFCMPGTrm 1240
+PFCMPGTrr 1241
+PFMAXrm 1242
+PFMAXrr 1243
+PFMINrm 1244
+PFMINrr 1245
+PFMULrm 1246
+PFMULrr 1247
+PFNACCrm 1248
+PFNACCrr 1249
+PFPNACCrm 1250
+PFPNACCrr 1251
+PFRCPIT 1252
+PFRCPrm 1253
+PFRCPrr 1254
+PFRSQIT 1255
+PFRSQRTrm 1256
+PFRSQRTrr 1257
+PFSUBRrm 1258
+PFSUBRrr 1259
+PFSUBrm 1260
+PFSUBrr 1261
+PHADDDrm 1262
+PHADDDrr 1263
+PHADDSWrm 1264
+PHADDSWrr 1265
+PHADDWrm 1266
+PHADDWrr 1267
+PHI 1268
+PHMINPOSUWrm 1269
+PHMINPOSUWrr 1270
+PHSUBDrm 1271
+PHSUBDrr 1272
+PHSUBSWrm 1273
+PHSUBSWrr 1274
+PHSUBWrm 1275
+PHSUBWrr 1276
+PI 1277
+PINSRBrmi 1278
+PINSRBrri 1279
+PINSRDrmi 1280
+PINSRDrri 1281
+PINSRQrmi 1282
+PINSRQrri 1283
+PINSRWrmi 1284
+PINSRWrri 1285
+PLDTILECFGV 1286
+PLEA 1287
+PMADDUBSWrm 1288
+PMADDUBSWrr 1289
+PMADDWDrm 1290
+PMADDWDrr 1291
+PMAXSBrm 1292
+PMAXSBrr 1293
+PMAXSDrm 1294
+PMAXSDrr 1295
+PMAXSWrm 1296
+PMAXSWrr 1297
+PMAXUBrm 1298
+PMAXUBrr 1299
+PMAXUDrm 1300
+PMAXUDrr 1301
+PMAXUWrm 1302
+PMAXUWrr 1303
+PMINSBrm 1304
+PMINSBrr 1305
+PMINSDrm 1306
+PMINSDrr 1307
+PMINSWrm 1308
+PMINSWrr 1309
+PMINUBrm 1310
+PMINUBrr 1311
+PMINUDrm 1312
+PMINUDrr 1313
+PMINUWrm 1314
+PMINUWrr 1315
+PMOVMSKBrr 1316
+PMOVSXBDrm 1317
+PMOVSXBDrr 1318
+PMOVSXBQrm 1319
+PMOVSXBQrr 1320
+PMOVSXBWrm 1321
+PMOVSXBWrr 1322
+PMOVSXDQrm 1323
+PMOVSXDQrr 1324
+PMOVSXWDrm 1325
+PMOVSXWDrr 1326
+PMOVSXWQrm 1327
+PMOVSXWQrr 1328
+PMOVZXBDrm 1329
+PMOVZXBDrr 1330
+PMOVZXBQrm 1331
+PMOVZXBQrr 1332
+PMOVZXBWrm 1333
+PMOVZXBWrr 1334
+PMOVZXDQrm 1335
+PMOVZXDQrr 1336
+PMOVZXWDrm 1337
+PMOVZXWDrr 1338
+PMOVZXWQrm 1339
+PMOVZXWQrr 1340
+PMULDQrm 1341
+PMULDQrr 1342
+PMULHRSWrm 1343
+PMULHRSWrr 1344
+PMULHRWrm 1345
+PMULHRWrr 1346
+PMULHUWrm 1347
+PMULHUWrr 1348
+PMULHWrm 1349
+PMULHWrr 1350
+PMULLDrm 1351
+PMULLDrr 1352
+PMULLWrm 1353
+PMULLWrr 1354
+PMULUDQrm 1355
+PMULUDQrr 1356
+POP 1357
+POPA 1358
+POPCNT 1359
+POPDS 1360
+POPES 1361
+POPF 1362
+POPFS 1363
+POPGS 1364
+POPP 1365
+POPSS 1366
+PORrm 1367
+PORrr 1368
+PREALLOCATED_ARG 1369
+PREALLOCATED_SETUP 1370
+PREFETCH 1371
+PREFETCHIT 1372
+PREFETCHNTA 1373
+PREFETCHRST 1374
+PREFETCHT 1375
+PREFETCHW 1376
+PREFETCHWT 1377
+PROBED_ALLOCA 1378
+PSADBWrm 1379
+PSADBWrr 1380
+PSEUDO_PROBE 1381
+PSHUFBrm 1382
+PSHUFBrr 1383
+PSHUFDmi 1384
+PSHUFDri 1385
+PSHUFHWmi 1386
+PSHUFHWri 1387
+PSHUFLWmi 1388
+PSHUFLWri 1389
+PSIGNBrm 1390
+PSIGNBrr 1391
+PSIGNDrm 1392
+PSIGNDrr 1393
+PSIGNWrm 1394
+PSIGNWrr 1395
+PSLLDQri 1396
+PSLLDri 1397
+PSLLDrm 1398
+PSLLDrr 1399
+PSLLQri 1400
+PSLLQrm 1401
+PSLLQrr 1402
+PSLLWri 1403
+PSLLWrm 1404
+PSLLWrr 1405
+PSMASH 1406
+PSRADri 1407
+PSRADrm 1408
+PSRADrr 1409
+PSRAWri 1410
+PSRAWrm 1411
+PSRAWrr 1412
+PSRLDQri 1413
+PSRLDri 1414
+PSRLDrm 1415
+PSRLDrr 1416
+PSRLQri 1417
+PSRLQrm 1418
+PSRLQrr 1419
+PSRLWri 1420
+PSRLWrm 1421
+PSRLWrr 1422
+PSUBBrm 1423
+PSUBBrr 1424
+PSUBDrm 1425
+PSUBDrr 1426
+PSUBQrm 1427
+PSUBQrr 1428
+PSUBSBrm 1429
+PSUBSBrr 1430
+PSUBSWrm 1431
+PSUBSWrr 1432
+PSUBUSBrm 1433
+PSUBUSBrr 1434
+PSUBUSWrm 1435
+PSUBUSWrr 1436
+PSUBWrm 1437
+PSUBWrr 1438
+PSWAPDrm 1439
+PSWAPDrr 1440
+PT 1441
+PTCMMIMFP 1442
+PTCMMRLFP 1443
+PTCONJTCMMIMFP 1444
+PTCONJTFP 1445
+PTCVTROWD 1446
+PTCVTROWPS 1447
+PTDPBF 1448
+PTDPBHF 1449
+PTDPBSSD 1450
+PTDPBSSDV 1451
+PTDPBSUD 1452
+PTDPBSUDV 1453
+PTDPBUSD 1454
+PTDPBUSDV 1455
+PTDPBUUD 1456
+PTDPBUUDV 1457
+PTDPFP 1458
+PTDPHBF 1459
+PTDPHF 1460
+PTESTrm 1461
+PTESTrr 1462
+PTILELOADD 1463
+PTILELOADDRS 1464
+PTILELOADDRST 1465
+PTILELOADDRSV 1466
+PTILELOADDT 1467
+PTILELOADDV 1468
+PTILEMOVROWrre 1469
+PTILEMOVROWrreV 1470
+PTILEMOVROWrri 1471
+PTILEMOVROWrriV 1472
+PTILEPAIRLOAD 1473
+PTILEPAIRSTORE 1474
+PTILESTORED 1475
+PTILESTOREDV 1476
+PTILEZERO 1477
+PTILEZEROV 1478
+PTMMULTF 1479
+PTTCMMIMFP 1480
+PTTCMMRLFP 1481
+PTTDPBF 1482
+PTTDPFP 1483
+PTTMMULTF 1484
+PTTRANSPOSED 1485
+PTTRANSPOSEDV 1486
+PTWRITE 1487
+PTWRITEm 1488
+PTWRITEr 1489
+PUNPCKHBWrm 1490
+PUNPCKHBWrr 1491
+PUNPCKHDQrm 1492
+PUNPCKHDQrr 1493
+PUNPCKHQDQrm 1494
+PUNPCKHQDQrr 1495
+PUNPCKHWDrm 1496
+PUNPCKHWDrr 1497
+PUNPCKLBWrm 1498
+PUNPCKLBWrr 1499
+PUNPCKLDQrm 1500
+PUNPCKLDQrr 1501
+PUNPCKLQDQrm 1502
+PUNPCKLQDQrr 1503
+PUNPCKLWDrm 1504
+PUNPCKLWDrr 1505
+PUSH 1506
+PUSHA 1507
+PUSHCS 1508
+PUSHDS 1509
+PUSHES 1510
+PUSHF 1511
+PUSHFS 1512
+PUSHGS 1513
+PUSHP 1514
+PUSHSS 1515
+PVALIDATE 1516
+PXORrm 1517
+PXORrr 1518
+RCL 1519
+RCPPSm 1520
+RCPPSr 1521
+RCPSSm 1522
+RCPSSm_Int 1523
+RCPSSr 1524
+RCPSSr_Int 1525
+RCR 1526
+RDFLAGS 1527
+RDFSBASE 1528
+RDGSBASE 1529
+RDMSR 1530
+RDMSRLIST 1531
+RDMSRri 1532
+RDMSRri_EVEX 1533
+RDPID 1534
+RDPKRUr 1535
+RDPMC 1536
+RDPRU 1537
+RDRAND 1538
+RDSEED 1539
+RDSSPD 1540
+RDSSPQ 1541
+RDTSC 1542
+RDTSCP 1543
+REG_SEQUENCE 1544
+REPNE_PREFIX 1545
+REP_MOVSB 1546
+REP_MOVSD 1547
+REP_MOVSQ 1548
+REP_MOVSW 1549
+REP_PREFIX 1550
+REP_STOSB 1551
+REP_STOSD 1552
+REP_STOSQ 1553
+REP_STOSW 1554
+RET 1555
+RETI 1556
+REX 1557
+RMPADJUST 1558
+RMPQUERY 1559
+RMPUPDATE 1560
+ROL 1561
+ROR 1562
+RORX 1563
+ROUNDPDmi 1564
+ROUNDPDri 1565
+ROUNDPSmi 1566
+ROUNDPSri 1567
+ROUNDSDmi 1568
+ROUNDSDmi_Int 1569
+ROUNDSDri 1570
+ROUNDSDri_Int 1571
+ROUNDSSmi 1572
+ROUNDSSmi_Int 1573
+ROUNDSSri 1574
+ROUNDSSri_Int 1575
+RSM 1576
+RSQRTPSm 1577
+RSQRTPSr 1578
+RSQRTSSm 1579
+RSQRTSSm_Int 1580
+RSQRTSSr 1581
+RSQRTSSr_Int 1582
+RSTORSSP 1583
+SAHF 1584
+SALC 1585
+SAR 1586
+SARX 1587
+SAVEPREVSSP 1588
+SBB 1589
+SCASB 1590
+SCASL 1591
+SCASQ 1592
+SCASW 1593
+SEAMCALL 1594
+SEAMOPS 1595
+SEAMRET 1596
+SEG_ALLOCA 1597
+SEH_BeginEpilogue 1598
+SEH_EndEpilogue 1599
+SEH_EndPrologue 1600
+SEH_PushFrame 1601
+SEH_PushReg 1602
+SEH_SaveReg 1603
+SEH_SaveXMM 1604
+SEH_SetFrame 1605
+SEH_StackAlign 1606
+SEH_StackAlloc 1607
+SEH_UnwindV 1608
+SEH_UnwindVersion 1609
+SENDUIPI 1610
+SERIALIZE 1611
+SETB_C 1612
+SETCCm 1613
+SETCCm_EVEX 1614
+SETCCr 1615
+SETCCr_EVEX 1616
+SETSSBSY 1617
+SETZUCCm 1618
+SETZUCCr 1619
+SFENCE 1620
+SGDT 1621
+SHA 1622
+SHL 1623
+SHLD 1624
+SHLDROT 1625
+SHLX 1626
+SHR 1627
+SHRD 1628
+SHRDROT 1629
+SHRX 1630
+SHUFPDrmi 1631
+SHUFPDrri 1632
+SHUFPSrmi 1633
+SHUFPSrri 1634
+SIDT 1635
+SKINIT 1636
+SLDT 1637
+SLWPCB 1638
+SMSW 1639
+SQRTPDm 1640
+SQRTPDr 1641
+SQRTPSm 1642
+SQRTPSr 1643
+SQRTSDm 1644
+SQRTSDm_Int 1645
+SQRTSDr 1646
+SQRTSDr_Int 1647
+SQRTSSm 1648
+SQRTSSm_Int 1649
+SQRTSSr 1650
+SQRTSSr_Int 1651
+SQRT_F 1652
+SQRT_Fp 1653
+SS_PREFIX 1654
+STAC 1655
+STACKALLOC_W_PROBING 1656
+STACKMAP 1657
+STATEPOINT 1658
+STC 1659
+STD 1660
+STGI 1661
+STI 1662
+STMXCSR 1663
+STOSB 1664
+STOSL 1665
+STOSQ 1666
+STOSW 1667
+STR 1668
+STRm 1669
+STTILECFG 1670
+STTILECFG_EVEX 1671
+STUI 1672
+ST_F 1673
+ST_FP 1674
+ST_FPrr 1675
+ST_Fp 1676
+ST_FpP 1677
+ST_Frr 1678
+SUB 1679
+SUBPDrm 1680
+SUBPDrr 1681
+SUBPSrm 1682
+SUBPSrr 1683
+SUBREG_TO_REG 1684
+SUBR_F 1685
+SUBR_FI 1686
+SUBR_FPrST 1687
+SUBR_FST 1688
+SUBR_Fp 1689
+SUBR_FpI 1690
+SUBR_FrST 1691
+SUBSDrm 1692
+SUBSDrm_Int 1693
+SUBSDrr 1694
+SUBSDrr_Int 1695
+SUBSSrm 1696
+SUBSSrm_Int 1697
+SUBSSrr 1698
+SUBSSrr_Int 1699
+SUB_F 1700
+SUB_FI 1701
+SUB_FPrST 1702
+SUB_FST 1703
+SUB_Fp 1704
+SUB_FpI 1705
+SUB_FrST 1706
+SWAPGS 1707
+SYSCALL 1708
+SYSENTER 1709
+SYSEXIT 1710
+SYSRET 1711
+T 1712
+TAILJMPd 1713
+TAILJMPd_CC 1714
+TAILJMPm 1715
+TAILJMPr 1716
+TCMMIMFP 1717
+TCMMRLFP 1718
+TCONJTCMMIMFP 1719
+TCONJTFP 1720
+TCRETURN_HIPE 1721
+TCRETURN_WIN 1722
+TCRETURN_WINmi 1723
+TCRETURNdi 1724
+TCRETURNdicc 1725
+TCRETURNmi 1726
+TCRETURNri 1727
+TCVTROWD 1728
+TCVTROWPS 1729
+TDCALL 1730
+TDPBF 1731
+TDPBHF 1732
+TDPBSSD 1733
+TDPBSUD 1734
+TDPBUSD 1735
+TDPBUUD 1736
+TDPFP 1737
+TDPHBF 1738
+TDPHF 1739
+TEST 1740
+TESTUI 1741
+TILELOADD 1742
+TILELOADDRS 1743
+TILELOADDRST 1744
+TILELOADDRS_EVEX 1745
+TILELOADDT 1746
+TILELOADD_EVEX 1747
+TILEMOVROWrre 1748
+TILEMOVROWrri 1749
+TILERELEASE 1750
+TILESTORED 1751
+TILESTORED_EVEX 1752
+TILEZERO 1753
+TLBSYNC 1754
+TLSCall 1755
+TLS_addr 1756
+TLS_addrX 1757
+TLS_base_addr 1758
+TLS_base_addrX 1759
+TLS_desc 1760
+TMMULTF 1761
+TPAUSE 1762
+TRAP 1763
+TST_F 1764
+TST_Fp 1765
+TTCMMIMFP 1766
+TTCMMRLFP 1767
+TTDPBF 1768
+TTDPFP 1769
+TTMMULTF 1770
+TTRANSPOSED 1771
+TZCNT 1772
+TZMSK 1773
+UBSAN_UD 1774
+UCOMISDrm 1775
+UCOMISDrm_Int 1776
+UCOMISDrr 1777
+UCOMISDrr_Int 1778
+UCOMISSrm 1779
+UCOMISSrm_Int 1780
+UCOMISSrr 1781
+UCOMISSrr_Int 1782
+UCOM_FIPr 1783
+UCOM_FIr 1784
+UCOM_FPPr 1785
+UCOM_FPr 1786
+UCOM_FpIr 1787
+UCOM_Fpr 1788
+UCOM_Fr 1789
+UD 1790
+UIRET 1791
+UMONITOR 1792
+UMWAIT 1793
+UNPCKHPDrm 1794
+UNPCKHPDrr 1795
+UNPCKHPSrm 1796
+UNPCKHPSrr 1797
+UNPCKLPDrm 1798
+UNPCKLPDrr 1799
+UNPCKLPSrm 1800
+UNPCKLPSrr 1801
+URDMSRri 1802
+URDMSRri_EVEX 1803
+URDMSRrr 1804
+URDMSRrr_EVEX 1805
+UWRMSRir 1806
+UWRMSRir_EVEX 1807
+UWRMSRrr 1808
+UWRMSRrr_EVEX 1809
+V 1810
+VAARG 1811
+VAARG_X 1812
+VADDBF 1813
+VADDPDYrm 1814
+VADDPDYrr 1815
+VADDPDZ 1816
+VADDPDZrm 1817
+VADDPDZrmb 1818
+VADDPDZrmbk 1819
+VADDPDZrmbkz 1820
+VADDPDZrmk 1821
+VADDPDZrmkz 1822
+VADDPDZrr 1823
+VADDPDZrrb 1824
+VADDPDZrrbk 1825
+VADDPDZrrbkz 1826
+VADDPDZrrk 1827
+VADDPDZrrkz 1828
+VADDPDrm 1829
+VADDPDrr 1830
+VADDPHZ 1831
+VADDPHZrm 1832
+VADDPHZrmb 1833
+VADDPHZrmbk 1834
+VADDPHZrmbkz 1835
+VADDPHZrmk 1836
+VADDPHZrmkz 1837
+VADDPHZrr 1838
+VADDPHZrrb 1839
+VADDPHZrrbk 1840
+VADDPHZrrbkz 1841
+VADDPHZrrk 1842
+VADDPHZrrkz 1843
+VADDPSYrm 1844
+VADDPSYrr 1845
+VADDPSZ 1846
+VADDPSZrm 1847
+VADDPSZrmb 1848
+VADDPSZrmbk 1849
+VADDPSZrmbkz 1850
+VADDPSZrmk 1851
+VADDPSZrmkz 1852
+VADDPSZrr 1853
+VADDPSZrrb 1854
+VADDPSZrrbk 1855
+VADDPSZrrbkz 1856
+VADDPSZrrk 1857
+VADDPSZrrkz 1858
+VADDPSrm 1859
+VADDPSrr 1860
+VADDSDZrm 1861
+VADDSDZrm_Int 1862
+VADDSDZrmk_Int 1863
+VADDSDZrmkz_Int 1864
+VADDSDZrr 1865
+VADDSDZrr_Int 1866
+VADDSDZrrb_Int 1867
+VADDSDZrrbk_Int 1868
+VADDSDZrrbkz_Int 1869
+VADDSDZrrk_Int 1870
+VADDSDZrrkz_Int 1871
+VADDSDrm 1872
+VADDSDrm_Int 1873
+VADDSDrr 1874
+VADDSDrr_Int 1875
+VADDSHZrm 1876
+VADDSHZrm_Int 1877
+VADDSHZrmk_Int 1878
+VADDSHZrmkz_Int 1879
+VADDSHZrr 1880
+VADDSHZrr_Int 1881
+VADDSHZrrb_Int 1882
+VADDSHZrrbk_Int 1883
+VADDSHZrrbkz_Int 1884
+VADDSHZrrk_Int 1885
+VADDSHZrrkz_Int 1886
+VADDSSZrm 1887
+VADDSSZrm_Int 1888
+VADDSSZrmk_Int 1889
+VADDSSZrmkz_Int 1890
+VADDSSZrr 1891
+VADDSSZrr_Int 1892
+VADDSSZrrb_Int 1893
+VADDSSZrrbk_Int 1894
+VADDSSZrrbkz_Int 1895
+VADDSSZrrk_Int 1896
+VADDSSZrrkz_Int 1897
+VADDSSrm 1898
+VADDSSrm_Int 1899
+VADDSSrr 1900
+VADDSSrr_Int 1901
+VADDSUBPDYrm 1902
+VADDSUBPDYrr 1903
+VADDSUBPDrm 1904
+VADDSUBPDrr 1905
+VADDSUBPSYrm 1906
+VADDSUBPSYrr 1907
+VADDSUBPSrm 1908
+VADDSUBPSrr 1909
+VAESDECLASTYrm 1910
+VAESDECLASTYrr 1911
+VAESDECLASTZ 1912
+VAESDECLASTZrm 1913
+VAESDECLASTZrr 1914
+VAESDECLASTrm 1915
+VAESDECLASTrr 1916
+VAESDECYrm 1917
+VAESDECYrr 1918
+VAESDECZ 1919
+VAESDECZrm 1920
+VAESDECZrr 1921
+VAESDECrm 1922
+VAESDECrr 1923
+VAESENCLASTYrm 1924
+VAESENCLASTYrr 1925
+VAESENCLASTZ 1926
+VAESENCLASTZrm 1927
+VAESENCLASTZrr 1928
+VAESENCLASTrm 1929
+VAESENCLASTrr 1930
+VAESENCYrm 1931
+VAESENCYrr 1932
+VAESENCZ 1933
+VAESENCZrm 1934
+VAESENCZrr 1935
+VAESENCrm 1936
+VAESENCrr 1937
+VAESIMCrm 1938
+VAESIMCrr 1939
+VAESKEYGENASSISTrmi 1940
+VAESKEYGENASSISTrri 1941
+VALIGNDZ 1942
+VALIGNDZrmbi 1943
+VALIGNDZrmbik 1944
+VALIGNDZrmbikz 1945
+VALIGNDZrmi 1946
+VALIGNDZrmik 1947
+VALIGNDZrmikz 1948
+VALIGNDZrri 1949
+VALIGNDZrrik 1950
+VALIGNDZrrikz 1951
+VALIGNQZ 1952
+VALIGNQZrmbi 1953
+VALIGNQZrmbik 1954
+VALIGNQZrmbikz 1955
+VALIGNQZrmi 1956
+VALIGNQZrmik 1957
+VALIGNQZrmikz 1958
+VALIGNQZrri 1959
+VALIGNQZrrik 1960
+VALIGNQZrrikz 1961
+VANDNPDYrm 1962
+VANDNPDYrr 1963
+VANDNPDZ 1964
+VANDNPDZrm 1965
+VANDNPDZrmb 1966
+VANDNPDZrmbk 1967
+VANDNPDZrmbkz 1968
+VANDNPDZrmk 1969
+VANDNPDZrmkz 1970
+VANDNPDZrr 1971
+VANDNPDZrrk 1972
+VANDNPDZrrkz 1973
+VANDNPDrm 1974
+VANDNPDrr 1975
+VANDNPSYrm 1976
+VANDNPSYrr 1977
+VANDNPSZ 1978
+VANDNPSZrm 1979
+VANDNPSZrmb 1980
+VANDNPSZrmbk 1981
+VANDNPSZrmbkz 1982
+VANDNPSZrmk 1983
+VANDNPSZrmkz 1984
+VANDNPSZrr 1985
+VANDNPSZrrk 1986
+VANDNPSZrrkz 1987
+VANDNPSrm 1988
+VANDNPSrr 1989
+VANDPDYrm 1990
+VANDPDYrr 1991
+VANDPDZ 1992
+VANDPDZrm 1993
+VANDPDZrmb 1994
+VANDPDZrmbk 1995
+VANDPDZrmbkz 1996
+VANDPDZrmk 1997
+VANDPDZrmkz 1998
+VANDPDZrr 1999
+VANDPDZrrk 2000
+VANDPDZrrkz 2001
+VANDPDrm 2002
+VANDPDrr 2003
+VANDPSYrm 2004
+VANDPSYrr 2005
+VANDPSZ 2006
+VANDPSZrm 2007
+VANDPSZrmb 2008
+VANDPSZrmbk 2009
+VANDPSZrmbkz 2010
+VANDPSZrmk 2011
+VANDPSZrmkz 2012
+VANDPSZrr 2013
+VANDPSZrrk 2014
+VANDPSZrrkz 2015
+VANDPSrm 2016
+VANDPSrr 2017
+VASTART_SAVE_XMM_REGS 2018
+VBCSTNEBF 2019
+VBCSTNESH 2020
+VBLENDMPDZ 2021
+VBLENDMPDZrm 2022
+VBLENDMPDZrmb 2023
+VBLENDMPDZrmbk 2024
+VBLENDMPDZrmbkz 2025
+VBLENDMPDZrmk 2026
+VBLENDMPDZrmkz 2027
+VBLENDMPDZrr 2028
+VBLENDMPDZrrk 2029
+VBLENDMPDZrrkz 2030
+VBLENDMPSZ 2031
+VBLENDMPSZrm 2032
+VBLENDMPSZrmb 2033
+VBLENDMPSZrmbk 2034
+VBLENDMPSZrmbkz 2035
+VBLENDMPSZrmk 2036
+VBLENDMPSZrmkz 2037
+VBLENDMPSZrr 2038
+VBLENDMPSZrrk 2039
+VBLENDMPSZrrkz 2040
+VBLENDPDYrmi 2041
+VBLENDPDYrri 2042
+VBLENDPDrmi 2043
+VBLENDPDrri 2044
+VBLENDPSYrmi 2045
+VBLENDPSYrri 2046
+VBLENDPSrmi 2047
+VBLENDPSrri 2048
+VBLENDVPDYrmr 2049
+VBLENDVPDYrrr 2050
+VBLENDVPDrmr 2051
+VBLENDVPDrrr 2052
+VBLENDVPSYrmr 2053
+VBLENDVPSYrrr 2054
+VBLENDVPSrmr 2055
+VBLENDVPSrrr 2056
+VBROADCASTF 2057
+VBROADCASTI 2058
+VBROADCASTSDYrm 2059
+VBROADCASTSDYrr 2060
+VBROADCASTSDZ 2061
+VBROADCASTSDZrm 2062
+VBROADCASTSDZrmk 2063
+VBROADCASTSDZrmkz 2064
+VBROADCASTSDZrr 2065
+VBROADCASTSDZrrk 2066
+VBROADCASTSDZrrkz 2067
+VBROADCASTSSYrm 2068
+VBROADCASTSSYrr 2069
+VBROADCASTSSZ 2070
+VBROADCASTSSZrm 2071
+VBROADCASTSSZrmk 2072
+VBROADCASTSSZrmkz 2073
+VBROADCASTSSZrr 2074
+VBROADCASTSSZrrk 2075
+VBROADCASTSSZrrkz 2076
+VBROADCASTSSrm 2077
+VBROADCASTSSrr 2078
+VCMPBF 2079
+VCMPPDYrmi 2080
+VCMPPDYrri 2081
+VCMPPDZ 2082
+VCMPPDZrmbi 2083
+VCMPPDZrmbik 2084
+VCMPPDZrmi 2085
+VCMPPDZrmik 2086
+VCMPPDZrri 2087
+VCMPPDZrrib 2088
+VCMPPDZrribk 2089
+VCMPPDZrrik 2090
+VCMPPDrmi 2091
+VCMPPDrri 2092
+VCMPPHZ 2093
+VCMPPHZrmbi 2094
+VCMPPHZrmbik 2095
+VCMPPHZrmi 2096
+VCMPPHZrmik 2097
+VCMPPHZrri 2098
+VCMPPHZrrib 2099
+VCMPPHZrribk 2100
+VCMPPHZrrik 2101
+VCMPPSYrmi 2102
+VCMPPSYrri 2103
+VCMPPSZ 2104
+VCMPPSZrmbi 2105
+VCMPPSZrmbik 2106
+VCMPPSZrmi 2107
+VCMPPSZrmik 2108
+VCMPPSZrri 2109
+VCMPPSZrrib 2110
+VCMPPSZrribk 2111
+VCMPPSZrrik 2112
+VCMPPSrmi 2113
+VCMPPSrri 2114
+VCMPSDZrmi 2115
+VCMPSDZrmi_Int 2116
+VCMPSDZrmik_Int 2117
+VCMPSDZrri 2118
+VCMPSDZrri_Int 2119
+VCMPSDZrrib_Int 2120
+VCMPSDZrribk_Int 2121
+VCMPSDZrrik_Int 2122
+VCMPSDrmi 2123
+VCMPSDrmi_Int 2124
+VCMPSDrri 2125
+VCMPSDrri_Int 2126
+VCMPSHZrmi 2127
+VCMPSHZrmi_Int 2128
+VCMPSHZrmik_Int 2129
+VCMPSHZrri 2130
+VCMPSHZrri_Int 2131
+VCMPSHZrrib_Int 2132
+VCMPSHZrribk_Int 2133
+VCMPSHZrrik_Int 2134
+VCMPSSZrmi 2135
+VCMPSSZrmi_Int 2136
+VCMPSSZrmik_Int 2137
+VCMPSSZrri 2138
+VCMPSSZrri_Int 2139
+VCMPSSZrrib_Int 2140
+VCMPSSZrribk_Int 2141
+VCMPSSZrrik_Int 2142
+VCMPSSrmi 2143
+VCMPSSrmi_Int 2144
+VCMPSSrri 2145
+VCMPSSrri_Int 2146
+VCOMISBF 2147
+VCOMISDZrm 2148
+VCOMISDZrm_Int 2149
+VCOMISDZrr 2150
+VCOMISDZrr_Int 2151
+VCOMISDZrrb 2152
+VCOMISDrm 2153
+VCOMISDrm_Int 2154
+VCOMISDrr 2155
+VCOMISDrr_Int 2156
+VCOMISHZrm 2157
+VCOMISHZrm_Int 2158
+VCOMISHZrr 2159
+VCOMISHZrr_Int 2160
+VCOMISHZrrb 2161
+VCOMISSZrm 2162
+VCOMISSZrm_Int 2163
+VCOMISSZrr 2164
+VCOMISSZrr_Int 2165
+VCOMISSZrrb 2166
+VCOMISSrm 2167
+VCOMISSrm_Int 2168
+VCOMISSrr 2169
+VCOMISSrr_Int 2170
+VCOMPRESSPDZ 2171
+VCOMPRESSPDZmr 2172
+VCOMPRESSPDZmrk 2173
+VCOMPRESSPDZrr 2174
+VCOMPRESSPDZrrk 2175
+VCOMPRESSPDZrrkz 2176
+VCOMPRESSPSZ 2177
+VCOMPRESSPSZmr 2178
+VCOMPRESSPSZmrk 2179
+VCOMPRESSPSZrr 2180
+VCOMPRESSPSZrrk 2181
+VCOMPRESSPSZrrkz 2182
+VCOMXSDZrm_Int 2183
+VCOMXSDZrr_Int 2184
+VCOMXSDZrrb_Int 2185
+VCOMXSHZrm_Int 2186
+VCOMXSHZrr_Int 2187
+VCOMXSHZrrb_Int 2188
+VCOMXSSZrm_Int 2189
+VCOMXSSZrr_Int 2190
+VCOMXSSZrrb_Int 2191
+VCVT 2192
+VCVTBF 2193
+VCVTBIASPH 2194
+VCVTDQ 2195
+VCVTHF 2196
+VCVTNE 2197
+VCVTNEEBF 2198
+VCVTNEEPH 2199
+VCVTNEOBF 2200
+VCVTNEOPH 2201
+VCVTNEPS 2202
+VCVTPD 2203
+VCVTPH 2204
+VCVTPS 2205
+VCVTQQ 2206
+VCVTSD 2207
+VCVTSH 2208
+VCVTSI 2209
+VCVTSS 2210
+VCVTTBF 2211
+VCVTTPD 2212
+VCVTTPH 2213
+VCVTTPS 2214
+VCVTTSD 2215
+VCVTTSH 2216
+VCVTTSS 2217
+VCVTUDQ 2218
+VCVTUQQ 2219
+VCVTUSI 2220
+VCVTUW 2221
+VCVTW 2222
+VDBPSADBWZ 2223
+VDBPSADBWZrmi 2224
+VDBPSADBWZrmik 2225
+VDBPSADBWZrmikz 2226
+VDBPSADBWZrri 2227
+VDBPSADBWZrrik 2228
+VDBPSADBWZrrikz 2229
+VDIVBF 2230
+VDIVPDYrm 2231
+VDIVPDYrr 2232
+VDIVPDZ 2233
+VDIVPDZrm 2234
+VDIVPDZrmb 2235
+VDIVPDZrmbk 2236
+VDIVPDZrmbkz 2237
+VDIVPDZrmk 2238
+VDIVPDZrmkz 2239
+VDIVPDZrr 2240
+VDIVPDZrrb 2241
+VDIVPDZrrbk 2242
+VDIVPDZrrbkz 2243
+VDIVPDZrrk 2244
+VDIVPDZrrkz 2245
+VDIVPDrm 2246
+VDIVPDrr 2247
+VDIVPHZ 2248
+VDIVPHZrm 2249
+VDIVPHZrmb 2250
+VDIVPHZrmbk 2251
+VDIVPHZrmbkz 2252
+VDIVPHZrmk 2253
+VDIVPHZrmkz 2254
+VDIVPHZrr 2255
+VDIVPHZrrb 2256
+VDIVPHZrrbk 2257
+VDIVPHZrrbkz 2258
+VDIVPHZrrk 2259
+VDIVPHZrrkz 2260
+VDIVPSYrm 2261
+VDIVPSYrr 2262
+VDIVPSZ 2263
+VDIVPSZrm 2264
+VDIVPSZrmb 2265
+VDIVPSZrmbk 2266
+VDIVPSZrmbkz 2267
+VDIVPSZrmk 2268
+VDIVPSZrmkz 2269
+VDIVPSZrr 2270
+VDIVPSZrrb 2271
+VDIVPSZrrbk 2272
+VDIVPSZrrbkz 2273
+VDIVPSZrrk 2274
+VDIVPSZrrkz 2275
+VDIVPSrm 2276
+VDIVPSrr 2277
+VDIVSDZrm 2278
+VDIVSDZrm_Int 2279
+VDIVSDZrmk_Int 2280
+VDIVSDZrmkz_Int 2281
+VDIVSDZrr 2282
+VDIVSDZrr_Int 2283
+VDIVSDZrrb_Int 2284
+VDIVSDZrrbk_Int 2285
+VDIVSDZrrbkz_Int 2286
+VDIVSDZrrk_Int 2287
+VDIVSDZrrkz_Int 2288
+VDIVSDrm 2289
+VDIVSDrm_Int 2290
+VDIVSDrr 2291
+VDIVSDrr_Int 2292
+VDIVSHZrm 2293
+VDIVSHZrm_Int 2294
+VDIVSHZrmk_Int 2295
+VDIVSHZrmkz_Int 2296
+VDIVSHZrr 2297
+VDIVSHZrr_Int 2298
+VDIVSHZrrb_Int 2299
+VDIVSHZrrbk_Int 2300
+VDIVSHZrrbkz_Int 2301
+VDIVSHZrrk_Int 2302
+VDIVSHZrrkz_Int 2303
+VDIVSSZrm 2304
+VDIVSSZrm_Int 2305
+VDIVSSZrmk_Int 2306
+VDIVSSZrmkz_Int 2307
+VDIVSSZrr 2308
+VDIVSSZrr_Int 2309
+VDIVSSZrrb_Int 2310
+VDIVSSZrrbk_Int 2311
+VDIVSSZrrbkz_Int 2312
+VDIVSSZrrk_Int 2313
+VDIVSSZrrkz_Int 2314
+VDIVSSrm 2315
+VDIVSSrm_Int 2316
+VDIVSSrr 2317
+VDIVSSrr_Int 2318
+VDPBF 2319
+VDPPDrmi 2320
+VDPPDrri 2321
+VDPPHPSZ 2322
+VDPPHPSZm 2323
+VDPPHPSZmb 2324
+VDPPHPSZmbk 2325
+VDPPHPSZmbkz 2326
+VDPPHPSZmk 2327
+VDPPHPSZmkz 2328
+VDPPHPSZr 2329
+VDPPHPSZrk 2330
+VDPPHPSZrkz 2331
+VDPPSYrmi 2332
+VDPPSYrri 2333
+VDPPSrmi 2334
+VDPPSrri 2335
+VERRm 2336
+VERRr 2337
+VERWm 2338
+VERWr 2339
+VEXP 2340
+VEXPANDPDZ 2341
+VEXPANDPDZrm 2342
+VEXPANDPDZrmk 2343
+VEXPANDPDZrmkz 2344
+VEXPANDPDZrr 2345
+VEXPANDPDZrrk 2346
+VEXPANDPDZrrkz 2347
+VEXPANDPSZ 2348
+VEXPANDPSZrm 2349
+VEXPANDPSZrmk 2350
+VEXPANDPSZrmkz 2351
+VEXPANDPSZrr 2352
+VEXPANDPSZrrk 2353
+VEXPANDPSZrrkz 2354
+VEXTRACTF 2355
+VEXTRACTI 2356
+VEXTRACTPSZmri 2357
+VEXTRACTPSZrri 2358
+VEXTRACTPSmri 2359
+VEXTRACTPSrri 2360
+VFCMADDCPHZ 2361
+VFCMADDCPHZm 2362
+VFCMADDCPHZmb 2363
+VFCMADDCPHZmbk 2364
+VFCMADDCPHZmbkz 2365
+VFCMADDCPHZmk 2366
+VFCMADDCPHZmkz 2367
+VFCMADDCPHZr 2368
+VFCMADDCPHZrb 2369
+VFCMADDCPHZrbk 2370
+VFCMADDCPHZrbkz 2371
+VFCMADDCPHZrk 2372
+VFCMADDCPHZrkz 2373
+VFCMADDCSHZm 2374
+VFCMADDCSHZmk 2375
+VFCMADDCSHZmkz 2376
+VFCMADDCSHZr 2377
+VFCMADDCSHZrb 2378
+VFCMADDCSHZrbk 2379
+VFCMADDCSHZrbkz 2380
+VFCMADDCSHZrk 2381
+VFCMADDCSHZrkz 2382
+VFCMULCPHZ 2383
+VFCMULCPHZrm 2384
+VFCMULCPHZrmb 2385
+VFCMULCPHZrmbk 2386
+VFCMULCPHZrmbkz 2387
+VFCMULCPHZrmk 2388
+VFCMULCPHZrmkz 2389
+VFCMULCPHZrr 2390
+VFCMULCPHZrrb 2391
+VFCMULCPHZrrbk 2392
+VFCMULCPHZrrbkz 2393
+VFCMULCPHZrrk 2394
+VFCMULCPHZrrkz 2395
+VFCMULCSHZrm 2396
+VFCMULCSHZrmk 2397
+VFCMULCSHZrmkz 2398
+VFCMULCSHZrr 2399
+VFCMULCSHZrrb 2400
+VFCMULCSHZrrbk 2401
+VFCMULCSHZrrbkz 2402
+VFCMULCSHZrrk 2403
+VFCMULCSHZrrkz 2404
+VFIXUPIMMPDZ 2405
+VFIXUPIMMPDZrmbi 2406
+VFIXUPIMMPDZrmbik 2407
+VFIXUPIMMPDZrmbikz 2408
+VFIXUPIMMPDZrmi 2409
+VFIXUPIMMPDZrmik 2410
+VFIXUPIMMPDZrmikz 2411
+VFIXUPIMMPDZrri 2412
+VFIXUPIMMPDZrrib 2413
+VFIXUPIMMPDZrribk 2414
+VFIXUPIMMPDZrribkz 2415
+VFIXUPIMMPDZrrik 2416
+VFIXUPIMMPDZrrikz 2417
+VFIXUPIMMPSZ 2418
+VFIXUPIMMPSZrmbi 2419
+VFIXUPIMMPSZrmbik 2420
+VFIXUPIMMPSZrmbikz 2421
+VFIXUPIMMPSZrmi 2422
+VFIXUPIMMPSZrmik 2423
+VFIXUPIMMPSZrmikz 2424
+VFIXUPIMMPSZrri 2425
+VFIXUPIMMPSZrrib 2426
+VFIXUPIMMPSZrribk 2427
+VFIXUPIMMPSZrribkz 2428
+VFIXUPIMMPSZrrik 2429
+VFIXUPIMMPSZrrikz 2430
+VFIXUPIMMSDZrmi 2431
+VFIXUPIMMSDZrmik 2432
+VFIXUPIMMSDZrmikz 2433
+VFIXUPIMMSDZrri 2434
+VFIXUPIMMSDZrrib 2435
+VFIXUPIMMSDZrribk 2436
+VFIXUPIMMSDZrribkz 2437
+VFIXUPIMMSDZrrik 2438
+VFIXUPIMMSDZrrikz 2439
+VFIXUPIMMSSZrmi 2440
+VFIXUPIMMSSZrmik 2441
+VFIXUPIMMSSZrmikz 2442
+VFIXUPIMMSSZrri 2443
+VFIXUPIMMSSZrrib 2444
+VFIXUPIMMSSZrribk 2445
+VFIXUPIMMSSZrribkz 2446
+VFIXUPIMMSSZrrik 2447
+VFIXUPIMMSSZrrikz 2448
+VFMADD 2449
+VFMADDCPHZ 2450
+VFMADDCPHZm 2451
+VFMADDCPHZmb 2452
+VFMADDCPHZmbk 2453
+VFMADDCPHZmbkz 2454
+VFMADDCPHZmk 2455
+VFMADDCPHZmkz 2456
+VFMADDCPHZr 2457
+VFMADDCPHZrb 2458
+VFMADDCPHZrbk 2459
+VFMADDCPHZrbkz 2460
+VFMADDCPHZrk 2461
+VFMADDCPHZrkz 2462
+VFMADDCSHZm 2463
+VFMADDCSHZmk 2464
+VFMADDCSHZmkz 2465
+VFMADDCSHZr 2466
+VFMADDCSHZrb 2467
+VFMADDCSHZrbk 2468
+VFMADDCSHZrbkz 2469
+VFMADDCSHZrk 2470
+VFMADDCSHZrkz 2471
+VFMADDPD 2472
+VFMADDPS 2473
+VFMADDSD 2474
+VFMADDSS 2475
+VFMADDSUB 2476
+VFMADDSUBPD 2477
+VFMADDSUBPS 2478
+VFMSUB 2479
+VFMSUBADD 2480
+VFMSUBADDPD 2481
+VFMSUBADDPS 2482
+VFMSUBPD 2483
+VFMSUBPS 2484
+VFMSUBSD 2485
+VFMSUBSS 2486
+VFMULCPHZ 2487
+VFMULCPHZrm 2488
+VFMULCPHZrmb 2489
+VFMULCPHZrmbk 2490
+VFMULCPHZrmbkz 2491
+VFMULCPHZrmk 2492
+VFMULCPHZrmkz 2493
+VFMULCPHZrr 2494
+VFMULCPHZrrb 2495
+VFMULCPHZrrbk 2496
+VFMULCPHZrrbkz 2497
+VFMULCPHZrrk 2498
+VFMULCPHZrrkz 2499
+VFMULCSHZrm 2500
+VFMULCSHZrmk 2501
+VFMULCSHZrmkz 2502
+VFMULCSHZrr 2503
+VFMULCSHZrrb 2504
+VFMULCSHZrrbk 2505
+VFMULCSHZrrbkz 2506
+VFMULCSHZrrk 2507
+VFMULCSHZrrkz 2508
+VFNMADD 2509
+VFNMADDPD 2510
+VFNMADDPS 2511
+VFNMADDSD 2512
+VFNMADDSS 2513
+VFNMSUB 2514
+VFNMSUBPD 2515
+VFNMSUBPS 2516
+VFNMSUBSD 2517
+VFNMSUBSS 2518
+VFPCLASSBF 2519
+VFPCLASSPDZ 2520
+VFPCLASSPDZmbi 2521
+VFPCLASSPDZmbik 2522
+VFPCLASSPDZmi 2523
+VFPCLASSPDZmik 2524
+VFPCLASSPDZri 2525
+VFPCLASSPDZrik 2526
+VFPCLASSPHZ 2527
+VFPCLASSPHZmbi 2528
+VFPCLASSPHZmbik 2529
+VFPCLASSPHZmi 2530
+VFPCLASSPHZmik 2531
+VFPCLASSPHZri 2532
+VFPCLASSPHZrik 2533
+VFPCLASSPSZ 2534
+VFPCLASSPSZmbi 2535
+VFPCLASSPSZmbik 2536
+VFPCLASSPSZmi 2537
+VFPCLASSPSZmik 2538
+VFPCLASSPSZri 2539
+VFPCLASSPSZrik 2540
+VFPCLASSSDZmi 2541
+VFPCLASSSDZmik 2542
+VFPCLASSSDZri 2543
+VFPCLASSSDZrik 2544
+VFPCLASSSHZmi 2545
+VFPCLASSSHZmik 2546
+VFPCLASSSHZri 2547
+VFPCLASSSHZrik 2548
+VFPCLASSSSZmi 2549
+VFPCLASSSSZmik 2550
+VFPCLASSSSZri 2551
+VFPCLASSSSZrik 2552
+VFRCZPDYrm 2553
+VFRCZPDYrr 2554
+VFRCZPDrm 2555
+VFRCZPDrr 2556
+VFRCZPSYrm 2557
+VFRCZPSYrr 2558
+VFRCZPSrm 2559
+VFRCZPSrr 2560
+VFRCZSDrm 2561
+VFRCZSDrr 2562
+VFRCZSSrm 2563
+VFRCZSSrr 2564
+VGATHERDPDYrm 2565
+VGATHERDPDZ 2566
+VGATHERDPDZrm 2567
+VGATHERDPDrm 2568
+VGATHERDPSYrm 2569
+VGATHERDPSZ 2570
+VGATHERDPSZrm 2571
+VGATHERDPSrm 2572
+VGATHERPF 2573
+VGATHERQPDYrm 2574
+VGATHERQPDZ 2575
+VGATHERQPDZrm 2576
+VGATHERQPDrm 2577
+VGATHERQPSYrm 2578
+VGATHERQPSZ 2579
+VGATHERQPSZrm 2580
+VGATHERQPSrm 2581
+VGETEXPBF 2582
+VGETEXPPDZ 2583
+VGETEXPPDZm 2584
+VGETEXPPDZmb 2585
+VGETEXPPDZmbk 2586
+VGETEXPPDZmbkz 2587
+VGETEXPPDZmk 2588
+VGETEXPPDZmkz 2589
+VGETEXPPDZr 2590
+VGETEXPPDZrb 2591
+VGETEXPPDZrbk 2592
+VGETEXPPDZrbkz 2593
+VGETEXPPDZrk 2594
+VGETEXPPDZrkz 2595
+VGETEXPPHZ 2596
+VGETEXPPHZm 2597
+VGETEXPPHZmb 2598
+VGETEXPPHZmbk 2599
+VGETEXPPHZmbkz 2600
+VGETEXPPHZmk 2601
+VGETEXPPHZmkz 2602
+VGETEXPPHZr 2603
+VGETEXPPHZrb 2604
+VGETEXPPHZrbk 2605
+VGETEXPPHZrbkz 2606
+VGETEXPPHZrk 2607
+VGETEXPPHZrkz 2608
+VGETEXPPSZ 2609
+VGETEXPPSZm 2610
+VGETEXPPSZmb 2611
+VGETEXPPSZmbk 2612
+VGETEXPPSZmbkz 2613
+VGETEXPPSZmk 2614
+VGETEXPPSZmkz 2615
+VGETEXPPSZr 2616
+VGETEXPPSZrb 2617
+VGETEXPPSZrbk 2618
+VGETEXPPSZrbkz 2619
+VGETEXPPSZrk 2620
+VGETEXPPSZrkz 2621
+VGETEXPSDZm 2622
+VGETEXPSDZmk 2623
+VGETEXPSDZmkz 2624
+VGETEXPSDZr 2625
+VGETEXPSDZrb 2626
+VGETEXPSDZrbk 2627
+VGETEXPSDZrbkz 2628
+VGETEXPSDZrk 2629
+VGETEXPSDZrkz 2630
+VGETEXPSHZm 2631
+VGETEXPSHZmk 2632
+VGETEXPSHZmkz 2633
+VGETEXPSHZr 2634
+VGETEXPSHZrb 2635
+VGETEXPSHZrbk 2636
+VGETEXPSHZrbkz 2637
+VGETEXPSHZrk 2638
+VGETEXPSHZrkz 2639
+VGETEXPSSZm 2640
+VGETEXPSSZmk 2641
+VGETEXPSSZmkz 2642
+VGETEXPSSZr 2643
+VGETEXPSSZrb 2644
+VGETEXPSSZrbk 2645
+VGETEXPSSZrbkz 2646
+VGETEXPSSZrk 2647
+VGETEXPSSZrkz 2648
+VGETMANTBF 2649
+VGETMANTPDZ 2650
+VGETMANTPDZrmbi 2651
+VGETMANTPDZrmbik 2652
+VGETMANTPDZrmbikz 2653
+VGETMANTPDZrmi 2654
+VGETMANTPDZrmik 2655
+VGETMANTPDZrmikz 2656
+VGETMANTPDZrri 2657
+VGETMANTPDZrrib 2658
+VGETMANTPDZrribk 2659
+VGETMANTPDZrribkz 2660
+VGETMANTPDZrrik 2661
+VGETMANTPDZrrikz 2662
+VGETMANTPHZ 2663
+VGETMANTPHZrmbi 2664
+VGETMANTPHZrmbik 2665
+VGETMANTPHZrmbikz 2666
+VGETMANTPHZrmi 2667
+VGETMANTPHZrmik 2668
+VGETMANTPHZrmikz 2669
+VGETMANTPHZrri 2670
+VGETMANTPHZrrib 2671
+VGETMANTPHZrribk 2672
+VGETMANTPHZrribkz 2673
+VGETMANTPHZrrik 2674
+VGETMANTPHZrrikz 2675
+VGETMANTPSZ 2676
+VGETMANTPSZrmbi 2677
+VGETMANTPSZrmbik 2678
+VGETMANTPSZrmbikz 2679
+VGETMANTPSZrmi 2680
+VGETMANTPSZrmik 2681
+VGETMANTPSZrmikz 2682
+VGETMANTPSZrri 2683
+VGETMANTPSZrrib 2684
+VGETMANTPSZrribk 2685
+VGETMANTPSZrribkz 2686
+VGETMANTPSZrrik 2687
+VGETMANTPSZrrikz 2688
+VGETMANTSDZrmi 2689
+VGETMANTSDZrmik 2690
+VGETMANTSDZrmikz 2691
+VGETMANTSDZrri 2692
+VGETMANTSDZrrib 2693
+VGETMANTSDZrribk 2694
+VGETMANTSDZrribkz 2695
+VGETMANTSDZrrik 2696
+VGETMANTSDZrrikz 2697
+VGETMANTSHZrmi 2698
+VGETMANTSHZrmik 2699
+VGETMANTSHZrmikz 2700
+VGETMANTSHZrri 2701
+VGETMANTSHZrrib 2702
+VGETMANTSHZrribk 2703
+VGETMANTSHZrribkz 2704
+VGETMANTSHZrrik 2705
+VGETMANTSHZrrikz 2706
+VGETMANTSSZrmi 2707
+VGETMANTSSZrmik 2708
+VGETMANTSSZrmikz 2709
+VGETMANTSSZrri 2710
+VGETMANTSSZrrib 2711
+VGETMANTSSZrribk 2712
+VGETMANTSSZrribkz 2713
+VGETMANTSSZrrik 2714
+VGETMANTSSZrrikz 2715
+VGF 2716
+VHADDPDYrm 2717
+VHADDPDYrr 2718
+VHADDPDrm 2719
+VHADDPDrr 2720
+VHADDPSYrm 2721
+VHADDPSYrr 2722
+VHADDPSrm 2723
+VHADDPSrr 2724
+VHSUBPDYrm 2725
+VHSUBPDYrr 2726
+VHSUBPDrm 2727
+VHSUBPDrr 2728
+VHSUBPSYrm 2729
+VHSUBPSYrr 2730
+VHSUBPSrm 2731
+VHSUBPSrr 2732
+VINSERTF 2733
+VINSERTI 2734
+VINSERTPSZrmi 2735
+VINSERTPSZrri 2736
+VINSERTPSrmi 2737
+VINSERTPSrri 2738
+VLDDQUYrm 2739
+VLDDQUrm 2740
+VLDMXCSR 2741
+VMASKMOVDQU 2742
+VMASKMOVPDYmr 2743
+VMASKMOVPDYrm 2744
+VMASKMOVPDmr 2745
+VMASKMOVPDrm 2746
+VMASKMOVPSYmr 2747
+VMASKMOVPSYrm 2748
+VMASKMOVPSmr 2749
+VMASKMOVPSrm 2750
+VMAXBF 2751
+VMAXCPDYrm 2752
+VMAXCPDYrr 2753
+VMAXCPDZ 2754
+VMAXCPDZrm 2755
+VMAXCPDZrmb 2756
+VMAXCPDZrmbk 2757
+VMAXCPDZrmbkz 2758
+VMAXCPDZrmk 2759
+VMAXCPDZrmkz 2760
+VMAXCPDZrr 2761
+VMAXCPDZrrk 2762
+VMAXCPDZrrkz 2763
+VMAXCPDrm 2764
+VMAXCPDrr 2765
+VMAXCPHZ 2766
+VMAXCPHZrm 2767
+VMAXCPHZrmb 2768
+VMAXCPHZrmbk 2769
+VMAXCPHZrmbkz 2770
+VMAXCPHZrmk 2771
+VMAXCPHZrmkz 2772
+VMAXCPHZrr 2773
+VMAXCPHZrrk 2774
+VMAXCPHZrrkz 2775
+VMAXCPSYrm 2776
+VMAXCPSYrr 2777
+VMAXCPSZ 2778
+VMAXCPSZrm 2779
+VMAXCPSZrmb 2780
+VMAXCPSZrmbk 2781
+VMAXCPSZrmbkz 2782
+VMAXCPSZrmk 2783
+VMAXCPSZrmkz 2784
+VMAXCPSZrr 2785
+VMAXCPSZrrk 2786
+VMAXCPSZrrkz 2787
+VMAXCPSrm 2788
+VMAXCPSrr 2789
+VMAXCSDZrm 2790
+VMAXCSDZrr 2791
+VMAXCSDrm 2792
+VMAXCSDrr 2793
+VMAXCSHZrm 2794
+VMAXCSHZrr 2795
+VMAXCSSZrm 2796
+VMAXCSSZrr 2797
+VMAXCSSrm 2798
+VMAXCSSrr 2799
+VMAXPDYrm 2800
+VMAXPDYrr 2801
+VMAXPDZ 2802
+VMAXPDZrm 2803
+VMAXPDZrmb 2804
+VMAXPDZrmbk 2805
+VMAXPDZrmbkz 2806
+VMAXPDZrmk 2807
+VMAXPDZrmkz 2808
+VMAXPDZrr 2809
+VMAXPDZrrb 2810
+VMAXPDZrrbk 2811
+VMAXPDZrrbkz 2812
+VMAXPDZrrk 2813
+VMAXPDZrrkz 2814
+VMAXPDrm 2815
+VMAXPDrr 2816
+VMAXPHZ 2817
+VMAXPHZrm 2818
+VMAXPHZrmb 2819
+VMAXPHZrmbk 2820
+VMAXPHZrmbkz 2821
+VMAXPHZrmk 2822
+VMAXPHZrmkz 2823
+VMAXPHZrr 2824
+VMAXPHZrrb 2825
+VMAXPHZrrbk 2826
+VMAXPHZrrbkz 2827
+VMAXPHZrrk 2828
+VMAXPHZrrkz 2829
+VMAXPSYrm 2830
+VMAXPSYrr 2831
+VMAXPSZ 2832
+VMAXPSZrm 2833
+VMAXPSZrmb 2834
+VMAXPSZrmbk 2835
+VMAXPSZrmbkz 2836
+VMAXPSZrmk 2837
+VMAXPSZrmkz 2838
+VMAXPSZrr 2839
+VMAXPSZrrb 2840
+VMAXPSZrrbk 2841
+VMAXPSZrrbkz 2842
+VMAXPSZrrk 2843
+VMAXPSZrrkz 2844
+VMAXPSrm 2845
+VMAXPSrr 2846
+VMAXSDZrm 2847
+VMAXSDZrm_Int 2848
+VMAXSDZrmk_Int 2849
+VMAXSDZrmkz_Int 2850
+VMAXSDZrr 2851
+VMAXSDZrr_Int 2852
+VMAXSDZrrb_Int 2853
+VMAXSDZrrbk_Int 2854
+VMAXSDZrrbkz_Int 2855
+VMAXSDZrrk_Int 2856
+VMAXSDZrrkz_Int 2857
+VMAXSDrm 2858
+VMAXSDrm_Int 2859
+VMAXSDrr 2860
+VMAXSDrr_Int 2861
+VMAXSHZrm 2862
+VMAXSHZrm_Int 2863
+VMAXSHZrmk_Int 2864
+VMAXSHZrmkz_Int 2865
+VMAXSHZrr 2866
+VMAXSHZrr_Int 2867
+VMAXSHZrrb_Int 2868
+VMAXSHZrrbk_Int 2869
+VMAXSHZrrbkz_Int 2870
+VMAXSHZrrk_Int 2871
+VMAXSHZrrkz_Int 2872
+VMAXSSZrm 2873
+VMAXSSZrm_Int 2874
+VMAXSSZrmk_Int 2875
+VMAXSSZrmkz_Int 2876
+VMAXSSZrr 2877
+VMAXSSZrr_Int 2878
+VMAXSSZrrb_Int 2879
+VMAXSSZrrbk_Int 2880
+VMAXSSZrrbkz_Int 2881
+VMAXSSZrrk_Int 2882
+VMAXSSZrrkz_Int 2883
+VMAXSSrm 2884
+VMAXSSrm_Int 2885
+VMAXSSrr 2886
+VMAXSSrr_Int 2887
+VMCALL 2888
+VMCLEARm 2889
+VMFUNC 2890
+VMINBF 2891
+VMINCPDYrm 2892
+VMINCPDYrr 2893
+VMINCPDZ 2894
+VMINCPDZrm 2895
+VMINCPDZrmb 2896
+VMINCPDZrmbk 2897
+VMINCPDZrmbkz 2898
+VMINCPDZrmk 2899
+VMINCPDZrmkz 2900
+VMINCPDZrr 2901
+VMINCPDZrrk 2902
+VMINCPDZrrkz 2903
+VMINCPDrm 2904
+VMINCPDrr 2905
+VMINCPHZ 2906
+VMINCPHZrm 2907
+VMINCPHZrmb 2908
+VMINCPHZrmbk 2909
+VMINCPHZrmbkz 2910
+VMINCPHZrmk 2911
+VMINCPHZrmkz 2912
+VMINCPHZrr 2913
+VMINCPHZrrk 2914
+VMINCPHZrrkz 2915
+VMINCPSYrm 2916
+VMINCPSYrr 2917
+VMINCPSZ 2918
+VMINCPSZrm 2919
+VMINCPSZrmb 2920
+VMINCPSZrmbk 2921
+VMINCPSZrmbkz 2922
+VMINCPSZrmk 2923
+VMINCPSZrmkz 2924
+VMINCPSZrr 2925
+VMINCPSZrrk 2926
+VMINCPSZrrkz 2927
+VMINCPSrm 2928
+VMINCPSrr 2929
+VMINCSDZrm 2930
+VMINCSDZrr 2931
+VMINCSDrm 2932
+VMINCSDrr 2933
+VMINCSHZrm 2934
+VMINCSHZrr 2935
+VMINCSSZrm 2936
+VMINCSSZrr 2937
+VMINCSSrm 2938
+VMINCSSrr 2939
+VMINMAXBF 2940
+VMINMAXPDZ 2941
+VMINMAXPDZrmbi 2942
+VMINMAXPDZrmbik 2943
+VMINMAXPDZrmbikz 2944
+VMINMAXPDZrmi 2945
+VMINMAXPDZrmik 2946
+VMINMAXPDZrmikz 2947
+VMINMAXPDZrri 2948
+VMINMAXPDZrrib 2949
+VMINMAXPDZrribk 2950
+VMINMAXPDZrribkz 2951
+VMINMAXPDZrrik 2952
+VMINMAXPDZrrikz 2953
+VMINMAXPHZ 2954
+VMINMAXPHZrmbi 2955
+VMINMAXPHZrmbik 2956
+VMINMAXPHZrmbikz 2957
+VMINMAXPHZrmi 2958
+VMINMAXPHZrmik 2959
+VMINMAXPHZrmikz 2960
+VMINMAXPHZrri 2961
+VMINMAXPHZrrib 2962
+VMINMAXPHZrribk 2963
+VMINMAXPHZrribkz 2964
+VMINMAXPHZrrik 2965
+VMINMAXPHZrrikz 2966
+VMINMAXPSZ 2967
+VMINMAXPSZrmbi 2968
+VMINMAXPSZrmbik 2969
+VMINMAXPSZrmbikz 2970
+VMINMAXPSZrmi 2971
+VMINMAXPSZrmik 2972
+VMINMAXPSZrmikz 2973
+VMINMAXPSZrri 2974
+VMINMAXPSZrrib 2975
+VMINMAXPSZrribk 2976
+VMINMAXPSZrribkz 2977
+VMINMAXPSZrrik 2978
+VMINMAXPSZrrikz 2979
+VMINMAXSDrmi 2980
+VMINMAXSDrmi_Int 2981
+VMINMAXSDrmik_Int 2982
+VMINMAXSDrmikz_Int 2983
+VMINMAXSDrri 2984
+VMINMAXSDrri_Int 2985
+VMINMAXSDrrib_Int 2986
+VMINMAXSDrribk_Int 2987
+VMINMAXSDrribkz_Int 2988
+VMINMAXSDrrik_Int 2989
+VMINMAXSDrrikz_Int 2990
+VMINMAXSHrmi 2991
+VMINMAXSHrmi_Int 2992
+VMINMAXSHrmik_Int 2993
+VMINMAXSHrmikz_Int 2994
+VMINMAXSHrri 2995
+VMINMAXSHrri_Int 2996
+VMINMAXSHrrib_Int 2997
+VMINMAXSHrribk_Int 2998
+VMINMAXSHrribkz_Int 2999
+VMINMAXSHrrik_Int 3000
+VMINMAXSHrrikz_Int 3001
+VMINMAXSSrmi 3002
+VMINMAXSSrmi_Int 3003
+VMINMAXSSrmik_Int 3004
+VMINMAXSSrmikz_Int 3005
+VMINMAXSSrri 3006
+VMINMAXSSrri_Int 3007
+VMINMAXSSrrib_Int 3008
+VMINMAXSSrribk_Int 3009
+VMINMAXSSrribkz_Int 3010
+VMINMAXSSrrik_Int 3011
+VMINMAXSSrrikz_Int 3012
+VMINPDYrm 3013
+VMINPDYrr 3014
+VMINPDZ 3015
+VMINPDZrm 3016
+VMINPDZrmb 3017
+VMINPDZrmbk 3018
+VMINPDZrmbkz 3019
+VMINPDZrmk 3020
+VMINPDZrmkz 3021
+VMINPDZrr 3022
+VMINPDZrrb 3023
+VMINPDZrrbk 3024
+VMINPDZrrbkz 3025
+VMINPDZrrk 3026
+VMINPDZrrkz 3027
+VMINPDrm 3028
+VMINPDrr 3029
+VMINPHZ 3030
+VMINPHZrm 3031
+VMINPHZrmb 3032
+VMINPHZrmbk 3033
+VMINPHZrmbkz 3034
+VMINPHZrmk 3035
+VMINPHZrmkz 3036
+VMINPHZrr 3037
+VMINPHZrrb 3038
+VMINPHZrrbk 3039
+VMINPHZrrbkz 3040
+VMINPHZrrk 3041
+VMINPHZrrkz 3042
+VMINPSYrm 3043
+VMINPSYrr 3044
+VMINPSZ 3045
+VMINPSZrm 3046
+VMINPSZrmb 3047
+VMINPSZrmbk 3048
+VMINPSZrmbkz 3049
+VMINPSZrmk 3050
+VMINPSZrmkz 3051
+VMINPSZrr 3052
+VMINPSZrrb 3053
+VMINPSZrrbk 3054
+VMINPSZrrbkz 3055
+VMINPSZrrk 3056
+VMINPSZrrkz 3057
+VMINPSrm 3058
+VMINPSrr 3059
+VMINSDZrm 3060
+VMINSDZrm_Int 3061
+VMINSDZrmk_Int 3062
+VMINSDZrmkz_Int 3063
+VMINSDZrr 3064
+VMINSDZrr_Int 3065
+VMINSDZrrb_Int 3066
+VMINSDZrrbk_Int 3067
+VMINSDZrrbkz_Int 3068
+VMINSDZrrk_Int 3069
+VMINSDZrrkz_Int 3070
+VMINSDrm 3071
+VMINSDrm_Int 3072
+VMINSDrr 3073
+VMINSDrr_Int 3074
+VMINSHZrm 3075
+VMINSHZrm_Int 3076
+VMINSHZrmk_Int 3077
+VMINSHZrmkz_Int 3078
+VMINSHZrr 3079
+VMINSHZrr_Int 3080
+VMINSHZrrb_Int 3081
+VMINSHZrrbk_Int 3082
+VMINSHZrrbkz_Int 3083
+VMINSHZrrk_Int 3084
+VMINSHZrrkz_Int 3085
+VMINSSZrm 3086
+VMINSSZrm_Int 3087
+VMINSSZrmk_Int 3088
+VMINSSZrmkz_Int 3089
+VMINSSZrr 3090
+VMINSSZrr_Int 3091
+VMINSSZrrb_Int 3092
+VMINSSZrrbk_Int 3093
+VMINSSZrrbkz_Int 3094
+VMINSSZrrk_Int 3095
+VMINSSZrrkz_Int 3096
+VMINSSrm 3097
+VMINSSrm_Int 3098
+VMINSSrr 3099
+VMINSSrr_Int 3100
+VMLAUNCH 3101
+VMLOAD 3102
+VMMCALL 3103
+VMOV 3104
+VMOVAPDYmr 3105
+VMOVAPDYrm 3106
+VMOVAPDYrr 3107
+VMOVAPDYrr_REV 3108
+VMOVAPDZ 3109
+VMOVAPDZmr 3110
+VMOVAPDZmrk 3111
+VMOVAPDZrm 3112
+VMOVAPDZrmk 3113
+VMOVAPDZrmkz 3114
+VMOVAPDZrr 3115
+VMOVAPDZrr_REV 3116
+VMOVAPDZrrk 3117
+VMOVAPDZrrk_REV 3118
+VMOVAPDZrrkz 3119
+VMOVAPDZrrkz_REV 3120
+VMOVAPDmr 3121
+VMOVAPDrm 3122
+VMOVAPDrr 3123
+VMOVAPDrr_REV 3124
+VMOVAPSYmr 3125
+VMOVAPSYrm 3126
+VMOVAPSYrr 3127
+VMOVAPSYrr_REV 3128
+VMOVAPSZ 3129
+VMOVAPSZmr 3130
+VMOVAPSZmrk 3131
+VMOVAPSZrm 3132
+VMOVAPSZrmk 3133
+VMOVAPSZrmkz 3134
+VMOVAPSZrr 3135
+VMOVAPSZrr_REV 3136
+VMOVAPSZrrk 3137
+VMOVAPSZrrk_REV 3138
+VMOVAPSZrrkz 3139
+VMOVAPSZrrkz_REV 3140
+VMOVAPSmr 3141
+VMOVAPSrm 3142
+VMOVAPSrr 3143
+VMOVAPSrr_REV 3144
+VMOVDDUPYrm 3145
+VMOVDDUPYrr 3146
+VMOVDDUPZ 3147
+VMOVDDUPZrm 3148
+VMOVDDUPZrmk 3149
+VMOVDDUPZrmkz 3150
+VMOVDDUPZrr 3151
+VMOVDDUPZrrk 3152
+VMOVDDUPZrrkz 3153
+VMOVDDUPrm 3154
+VMOVDDUPrr 3155
+VMOVDI 3156
+VMOVDQA 3157
+VMOVDQAYmr 3158
+VMOVDQAYrm 3159
+VMOVDQAYrr 3160
+VMOVDQAYrr_REV 3161
+VMOVDQAmr 3162
+VMOVDQArm 3163
+VMOVDQArr 3164
+VMOVDQArr_REV 3165
+VMOVDQU 3166
+VMOVDQUYmr 3167
+VMOVDQUYrm 3168
+VMOVDQUYrr 3169
+VMOVDQUYrr_REV 3170
+VMOVDQUmr 3171
+VMOVDQUrm 3172
+VMOVDQUrr 3173
+VMOVDQUrr_REV 3174
+VMOVHLPSZrr 3175
+VMOVHLPSrr 3176
+VMOVHPDZ 3177
+VMOVHPDmr 3178
+VMOVHPDrm 3179
+VMOVHPSZ 3180
+VMOVHPSmr 3181
+VMOVHPSrm 3182
+VMOVLHPSZrr 3183
+VMOVLHPSrr 3184
+VMOVLPDZ 3185
+VMOVLPDmr 3186
+VMOVLPDrm 3187
+VMOVLPSZ 3188
+VMOVLPSmr 3189
+VMOVLPSrm 3190
+VMOVMSKPDYrr 3191
+VMOVMSKPDrr 3192
+VMOVMSKPSYrr 3193
+VMOVMSKPSrr 3194
+VMOVNTDQAYrm 3195
+VMOVNTDQAZ 3196
+VMOVNTDQAZrm 3197
+VMOVNTDQArm 3198
+VMOVNTDQYmr 3199
+VMOVNTDQZ 3200
+VMOVNTDQZmr 3201
+VMOVNTDQmr 3202
+VMOVNTPDYmr 3203
+VMOVNTPDZ 3204
+VMOVNTPDZmr 3205
+VMOVNTPDmr 3206
+VMOVNTPSYmr 3207
+VMOVNTPSZ 3208
+VMOVNTPSZmr 3209
+VMOVNTPSmr 3210
+VMOVPDI 3211
+VMOVPQI 3212
+VMOVPQIto 3213
+VMOVQI 3214
+VMOVRSBZ 3215
+VMOVRSBZm 3216
+VMOVRSBZmk 3217
+VMOVRSBZmkz 3218
+VMOVRSDZ 3219
+VMOVRSDZm 3220
+VMOVRSDZmk 3221
+VMOVRSDZmkz 3222
+VMOVRSQZ 3223
+VMOVRSQZm 3224
+VMOVRSQZmk 3225
+VMOVRSQZmkz 3226
+VMOVRSWZ 3227
+VMOVRSWZm 3228
+VMOVRSWZmk 3229
+VMOVRSWZmkz 3230
+VMOVSDZmr 3231
+VMOVSDZmrk 3232
+VMOVSDZrm 3233
+VMOVSDZrm_alt 3234
+VMOVSDZrmk 3235
+VMOVSDZrmkz 3236
+VMOVSDZrr 3237
+VMOVSDZrr_REV 3238
+VMOVSDZrrk 3239
+VMOVSDZrrk_REV 3240
+VMOVSDZrrkz 3241
+VMOVSDZrrkz_REV 3242
+VMOVSDmr 3243
+VMOVSDrm 3244
+VMOVSDrm_alt 3245
+VMOVSDrr 3246
+VMOVSDrr_REV 3247
+VMOVSDto 3248
+VMOVSH 3249
+VMOVSHDUPYrm 3250
+VMOVSHDUPYrr 3251
+VMOVSHDUPZ 3252
+VMOVSHDUPZrm 3253
+VMOVSHDUPZrmk 3254
+VMOVSHDUPZrmkz 3255
+VMOVSHDUPZrr 3256
+VMOVSHDUPZrrk 3257
+VMOVSHDUPZrrkz 3258
+VMOVSHDUPrm 3259
+VMOVSHDUPrr 3260
+VMOVSHZmr 3261
+VMOVSHZmrk 3262
+VMOVSHZrm 3263
+VMOVSHZrm_alt 3264
+VMOVSHZrmk 3265
+VMOVSHZrmkz 3266
+VMOVSHZrr 3267
+VMOVSHZrr_REV 3268
+VMOVSHZrrk 3269
+VMOVSHZrrk_REV 3270
+VMOVSHZrrkz 3271
+VMOVSHZrrkz_REV 3272
+VMOVSHtoW 3273
+VMOVSLDUPYrm 3274
+VMOVSLDUPYrr 3275
+VMOVSLDUPZ 3276
+VMOVSLDUPZrm 3277
+VMOVSLDUPZrmk 3278
+VMOVSLDUPZrmkz 3279
+VMOVSLDUPZrr 3280
+VMOVSLDUPZrrk 3281
+VMOVSLDUPZrrkz 3282
+VMOVSLDUPrm 3283
+VMOVSLDUPrr 3284
+VMOVSS 3285
+VMOVSSZmr 3286
+VMOVSSZmrk 3287
+VMOVSSZrm 3288
+VMOVSSZrm_alt 3289
+VMOVSSZrmk 3290
+VMOVSSZrmkz 3291
+VMOVSSZrr 3292
+VMOVSSZrr_REV 3293
+VMOVSSZrrk 3294
+VMOVSSZrrk_REV 3295
+VMOVSSZrrkz 3296
+VMOVSSZrrkz_REV 3297
+VMOVSSmr 3298
+VMOVSSrm 3299
+VMOVSSrm_alt 3300
+VMOVSSrr 3301
+VMOVSSrr_REV 3302
+VMOVUPDYmr 3303
+VMOVUPDYrm 3304
+VMOVUPDYrr 3305
+VMOVUPDYrr_REV 3306
+VMOVUPDZ 3307
+VMOVUPDZmr 3308
+VMOVUPDZmrk 3309
+VMOVUPDZrm 3310
+VMOVUPDZrmk 3311
+VMOVUPDZrmkz 3312
+VMOVUPDZrr 3313
+VMOVUPDZrr_REV 3314
+VMOVUPDZrrk 3315
+VMOVUPDZrrk_REV 3316
+VMOVUPDZrrkz 3317
+VMOVUPDZrrkz_REV 3318
+VMOVUPDmr 3319
+VMOVUPDrm 3320
+VMOVUPDrr 3321
+VMOVUPDrr_REV 3322
+VMOVUPSYmr 3323
+VMOVUPSYrm 3324
+VMOVUPSYrr 3325
+VMOVUPSYrr_REV 3326
+VMOVUPSZ 3327
+VMOVUPSZmr 3328
+VMOVUPSZmrk 3329
+VMOVUPSZrm 3330
+VMOVUPSZrmk 3331
+VMOVUPSZrmkz 3332
+VMOVUPSZrr 3333
+VMOVUPSZrr_REV 3334
+VMOVUPSZrrk 3335
+VMOVUPSZrrk_REV 3336
+VMOVUPSZrrkz 3337
+VMOVUPSZrrkz_REV 3338
+VMOVUPSmr 3339
+VMOVUPSrm 3340
+VMOVUPSrr 3341
+VMOVUPSrr_REV 3342
+VMOVW 3343
+VMOVWmr 3344
+VMOVWrm 3345
+VMOVZPDILo 3346
+VMOVZPQILo 3347
+VMOVZPWILo 3348
+VMPSADBWYrmi 3349
+VMPSADBWYrri 3350
+VMPSADBWZ 3351
+VMPSADBWZrmi 3352
+VMPSADBWZrmik 3353
+VMPSADBWZrmikz 3354
+VMPSADBWZrri 3355
+VMPSADBWZrrik 3356
+VMPSADBWZrrikz 3357
+VMPSADBWrmi 3358
+VMPSADBWrri 3359
+VMPTRLDm 3360
+VMPTRSTm 3361
+VMREAD 3362
+VMRESUME 3363
+VMRUN 3364
+VMSAVE 3365
+VMULBF 3366
+VMULPDYrm 3367
+VMULPDYrr 3368
+VMULPDZ 3369
+VMULPDZrm 3370
+VMULPDZrmb 3371
+VMULPDZrmbk 3372
+VMULPDZrmbkz 3373
+VMULPDZrmk 3374
+VMULPDZrmkz 3375
+VMULPDZrr 3376
+VMULPDZrrb 3377
+VMULPDZrrbk 3378
+VMULPDZrrbkz 3379
+VMULPDZrrk 3380
+VMULPDZrrkz 3381
+VMULPDrm 3382
+VMULPDrr 3383
+VMULPHZ 3384
+VMULPHZrm 3385
+VMULPHZrmb 3386
+VMULPHZrmbk 3387
+VMULPHZrmbkz 3388
+VMULPHZrmk 3389
+VMULPHZrmkz 3390
+VMULPHZrr 3391
+VMULPHZrrb 3392
+VMULPHZrrbk 3393
+VMULPHZrrbkz 3394
+VMULPHZrrk 3395
+VMULPHZrrkz 3396
+VMULPSYrm 3397
+VMULPSYrr 3398
+VMULPSZ 3399
+VMULPSZrm 3400
+VMULPSZrmb 3401
+VMULPSZrmbk 3402
+VMULPSZrmbkz 3403
+VMULPSZrmk 3404
+VMULPSZrmkz 3405
+VMULPSZrr 3406
+VMULPSZrrb 3407
+VMULPSZrrbk 3408
+VMULPSZrrbkz 3409
+VMULPSZrrk 3410
+VMULPSZrrkz 3411
+VMULPSrm 3412
+VMULPSrr 3413
+VMULSDZrm 3414
+VMULSDZrm_Int 3415
+VMULSDZrmk_Int 3416
+VMULSDZrmkz_Int 3417
+VMULSDZrr 3418
+VMULSDZrr_Int 3419
+VMULSDZrrb_Int 3420
+VMULSDZrrbk_Int 3421
+VMULSDZrrbkz_Int 3422
+VMULSDZrrk_Int 3423
+VMULSDZrrkz_Int 3424
+VMULSDrm 3425
+VMULSDrm_Int 3426
+VMULSDrr 3427
+VMULSDrr_Int 3428
+VMULSHZrm 3429
+VMULSHZrm_Int 3430
+VMULSHZrmk_Int 3431
+VMULSHZrmkz_Int 3432
+VMULSHZrr 3433
+VMULSHZrr_Int 3434
+VMULSHZrrb_Int 3435
+VMULSHZrrbk_Int 3436
+VMULSHZrrbkz_Int 3437
+VMULSHZrrk_Int 3438
+VMULSHZrrkz_Int 3439
+VMULSSZrm 3440
+VMULSSZrm_Int 3441
+VMULSSZrmk_Int 3442
+VMULSSZrmkz_Int 3443
+VMULSSZrr 3444
+VMULSSZrr_Int 3445
+VMULSSZrrb_Int 3446
+VMULSSZrrbk_Int 3447
+VMULSSZrrbkz_Int 3448
+VMULSSZrrk_Int 3449
+VMULSSZrrkz_Int 3450
+VMULSSrm 3451
+VMULSSrm_Int 3452
+VMULSSrr 3453
+VMULSSrr_Int 3454
+VMWRITE 3455
+VMXOFF 3456
+VMXON 3457
+VORPDYrm 3458
+VORPDYrr 3459
+VORPDZ 3460
+VORPDZrm 3461
+VORPDZrmb 3462
+VORPDZrmbk 3463
+VORPDZrmbkz 3464
+VORPDZrmk 3465
+VORPDZrmkz 3466
+VORPDZrr 3467
+VORPDZrrk 3468
+VORPDZrrkz 3469
+VORPDrm 3470
+VORPDrr 3471
+VORPSYrm 3472
+VORPSYrr 3473
+VORPSZ 3474
+VORPSZrm 3475
+VORPSZrmb 3476
+VORPSZrmbk 3477
+VORPSZrmbkz 3478
+VORPSZrmk 3479
+VORPSZrmkz 3480
+VORPSZrr 3481
+VORPSZrrk 3482
+VORPSZrrkz 3483
+VORPSrm 3484
+VORPSrr 3485
+VP 3486
+VPABSBYrm 3487
+VPABSBYrr 3488
+VPABSBZ 3489
+VPABSBZrm 3490
+VPABSBZrmk 3491
+VPABSBZrmkz 3492
+VPABSBZrr 3493
+VPABSBZrrk 3494
+VPABSBZrrkz 3495
+VPABSBrm 3496
+VPABSBrr 3497
+VPABSDYrm 3498
+VPABSDYrr 3499
+VPABSDZ 3500
+VPABSDZrm 3501
+VPABSDZrmb 3502
+VPABSDZrmbk 3503
+VPABSDZrmbkz 3504
+VPABSDZrmk 3505
+VPABSDZrmkz 3506
+VPABSDZrr 3507
+VPABSDZrrk 3508
+VPABSDZrrkz 3509
+VPABSDrm 3510
+VPABSDrr 3511
+VPABSQZ 3512
+VPABSQZrm 3513
+VPABSQZrmb 3514
+VPABSQZrmbk 3515
+VPABSQZrmbkz 3516
+VPABSQZrmk 3517
+VPABSQZrmkz 3518
+VPABSQZrr 3519
+VPABSQZrrk 3520
+VPABSQZrrkz 3521
+VPABSWYrm 3522
+VPABSWYrr 3523
+VPABSWZ 3524
+VPABSWZrm 3525
+VPABSWZrmk 3526
+VPABSWZrmkz 3527
+VPABSWZrr 3528
+VPABSWZrrk 3529
+VPABSWZrrkz 3530
+VPABSWrm 3531
+VPABSWrr 3532
+VPACKSSDWYrm 3533
+VPACKSSDWYrr 3534
+VPACKSSDWZ 3535
+VPACKSSDWZrm 3536
+VPACKSSDWZrmb 3537
+VPACKSSDWZrmbk 3538
+VPACKSSDWZrmbkz 3539
+VPACKSSDWZrmk 3540
+VPACKSSDWZrmkz 3541
+VPACKSSDWZrr 3542
+VPACKSSDWZrrk 3543
+VPACKSSDWZrrkz 3544
+VPACKSSDWrm 3545
+VPACKSSDWrr 3546
+VPACKSSWBYrm 3547
+VPACKSSWBYrr 3548
+VPACKSSWBZ 3549
+VPACKSSWBZrm 3550
+VPACKSSWBZrmk 3551
+VPACKSSWBZrmkz 3552
+VPACKSSWBZrr 3553
+VPACKSSWBZrrk 3554
+VPACKSSWBZrrkz 3555
+VPACKSSWBrm 3556
+VPACKSSWBrr 3557
+VPACKUSDWYrm 3558
+VPACKUSDWYrr 3559
+VPACKUSDWZ 3560
+VPACKUSDWZrm 3561
+VPACKUSDWZrmb 3562
+VPACKUSDWZrmbk 3563
+VPACKUSDWZrmbkz 3564
+VPACKUSDWZrmk 3565
+VPACKUSDWZrmkz 3566
+VPACKUSDWZrr 3567
+VPACKUSDWZrrk 3568
+VPACKUSDWZrrkz 3569
+VPACKUSDWrm 3570
+VPACKUSDWrr 3571
+VPACKUSWBYrm 3572
+VPACKUSWBYrr 3573
+VPACKUSWBZ 3574
+VPACKUSWBZrm 3575
+VPACKUSWBZrmk 3576
+VPACKUSWBZrmkz 3577
+VPACKUSWBZrr 3578
+VPACKUSWBZrrk 3579
+VPACKUSWBZrrkz 3580
+VPACKUSWBrm 3581
+VPACKUSWBrr 3582
+VPADDBYrm 3583
+VPADDBYrr 3584
+VPADDBZ 3585
+VPADDBZrm 3586
+VPADDBZrmk 3587
+VPADDBZrmkz 3588
+VPADDBZrr 3589
+VPADDBZrrk 3590
+VPADDBZrrkz 3591
+VPADDBrm 3592
+VPADDBrr 3593
+VPADDDYrm 3594
+VPADDDYrr 3595
+VPADDDZ 3596
+VPADDDZrm 3597
+VPADDDZrmb 3598
+VPADDDZrmbk 3599
+VPADDDZrmbkz 3600
+VPADDDZrmk 3601
+VPADDDZrmkz 3602
+VPADDDZrr 3603
+VPADDDZrrk 3604
+VPADDDZrrkz 3605
+VPADDDrm 3606
+VPADDDrr 3607
+VPADDQYrm 3608
+VPADDQYrr 3609
+VPADDQZ 3610
+VPADDQZrm 3611
+VPADDQZrmb 3612
+VPADDQZrmbk 3613
+VPADDQZrmbkz 3614
+VPADDQZrmk 3615
+VPADDQZrmkz 3616
+VPADDQZrr 3617
+VPADDQZrrk 3618
+VPADDQZrrkz 3619
+VPADDQrm 3620
+VPADDQrr 3621
+VPADDSBYrm 3622
+VPADDSBYrr 3623
+VPADDSBZ 3624
+VPADDSBZrm 3625
+VPADDSBZrmk 3626
+VPADDSBZrmkz 3627
+VPADDSBZrr 3628
+VPADDSBZrrk 3629
+VPADDSBZrrkz 3630
+VPADDSBrm 3631
+VPADDSBrr 3632
+VPADDSWYrm 3633
+VPADDSWYrr 3634
+VPADDSWZ 3635
+VPADDSWZrm 3636
+VPADDSWZrmk 3637
+VPADDSWZrmkz 3638
+VPADDSWZrr 3639
+VPADDSWZrrk 3640
+VPADDSWZrrkz 3641
+VPADDSWrm 3642
+VPADDSWrr 3643
+VPADDUSBYrm 3644
+VPADDUSBYrr 3645
+VPADDUSBZ 3646
+VPADDUSBZrm 3647
+VPADDUSBZrmk 3648
+VPADDUSBZrmkz 3649
+VPADDUSBZrr 3650
+VPADDUSBZrrk 3651
+VPADDUSBZrrkz 3652
+VPADDUSBrm 3653
+VPADDUSBrr 3654
+VPADDUSWYrm 3655
+VPADDUSWYrr 3656
+VPADDUSWZ 3657
+VPADDUSWZrm 3658
+VPADDUSWZrmk 3659
+VPADDUSWZrmkz 3660
+VPADDUSWZrr 3661
+VPADDUSWZrrk 3662
+VPADDUSWZrrkz 3663
+VPADDUSWrm 3664
+VPADDUSWrr 3665
+VPADDWYrm 3666
+VPADDWYrr 3667
+VPADDWZ 3668
+VPADDWZrm 3669
+VPADDWZrmk 3670
+VPADDWZrmkz 3671
+VPADDWZrr 3672
+VPADDWZrrk 3673
+VPADDWZrrkz 3674
+VPADDWrm 3675
+VPADDWrr 3676
+VPALIGNRYrmi 3677
+VPALIGNRYrri 3678
+VPALIGNRZ 3679
+VPALIGNRZrmi 3680
+VPALIGNRZrmik 3681
+VPALIGNRZrmikz 3682
+VPALIGNRZrri 3683
+VPALIGNRZrrik 3684
+VPALIGNRZrrikz 3685
+VPALIGNRrmi 3686
+VPALIGNRrri 3687
+VPANDDZ 3688
+VPANDDZrm 3689
+VPANDDZrmb 3690
+VPANDDZrmbk 3691
+VPANDDZrmbkz 3692
+VPANDDZrmk 3693
+VPANDDZrmkz 3694
+VPANDDZrr 3695
+VPANDDZrrk 3696
+VPANDDZrrkz 3697
+VPANDNDZ 3698
+VPANDNDZrm 3699
+VPANDNDZrmb 3700
+VPANDNDZrmbk 3701
+VPANDNDZrmbkz 3702
+VPANDNDZrmk 3703
+VPANDNDZrmkz 3704
+VPANDNDZrr 3705
+VPANDNDZrrk 3706
+VPANDNDZrrkz 3707
+VPANDNQZ 3708
+VPANDNQZrm 3709
+VPANDNQZrmb 3710
+VPANDNQZrmbk 3711
+VPANDNQZrmbkz 3712
+VPANDNQZrmk 3713
+VPANDNQZrmkz 3714
+VPANDNQZrr 3715
+VPANDNQZrrk 3716
+VPANDNQZrrkz 3717
+VPANDNYrm 3718
+VPANDNYrr 3719
+VPANDNrm 3720
+VPANDNrr 3721
+VPANDQZ 3722
+VPANDQZrm 3723
+VPANDQZrmb 3724
+VPANDQZrmbk 3725
+VPANDQZrmbkz 3726
+VPANDQZrmk 3727
+VPANDQZrmkz 3728
+VPANDQZrr 3729
+VPANDQZrrk 3730
+VPANDQZrrkz 3731
+VPANDYrm 3732
+VPANDYrr 3733
+VPANDrm 3734
+VPANDrr 3735
+VPAVGBYrm 3736
+VPAVGBYrr 3737
+VPAVGBZ 3738
+VPAVGBZrm 3739
+VPAVGBZrmk 3740
+VPAVGBZrmkz 3741
+VPAVGBZrr 3742
+VPAVGBZrrk 3743
+VPAVGBZrrkz 3744
+VPAVGBrm 3745
+VPAVGBrr 3746
+VPAVGWYrm 3747
+VPAVGWYrr 3748
+VPAVGWZ 3749
+VPAVGWZrm 3750
+VPAVGWZrmk 3751
+VPAVGWZrmkz 3752
+VPAVGWZrr 3753
+VPAVGWZrrk 3754
+VPAVGWZrrkz 3755
+VPAVGWrm 3756
+VPAVGWrr 3757
+VPBLENDDYrmi 3758
+VPBLENDDYrri 3759
+VPBLENDDrmi 3760
+VPBLENDDrri 3761
+VPBLENDMBZ 3762
+VPBLENDMBZrm 3763
+VPBLENDMBZrmk 3764
+VPBLENDMBZrmkz 3765
+VPBLENDMBZrr 3766
+VPBLENDMBZrrk 3767
+VPBLENDMBZrrkz 3768
+VPBLENDMDZ 3769
+VPBLENDMDZrm 3770
+VPBLENDMDZrmb 3771
+VPBLENDMDZrmbk 3772
+VPBLENDMDZrmbkz 3773
+VPBLENDMDZrmk 3774
+VPBLENDMDZrmkz 3775
+VPBLENDMDZrr 3776
+VPBLENDMDZrrk 3777
+VPBLENDMDZrrkz 3778
+VPBLENDMQZ 3779
+VPBLENDMQZrm 3780
+VPBLENDMQZrmb 3781
+VPBLENDMQZrmbk 3782
+VPBLENDMQZrmbkz 3783
+VPBLENDMQZrmk 3784
+VPBLENDMQZrmkz 3785
+VPBLENDMQZrr 3786
+VPBLENDMQZrrk 3787
+VPBLENDMQZrrkz 3788
+VPBLENDMWZ 3789
+VPBLENDMWZrm 3790
+VPBLENDMWZrmk 3791
+VPBLENDMWZrmkz 3792
+VPBLENDMWZrr 3793
+VPBLENDMWZrrk 3794
+VPBLENDMWZrrkz 3795
+VPBLENDVBYrmr 3796
+VPBLENDVBYrrr 3797
+VPBLENDVBrmr 3798
+VPBLENDVBrrr 3799
+VPBLENDWYrmi 3800
+VPBLENDWYrri 3801
+VPBLENDWrmi 3802
+VPBLENDWrri 3803
+VPBROADCASTBYrm 3804
+VPBROADCASTBYrr 3805
+VPBROADCASTBZ 3806
+VPBROADCASTBZrm 3807
+VPBROADCASTBZrmk 3808
+VPBROADCASTBZrmkz 3809
+VPBROADCASTBZrr 3810
+VPBROADCASTBZrrk 3811
+VPBROADCASTBZrrkz 3812
+VPBROADCASTBrZ 3813
+VPBROADCASTBrZrr 3814
+VPBROADCASTBrZrrk 3815
+VPBROADCASTBrZrrkz 3816
+VPBROADCASTBrm 3817
+VPBROADCASTBrr 3818
+VPBROADCASTDYrm 3819
+VPBROADCASTDYrr 3820
+VPBROADCASTDZ 3821
+VPBROADCASTDZrm 3822
+VPBROADCASTDZrmk 3823
+VPBROADCASTDZrmkz 3824
+VPBROADCASTDZrr 3825
+VPBROADCASTDZrrk 3826
+VPBROADCASTDZrrkz 3827
+VPBROADCASTDrZ 3828
+VPBROADCASTDrZrr 3829
+VPBROADCASTDrZrrk 3830
+VPBROADCASTDrZrrkz 3831
+VPBROADCASTDrm 3832
+VPBROADCASTDrr 3833
+VPBROADCASTMB 3834
+VPBROADCASTMW 3835
+VPBROADCASTQYrm 3836
+VPBROADCASTQYrr 3837
+VPBROADCASTQZ 3838
+VPBROADCASTQZrm 3839
+VPBROADCASTQZrmk 3840
+VPBROADCASTQZrmkz 3841
+VPBROADCASTQZrr 3842
+VPBROADCASTQZrrk 3843
+VPBROADCASTQZrrkz 3844
+VPBROADCASTQrZ 3845
+VPBROADCASTQrZrr 3846
+VPBROADCASTQrZrrk 3847
+VPBROADCASTQrZrrkz 3848
+VPBROADCASTQrm 3849
+VPBROADCASTQrr 3850
+VPBROADCASTWYrm 3851
+VPBROADCASTWYrr 3852
+VPBROADCASTWZ 3853
+VPBROADCASTWZrm 3854
+VPBROADCASTWZrmk 3855
+VPBROADCASTWZrmkz 3856
+VPBROADCASTWZrr 3857
+VPBROADCASTWZrrk 3858
+VPBROADCASTWZrrkz 3859
+VPBROADCASTWrZ 3860
+VPBROADCASTWrZrr 3861
+VPBROADCASTWrZrrk 3862
+VPBROADCASTWrZrrkz 3863
+VPBROADCASTWrm 3864
+VPBROADCASTWrr 3865
+VPCLMULQDQYrmi 3866
+VPCLMULQDQYrri 3867
+VPCLMULQDQZ 3868
+VPCLMULQDQZrmi 3869
+VPCLMULQDQZrri 3870
+VPCLMULQDQrmi 3871
+VPCLMULQDQrri 3872
+VPCMOVYrmr 3873
+VPCMOVYrrm 3874
+VPCMOVYrrr 3875
+VPCMOVYrrr_REV 3876
+VPCMOVrmr 3877
+VPCMOVrrm 3878
+VPCMOVrrr 3879
+VPCMOVrrr_REV 3880
+VPCMPBZ 3881
+VPCMPBZrmi 3882
+VPCMPBZrmik 3883
+VPCMPBZrri 3884
+VPCMPBZrrik 3885
+VPCMPDZ 3886
+VPCMPDZrmbi 3887
+VPCMPDZrmbik 3888
+VPCMPDZrmi 3889
+VPCMPDZrmik 3890
+VPCMPDZrri 3891
+VPCMPDZrrik 3892
+VPCMPEQBYrm 3893
+VPCMPEQBYrr 3894
+VPCMPEQBZ 3895
+VPCMPEQBZrm 3896
+VPCMPEQBZrmk 3897
+VPCMPEQBZrr 3898
+VPCMPEQBZrrk 3899
+VPCMPEQBrm 3900
+VPCMPEQBrr 3901
+VPCMPEQDYrm 3902
+VPCMPEQDYrr 3903
+VPCMPEQDZ 3904
+VPCMPEQDZrm 3905
+VPCMPEQDZrmb 3906
+VPCMPEQDZrmbk 3907
+VPCMPEQDZrmk 3908
+VPCMPEQDZrr 3909
+VPCMPEQDZrrk 3910
+VPCMPEQDrm 3911
+VPCMPEQDrr 3912
+VPCMPEQQYrm 3913
+VPCMPEQQYrr 3914
+VPCMPEQQZ 3915
+VPCMPEQQZrm 3916
+VPCMPEQQZrmb 3917
+VPCMPEQQZrmbk 3918
+VPCMPEQQZrmk 3919
+VPCMPEQQZrr 3920
+VPCMPEQQZrrk 3921
+VPCMPEQQrm 3922
+VPCMPEQQrr 3923
+VPCMPEQWYrm 3924
+VPCMPEQWYrr 3925
+VPCMPEQWZ 3926
+VPCMPEQWZrm 3927
+VPCMPEQWZrmk 3928
+VPCMPEQWZrr 3929
+VPCMPEQWZrrk 3930
+VPCMPEQWrm 3931
+VPCMPEQWrr 3932
+VPCMPESTRIrmi 3933
+VPCMPESTRIrri 3934
+VPCMPESTRMrmi 3935
+VPCMPESTRMrri 3936
+VPCMPGTBYrm 3937
+VPCMPGTBYrr 3938
+VPCMPGTBZ 3939
+VPCMPGTBZrm 3940
+VPCMPGTBZrmk 3941
+VPCMPGTBZrr 3942
+VPCMPGTBZrrk 3943
+VPCMPGTBrm 3944
+VPCMPGTBrr 3945
+VPCMPGTDYrm 3946
+VPCMPGTDYrr 3947
+VPCMPGTDZ 3948
+VPCMPGTDZrm 3949
+VPCMPGTDZrmb 3950
+VPCMPGTDZrmbk 3951
+VPCMPGTDZrmk 3952
+VPCMPGTDZrr 3953
+VPCMPGTDZrrk 3954
+VPCMPGTDrm 3955
+VPCMPGTDrr 3956
+VPCMPGTQYrm 3957
+VPCMPGTQYrr 3958
+VPCMPGTQZ 3959
+VPCMPGTQZrm 3960
+VPCMPGTQZrmb 3961
+VPCMPGTQZrmbk 3962
+VPCMPGTQZrmk 3963
+VPCMPGTQZrr 3964
+VPCMPGTQZrrk 3965
+VPCMPGTQrm 3966
+VPCMPGTQrr 3967
+VPCMPGTWYrm 3968
+VPCMPGTWYrr 3969
+VPCMPGTWZ 3970
+VPCMPGTWZrm 3971
+VPCMPGTWZrmk 3972
+VPCMPGTWZrr 3973
+VPCMPGTWZrrk 3974
+VPCMPGTWrm 3975
+VPCMPGTWrr 3976
+VPCMPISTRIrmi 3977
+VPCMPISTRIrri 3978
+VPCMPISTRMrmi 3979
+VPCMPISTRMrri 3980
+VPCMPQZ 3981
+VPCMPQZrmbi 3982
+VPCMPQZrmbik 3983
+VPCMPQZrmi 3984
+VPCMPQZrmik 3985
+VPCMPQZrri 3986
+VPCMPQZrrik 3987
+VPCMPUBZ 3988
+VPCMPUBZrmi 3989
+VPCMPUBZrmik 3990
+VPCMPUBZrri 3991
+VPCMPUBZrrik 3992
+VPCMPUDZ 3993
+VPCMPUDZrmbi 3994
+VPCMPUDZrmbik 3995
+VPCMPUDZrmi 3996
+VPCMPUDZrmik 3997
+VPCMPUDZrri 3998
+VPCMPUDZrrik 3999
+VPCMPUQZ 4000
+VPCMPUQZrmbi 4001
+VPCMPUQZrmbik 4002
+VPCMPUQZrmi 4003
+VPCMPUQZrmik 4004
+VPCMPUQZrri 4005
+VPCMPUQZrrik 4006
+VPCMPUWZ 4007
+VPCMPUWZrmi 4008
+VPCMPUWZrmik 4009
+VPCMPUWZrri 4010
+VPCMPUWZrrik 4011
+VPCMPWZ 4012
+VPCMPWZrmi 4013
+VPCMPWZrmik 4014
+VPCMPWZrri 4015
+VPCMPWZrrik 4016
+VPCOMBmi 4017
+VPCOMBri 4018
+VPCOMDmi 4019
+VPCOMDri 4020
+VPCOMPRESSBZ 4021
+VPCOMPRESSBZmr 4022
+VPCOMPRESSBZmrk 4023
+VPCOMPRESSBZrr 4024
+VPCOMPRESSBZrrk 4025
+VPCOMPRESSBZrrkz 4026
+VPCOMPRESSDZ 4027
+VPCOMPRESSDZmr 4028
+VPCOMPRESSDZmrk 4029
+VPCOMPRESSDZrr 4030
+VPCOMPRESSDZrrk 4031
+VPCOMPRESSDZrrkz 4032
+VPCOMPRESSQZ 4033
+VPCOMPRESSQZmr 4034
+VPCOMPRESSQZmrk 4035
+VPCOMPRESSQZrr 4036
+VPCOMPRESSQZrrk 4037
+VPCOMPRESSQZrrkz 4038
+VPCOMPRESSWZ 4039
+VPCOMPRESSWZmr 4040
+VPCOMPRESSWZmrk 4041
+VPCOMPRESSWZrr 4042
+VPCOMPRESSWZrrk 4043
+VPCOMPRESSWZrrkz 4044
+VPCOMQmi 4045
+VPCOMQri 4046
+VPCOMUBmi 4047
+VPCOMUBri 4048
+VPCOMUDmi 4049
+VPCOMUDri 4050
+VPCOMUQmi 4051
+VPCOMUQri 4052
+VPCOMUWmi 4053
+VPCOMUWri 4054
+VPCOMWmi 4055
+VPCOMWri 4056
+VPCONFLICTDZ 4057
+VPCONFLICTDZrm 4058
+VPCONFLICTDZrmb 4059
+VPCONFLICTDZrmbk 4060
+VPCONFLICTDZrmbkz 4061
+VPCONFLICTDZrmk 4062
+VPCONFLICTDZrmkz 4063
+VPCONFLICTDZrr 4064
+VPCONFLICTDZrrk 4065
+VPCONFLICTDZrrkz 4066
+VPCONFLICTQZ 4067
+VPCONFLICTQZrm 4068
+VPCONFLICTQZrmb 4069
+VPCONFLICTQZrmbk 4070
+VPCONFLICTQZrmbkz 4071
+VPCONFLICTQZrmk 4072
+VPCONFLICTQZrmkz 4073
+VPCONFLICTQZrr 4074
+VPCONFLICTQZrrk 4075
+VPCONFLICTQZrrkz 4076
+VPDPBSSDSYrm 4077
+VPDPBSSDSYrr 4078
+VPDPBSSDSZ 4079
+VPDPBSSDSZrm 4080
+VPDPBSSDSZrmb 4081
+VPDPBSSDSZrmbk 4082
+VPDPBSSDSZrmbkz 4083
+VPDPBSSDSZrmk 4084
+VPDPBSSDSZrmkz 4085
+VPDPBSSDSZrr 4086
+VPDPBSSDSZrrk 4087
+VPDPBSSDSZrrkz 4088
+VPDPBSSDSrm 4089
+VPDPBSSDSrr 4090
+VPDPBSSDYrm 4091
+VPDPBSSDYrr 4092
+VPDPBSSDZ 4093
+VPDPBSSDZrm 4094
+VPDPBSSDZrmb 4095
+VPDPBSSDZrmbk 4096
+VPDPBSSDZrmbkz 4097
+VPDPBSSDZrmk 4098
+VPDPBSSDZrmkz 4099
+VPDPBSSDZrr 4100
+VPDPBSSDZrrk 4101
+VPDPBSSDZrrkz 4102
+VPDPBSSDrm 4103
+VPDPBSSDrr 4104
+VPDPBSUDSYrm 4105
+VPDPBSUDSYrr 4106
+VPDPBSUDSZ 4107
+VPDPBSUDSZrm 4108
+VPDPBSUDSZrmb 4109
+VPDPBSUDSZrmbk 4110
+VPDPBSUDSZrmbkz 4111
+VPDPBSUDSZrmk 4112
+VPDPBSUDSZrmkz 4113
+VPDPBSUDSZrr 4114
+VPDPBSUDSZrrk 4115
+VPDPBSUDSZrrkz 4116
+VPDPBSUDSrm 4117
+VPDPBSUDSrr 4118
+VPDPBSUDYrm 4119
+VPDPBSUDYrr 4120
+VPDPBSUDZ 4121
+VPDPBSUDZrm 4122
+VPDPBSUDZrmb 4123
+VPDPBSUDZrmbk 4124
+VPDPBSUDZrmbkz 4125
+VPDPBSUDZrmk 4126
+VPDPBSUDZrmkz 4127
+VPDPBSUDZrr 4128
+VPDPBSUDZrrk 4129
+VPDPBSUDZrrkz 4130
+VPDPBSUDrm 4131
+VPDPBSUDrr 4132
+VPDPBUSDSYrm 4133
+VPDPBUSDSYrr 4134
+VPDPBUSDSZ 4135
+VPDPBUSDSZrm 4136
+VPDPBUSDSZrmb 4137
+VPDPBUSDSZrmbk 4138
+VPDPBUSDSZrmbkz 4139
+VPDPBUSDSZrmk 4140
+VPDPBUSDSZrmkz 4141
+VPDPBUSDSZrr 4142
+VPDPBUSDSZrrk 4143
+VPDPBUSDSZrrkz 4144
+VPDPBUSDSrm 4145
+VPDPBUSDSrr 4146
+VPDPBUSDYrm 4147
+VPDPBUSDYrr 4148
+VPDPBUSDZ 4149
+VPDPBUSDZrm 4150
+VPDPBUSDZrmb 4151
+VPDPBUSDZrmbk 4152
+VPDPBUSDZrmbkz 4153
+VPDPBUSDZrmk 4154
+VPDPBUSDZrmkz 4155
+VPDPBUSDZrr 4156
+VPDPBUSDZrrk 4157
+VPDPBUSDZrrkz 4158
+VPDPBUSDrm 4159
+VPDPBUSDrr 4160
+VPDPBUUDSYrm 4161
+VPDPBUUDSYrr 4162
+VPDPBUUDSZ 4163
+VPDPBUUDSZrm 4164
+VPDPBUUDSZrmb 4165
+VPDPBUUDSZrmbk 4166
+VPDPBUUDSZrmbkz 4167
+VPDPBUUDSZrmk 4168
+VPDPBUUDSZrmkz 4169
+VPDPBUUDSZrr 4170
+VPDPBUUDSZrrk 4171
+VPDPBUUDSZrrkz 4172
+VPDPBUUDSrm 4173
+VPDPBUUDSrr 4174
+VPDPBUUDYrm 4175
+VPDPBUUDYrr 4176
+VPDPBUUDZ 4177
+VPDPBUUDZrm 4178
+VPDPBUUDZrmb 4179
+VPDPBUUDZrmbk 4180
+VPDPBUUDZrmbkz 4181
+VPDPBUUDZrmk 4182
+VPDPBUUDZrmkz 4183
+VPDPBUUDZrr 4184
+VPDPBUUDZrrk 4185
+VPDPBUUDZrrkz 4186
+VPDPBUUDrm 4187
+VPDPBUUDrr 4188
+VPDPWSSDSYrm 4189
+VPDPWSSDSYrr 4190
+VPDPWSSDSZ 4191
+VPDPWSSDSZrm 4192
+VPDPWSSDSZrmb 4193
+VPDPWSSDSZrmbk 4194
+VPDPWSSDSZrmbkz 4195
+VPDPWSSDSZrmk 4196
+VPDPWSSDSZrmkz 4197
+VPDPWSSDSZrr 4198
+VPDPWSSDSZrrk 4199
+VPDPWSSDSZrrkz 4200
+VPDPWSSDSrm 4201
+VPDPWSSDSrr 4202
+VPDPWSSDYrm 4203
+VPDPWSSDYrr 4204
+VPDPWSSDZ 4205
+VPDPWSSDZrm 4206
+VPDPWSSDZrmb 4207
+VPDPWSSDZrmbk 4208
+VPDPWSSDZrmbkz 4209
+VPDPWSSDZrmk 4210
+VPDPWSSDZrmkz 4211
+VPDPWSSDZrr 4212
+VPDPWSSDZrrk 4213
+VPDPWSSDZrrkz 4214
+VPDPWSSDrm 4215
+VPDPWSSDrr 4216
+VPDPWSUDSYrm 4217
+VPDPWSUDSYrr 4218
+VPDPWSUDSZ 4219
+VPDPWSUDSZrm 4220
+VPDPWSUDSZrmb 4221
+VPDPWSUDSZrmbk 4222
+VPDPWSUDSZrmbkz 4223
+VPDPWSUDSZrmk 4224
+VPDPWSUDSZrmkz 4225
+VPDPWSUDSZrr 4226
+VPDPWSUDSZrrk 4227
+VPDPWSUDSZrrkz 4228
+VPDPWSUDSrm 4229
+VPDPWSUDSrr 4230
+VPDPWSUDYrm 4231
+VPDPWSUDYrr 4232
+VPDPWSUDZ 4233
+VPDPWSUDZrm 4234
+VPDPWSUDZrmb 4235
+VPDPWSUDZrmbk 4236
+VPDPWSUDZrmbkz 4237
+VPDPWSUDZrmk 4238
+VPDPWSUDZrmkz 4239
+VPDPWSUDZrr 4240
+VPDPWSUDZrrk 4241
+VPDPWSUDZrrkz 4242
+VPDPWSUDrm 4243
+VPDPWSUDrr 4244
+VPDPWUSDSYrm 4245
+VPDPWUSDSYrr 4246
+VPDPWUSDSZ 4247
+VPDPWUSDSZrm 4248
+VPDPWUSDSZrmb 4249
+VPDPWUSDSZrmbk 4250
+VPDPWUSDSZrmbkz 4251
+VPDPWUSDSZrmk 4252
+VPDPWUSDSZrmkz 4253
+VPDPWUSDSZrr 4254
+VPDPWUSDSZrrk 4255
+VPDPWUSDSZrrkz 4256
+VPDPWUSDSrm 4257
+VPDPWUSDSrr 4258
+VPDPWUSDYrm 4259
+VPDPWUSDYrr 4260
+VPDPWUSDZ 4261
+VPDPWUSDZrm 4262
+VPDPWUSDZrmb 4263
+VPDPWUSDZrmbk 4264
+VPDPWUSDZrmbkz 4265
+VPDPWUSDZrmk 4266
+VPDPWUSDZrmkz 4267
+VPDPWUSDZrr 4268
+VPDPWUSDZrrk 4269
+VPDPWUSDZrrkz 4270
+VPDPWUSDrm 4271
+VPDPWUSDrr 4272
+VPDPWUUDSYrm 4273
+VPDPWUUDSYrr 4274
+VPDPWUUDSZ 4275
+VPDPWUUDSZrm 4276
+VPDPWUUDSZrmb 4277
+VPDPWUUDSZrmbk 4278
+VPDPWUUDSZrmbkz 4279
+VPDPWUUDSZrmk 4280
+VPDPWUUDSZrmkz 4281
+VPDPWUUDSZrr 4282
+VPDPWUUDSZrrk 4283
+VPDPWUUDSZrrkz 4284
+VPDPWUUDSrm 4285
+VPDPWUUDSrr 4286
+VPDPWUUDYrm 4287
+VPDPWUUDYrr 4288
+VPDPWUUDZ 4289
+VPDPWUUDZrm 4290
+VPDPWUUDZrmb 4291
+VPDPWUUDZrmbk 4292
+VPDPWUUDZrmbkz 4293
+VPDPWUUDZrmk 4294
+VPDPWUUDZrmkz 4295
+VPDPWUUDZrr 4296
+VPDPWUUDZrrk 4297
+VPDPWUUDZrrkz 4298
+VPDPWUUDrm 4299
+VPDPWUUDrr 4300
+VPERM 4301
+VPERMBZ 4302
+VPERMBZrm 4303
+VPERMBZrmk 4304
+VPERMBZrmkz 4305
+VPERMBZrr 4306
+VPERMBZrrk 4307
+VPERMBZrrkz 4308
+VPERMDYrm 4309
+VPERMDYrr 4310
+VPERMDZ 4311
+VPERMDZrm 4312
+VPERMDZrmb 4313
+VPERMDZrmbk 4314
+VPERMDZrmbkz 4315
+VPERMDZrmk 4316
+VPERMDZrmkz 4317
+VPERMDZrr 4318
+VPERMDZrrk 4319
+VPERMDZrrkz 4320
+VPERMI 4321
+VPERMIL 4322
+VPERMILPDYmi 4323
+VPERMILPDYri 4324
+VPERMILPDYrm 4325
+VPERMILPDYrr 4326
+VPERMILPDZ 4327
+VPERMILPDZmbi 4328
+VPERMILPDZmbik 4329
+VPERMILPDZmbikz 4330
+VPERMILPDZmi 4331
+VPERMILPDZmik 4332
+VPERMILPDZmikz 4333
+VPERMILPDZri 4334
+VPERMILPDZrik 4335
+VPERMILPDZrikz 4336
+VPERMILPDZrm 4337
+VPERMILPDZrmb 4338
+VPERMILPDZrmbk 4339
+VPERMILPDZrmbkz 4340
+VPERMILPDZrmk 4341
+VPERMILPDZrmkz 4342
+VPERMILPDZrr 4343
+VPERMILPDZrrk 4344
+VPERMILPDZrrkz 4345
+VPERMILPDmi 4346
+VPERMILPDri 4347
+VPERMILPDrm 4348
+VPERMILPDrr 4349
+VPERMILPSYmi 4350
+VPERMILPSYri 4351
+VPERMILPSYrm 4352
+VPERMILPSYrr 4353
+VPERMILPSZ 4354
+VPERMILPSZmbi 4355
+VPERMILPSZmbik 4356
+VPERMILPSZmbikz 4357
+VPERMILPSZmi 4358
+VPERMILPSZmik 4359
+VPERMILPSZmikz 4360
+VPERMILPSZri 4361
+VPERMILPSZrik 4362
+VPERMILPSZrikz 4363
+VPERMILPSZrm 4364
+VPERMILPSZrmb 4365
+VPERMILPSZrmbk 4366
+VPERMILPSZrmbkz 4367
+VPERMILPSZrmk 4368
+VPERMILPSZrmkz 4369
+VPERMILPSZrr 4370
+VPERMILPSZrrk 4371
+VPERMILPSZrrkz 4372
+VPERMILPSmi 4373
+VPERMILPSri 4374
+VPERMILPSrm 4375
+VPERMILPSrr 4376
+VPERMPDYmi 4377
+VPERMPDYri 4378
+VPERMPDZ 4379
+VPERMPDZmbi 4380
+VPERMPDZmbik 4381
+VPERMPDZmbikz 4382
+VPERMPDZmi 4383
+VPERMPDZmik 4384
+VPERMPDZmikz 4385
+VPERMPDZri 4386
+VPERMPDZrik 4387
+VPERMPDZrikz 4388
+VPERMPDZrm 4389
+VPERMPDZrmb 4390
+VPERMPDZrmbk 4391
+VPERMPDZrmbkz 4392
+VPERMPDZrmk 4393
+VPERMPDZrmkz 4394
+VPERMPDZrr 4395
+VPERMPDZrrk 4396
+VPERMPDZrrkz 4397
+VPERMPSYrm 4398
+VPERMPSYrr 4399
+VPERMPSZ 4400
+VPERMPSZrm 4401
+VPERMPSZrmb 4402
+VPERMPSZrmbk 4403
+VPERMPSZrmbkz 4404
+VPERMPSZrmk 4405
+VPERMPSZrmkz 4406
+VPERMPSZrr 4407
+VPERMPSZrrk 4408
+VPERMPSZrrkz 4409
+VPERMQYmi 4410
+VPERMQYri 4411
+VPERMQZ 4412
+VPERMQZmbi 4413
+VPERMQZmbik 4414
+VPERMQZmbikz 4415
+VPERMQZmi 4416
+VPERMQZmik 4417
+VPERMQZmikz 4418
+VPERMQZri 4419
+VPERMQZrik 4420
+VPERMQZrikz 4421
+VPERMQZrm 4422
+VPERMQZrmb 4423
+VPERMQZrmbk 4424
+VPERMQZrmbkz 4425
+VPERMQZrmk 4426
+VPERMQZrmkz 4427
+VPERMQZrr 4428
+VPERMQZrrk 4429
+VPERMQZrrkz 4430
+VPERMT 4431
+VPERMWZ 4432
+VPERMWZrm 4433
+VPERMWZrmk 4434
+VPERMWZrmkz 4435
+VPERMWZrr 4436
+VPERMWZrrk 4437
+VPERMWZrrkz 4438
+VPEXPANDBZ 4439
+VPEXPANDBZrm 4440
+VPEXPANDBZrmk 4441
+VPEXPANDBZrmkz 4442
+VPEXPANDBZrr 4443
+VPEXPANDBZrrk 4444
+VPEXPANDBZrrkz 4445
+VPEXPANDDZ 4446
+VPEXPANDDZrm 4447
+VPEXPANDDZrmk 4448
+VPEXPANDDZrmkz 4449
+VPEXPANDDZrr 4450
+VPEXPANDDZrrk 4451
+VPEXPANDDZrrkz 4452
+VPEXPANDQZ 4453
+VPEXPANDQZrm 4454
+VPEXPANDQZrmk 4455
+VPEXPANDQZrmkz 4456
+VPEXPANDQZrr 4457
+VPEXPANDQZrrk 4458
+VPEXPANDQZrrkz 4459
+VPEXPANDWZ 4460
+VPEXPANDWZrm 4461
+VPEXPANDWZrmk 4462
+VPEXPANDWZrmkz 4463
+VPEXPANDWZrr 4464
+VPEXPANDWZrrk 4465
+VPEXPANDWZrrkz 4466
+VPEXTRBZmri 4467
+VPEXTRBZrri 4468
+VPEXTRBmri 4469
+VPEXTRBrri 4470
+VPEXTRDZmri 4471
+VPEXTRDZrri 4472
+VPEXTRDmri 4473
+VPEXTRDrri 4474
+VPEXTRQZmri 4475
+VPEXTRQZrri 4476
+VPEXTRQmri 4477
+VPEXTRQrri 4478
+VPEXTRWZmri 4479
+VPEXTRWZrri 4480
+VPEXTRWZrri_REV 4481
+VPEXTRWmri 4482
+VPEXTRWrri 4483
+VPEXTRWrri_REV 4484
+VPGATHERDDYrm 4485
+VPGATHERDDZ 4486
+VPGATHERDDZrm 4487
+VPGATHERDDrm 4488
+VPGATHERDQYrm 4489
+VPGATHERDQZ 4490
+VPGATHERDQZrm 4491
+VPGATHERDQrm 4492
+VPGATHERQDYrm 4493
+VPGATHERQDZ 4494
+VPGATHERQDZrm 4495
+VPGATHERQDrm 4496
+VPGATHERQQYrm 4497
+VPGATHERQQZ 4498
+VPGATHERQQZrm 4499
+VPGATHERQQrm 4500
+VPHADDBDrm 4501
+VPHADDBDrr 4502
+VPHADDBQrm 4503
+VPHADDBQrr 4504
+VPHADDBWrm 4505
+VPHADDBWrr 4506
+VPHADDDQrm 4507
+VPHADDDQrr 4508
+VPHADDDYrm 4509
+VPHADDDYrr 4510
+VPHADDDrm 4511
+VPHADDDrr 4512
+VPHADDSWYrm 4513
+VPHADDSWYrr 4514
+VPHADDSWrm 4515
+VPHADDSWrr 4516
+VPHADDUBDrm 4517
+VPHADDUBDrr 4518
+VPHADDUBQrm 4519
+VPHADDUBQrr 4520
+VPHADDUBWrm 4521
+VPHADDUBWrr 4522
+VPHADDUDQrm 4523
+VPHADDUDQrr 4524
+VPHADDUWDrm 4525
+VPHADDUWDrr 4526
+VPHADDUWQrm 4527
+VPHADDUWQrr 4528
+VPHADDWDrm 4529
+VPHADDWDrr 4530
+VPHADDWQrm 4531
+VPHADDWQrr 4532
+VPHADDWYrm 4533
+VPHADDWYrr 4534
+VPHADDWrm 4535
+VPHADDWrr 4536
+VPHMINPOSUWrm 4537
+VPHMINPOSUWrr 4538
+VPHSUBBWrm 4539
+VPHSUBBWrr 4540
+VPHSUBDQrm 4541
+VPHSUBDQrr 4542
+VPHSUBDYrm 4543
+VPHSUBDYrr 4544
+VPHSUBDrm 4545
+VPHSUBDrr 4546
+VPHSUBSWYrm 4547
+VPHSUBSWYrr 4548
+VPHSUBSWrm 4549
+VPHSUBSWrr 4550
+VPHSUBWDrm 4551
+VPHSUBWDrr 4552
+VPHSUBWYrm 4553
+VPHSUBWYrr 4554
+VPHSUBWrm 4555
+VPHSUBWrr 4556
+VPINSRBZrmi 4557
+VPINSRBZrri 4558
+VPINSRBrmi 4559
+VPINSRBrri 4560
+VPINSRDZrmi 4561
+VPINSRDZrri 4562
+VPINSRDrmi 4563
+VPINSRDrri 4564
+VPINSRQZrmi 4565
+VPINSRQZrri 4566
+VPINSRQrmi 4567
+VPINSRQrri 4568
+VPINSRWZrmi 4569
+VPINSRWZrri 4570
+VPINSRWrmi 4571
+VPINSRWrri 4572
+VPLZCNTDZ 4573
+VPLZCNTDZrm 4574
+VPLZCNTDZrmb 4575
+VPLZCNTDZrmbk 4576
+VPLZCNTDZrmbkz 4577
+VPLZCNTDZrmk 4578
+VPLZCNTDZrmkz 4579
+VPLZCNTDZrr 4580
+VPLZCNTDZrrk 4581
+VPLZCNTDZrrkz 4582
+VPLZCNTQZ 4583
+VPLZCNTQZrm 4584
+VPLZCNTQZrmb 4585
+VPLZCNTQZrmbk 4586
+VPLZCNTQZrmbkz 4587
+VPLZCNTQZrmk 4588
+VPLZCNTQZrmkz 4589
+VPLZCNTQZrr 4590
+VPLZCNTQZrrk 4591
+VPLZCNTQZrrkz 4592
+VPMACSDDrm 4593
+VPMACSDDrr 4594
+VPMACSDQHrm 4595
+VPMACSDQHrr 4596
+VPMACSDQLrm 4597
+VPMACSDQLrr 4598
+VPMACSSDDrm 4599
+VPMACSSDDrr 4600
+VPMACSSDQHrm 4601
+VPMACSSDQHrr 4602
+VPMACSSDQLrm 4603
+VPMACSSDQLrr 4604
+VPMACSSWDrm 4605
+VPMACSSWDrr 4606
+VPMACSSWWrm 4607
+VPMACSSWWrr 4608
+VPMACSWDrm 4609
+VPMACSWDrr 4610
+VPMACSWWrm 4611
+VPMACSWWrr 4612
+VPMADCSSWDrm 4613
+VPMADCSSWDrr 4614
+VPMADCSWDrm 4615
+VPMADCSWDrr 4616
+VPMADD 4617
+VPMADDUBSWYrm 4618
+VPMADDUBSWYrr 4619
+VPMADDUBSWZ 4620
+VPMADDUBSWZrm 4621
+VPMADDUBSWZrmk 4622
+VPMADDUBSWZrmkz 4623
+VPMADDUBSWZrr 4624
+VPMADDUBSWZrrk 4625
+VPMADDUBSWZrrkz 4626
+VPMADDUBSWrm 4627
+VPMADDUBSWrr 4628
+VPMADDWDYrm 4629
+VPMADDWDYrr 4630
+VPMADDWDZ 4631
+VPMADDWDZrm 4632
+VPMADDWDZrmk 4633
+VPMADDWDZrmkz 4634
+VPMADDWDZrr 4635
+VPMADDWDZrrk 4636
+VPMADDWDZrrkz 4637
+VPMADDWDrm 4638
+VPMADDWDrr 4639
+VPMASKMOVDYmr 4640
+VPMASKMOVDYrm 4641
+VPMASKMOVDmr 4642
+VPMASKMOVDrm 4643
+VPMASKMOVQYmr 4644
+VPMASKMOVQYrm 4645
+VPMASKMOVQmr 4646
+VPMASKMOVQrm 4647
+VPMAXSBYrm 4648
+VPMAXSBYrr 4649
+VPMAXSBZ 4650
+VPMAXSBZrm 4651
+VPMAXSBZrmk 4652
+VPMAXSBZrmkz 4653
+VPMAXSBZrr 4654
+VPMAXSBZrrk 4655
+VPMAXSBZrrkz 4656
+VPMAXSBrm 4657
+VPMAXSBrr 4658
+VPMAXSDYrm 4659
+VPMAXSDYrr 4660
+VPMAXSDZ 4661
+VPMAXSDZrm 4662
+VPMAXSDZrmb 4663
+VPMAXSDZrmbk 4664
+VPMAXSDZrmbkz 4665
+VPMAXSDZrmk 4666
+VPMAXSDZrmkz 4667
+VPMAXSDZrr 4668
+VPMAXSDZrrk 4669
+VPMAXSDZrrkz 4670
+VPMAXSDrm 4671
+VPMAXSDrr 4672
+VPMAXSQZ 4673
+VPMAXSQZrm 4674
+VPMAXSQZrmb 4675
+VPMAXSQZrmbk 4676
+VPMAXSQZrmbkz 4677
+VPMAXSQZrmk 4678
+VPMAXSQZrmkz 4679
+VPMAXSQZrr 4680
+VPMAXSQZrrk 4681
+VPMAXSQZrrkz 4682
+VPMAXSWYrm 4683
+VPMAXSWYrr 4684
+VPMAXSWZ 4685
+VPMAXSWZrm 4686
+VPMAXSWZrmk 4687
+VPMAXSWZrmkz 4688
+VPMAXSWZrr 4689
+VPMAXSWZrrk 4690
+VPMAXSWZrrkz 4691
+VPMAXSWrm 4692
+VPMAXSWrr 4693
+VPMAXUBYrm 4694
+VPMAXUBYrr 4695
+VPMAXUBZ 4696
+VPMAXUBZrm 4697
+VPMAXUBZrmk 4698
+VPMAXUBZrmkz 4699
+VPMAXUBZrr 4700
+VPMAXUBZrrk 4701
+VPMAXUBZrrkz 4702
+VPMAXUBrm 4703
+VPMAXUBrr 4704
+VPMAXUDYrm 4705
+VPMAXUDYrr 4706
+VPMAXUDZ 4707
+VPMAXUDZrm 4708
+VPMAXUDZrmb 4709
+VPMAXUDZrmbk 4710
+VPMAXUDZrmbkz 4711
+VPMAXUDZrmk 4712
+VPMAXUDZrmkz 4713
+VPMAXUDZrr 4714
+VPMAXUDZrrk 4715
+VPMAXUDZrrkz 4716
+VPMAXUDrm 4717
+VPMAXUDrr 4718
+VPMAXUQZ 4719
+VPMAXUQZrm 4720
+VPMAXUQZrmb 4721
+VPMAXUQZrmbk 4722
+VPMAXUQZrmbkz 4723
+VPMAXUQZrmk 4724
+VPMAXUQZrmkz 4725
+VPMAXUQZrr 4726
+VPMAXUQZrrk 4727
+VPMAXUQZrrkz 4728
+VPMAXUWYrm 4729
+VPMAXUWYrr 4730
+VPMAXUWZ 4731
+VPMAXUWZrm 4732
+VPMAXUWZrmk 4733
+VPMAXUWZrmkz 4734
+VPMAXUWZrr 4735
+VPMAXUWZrrk 4736
+VPMAXUWZrrkz 4737
+VPMAXUWrm 4738
+VPMAXUWrr 4739
+VPMINSBYrm 4740
+VPMINSBYrr 4741
+VPMINSBZ 4742
+VPMINSBZrm 4743
+VPMINSBZrmk 4744
+VPMINSBZrmkz 4745
+VPMINSBZrr 4746
+VPMINSBZrrk 4747
+VPMINSBZrrkz 4748
+VPMINSBrm 4749
+VPMINSBrr 4750
+VPMINSDYrm 4751
+VPMINSDYrr 4752
+VPMINSDZ 4753
+VPMINSDZrm 4754
+VPMINSDZrmb 4755
+VPMINSDZrmbk 4756
+VPMINSDZrmbkz 4757
+VPMINSDZrmk 4758
+VPMINSDZrmkz 4759
+VPMINSDZrr 4760
+VPMINSDZrrk 4761
+VPMINSDZrrkz 4762
+VPMINSDrm 4763
+VPMINSDrr 4764
+VPMINSQZ 4765
+VPMINSQZrm 4766
+VPMINSQZrmb 4767
+VPMINSQZrmbk 4768
+VPMINSQZrmbkz 4769
+VPMINSQZrmk 4770
+VPMINSQZrmkz 4771
+VPMINSQZrr 4772
+VPMINSQZrrk 4773
+VPMINSQZrrkz 4774
+VPMINSWYrm 4775
+VPMINSWYrr 4776
+VPMINSWZ 4777
+VPMINSWZrm 4778
+VPMINSWZrmk 4779
+VPMINSWZrmkz 4780
+VPMINSWZrr 4781
+VPMINSWZrrk 4782
+VPMINSWZrrkz 4783
+VPMINSWrm 4784
+VPMINSWrr 4785
+VPMINUBYrm 4786
+VPMINUBYrr 4787
+VPMINUBZ 4788
+VPMINUBZrm 4789
+VPMINUBZrmk 4790
+VPMINUBZrmkz 4791
+VPMINUBZrr 4792
+VPMINUBZrrk 4793
+VPMINUBZrrkz 4794
+VPMINUBrm 4795
+VPMINUBrr 4796
+VPMINUDYrm 4797
+VPMINUDYrr 4798
+VPMINUDZ 4799
+VPMINUDZrm 4800
+VPMINUDZrmb 4801
+VPMINUDZrmbk 4802
+VPMINUDZrmbkz 4803
+VPMINUDZrmk 4804
+VPMINUDZrmkz 4805
+VPMINUDZrr 4806
+VPMINUDZrrk 4807
+VPMINUDZrrkz 4808
+VPMINUDrm 4809
+VPMINUDrr 4810
+VPMINUQZ 4811
+VPMINUQZrm 4812
+VPMINUQZrmb 4813
+VPMINUQZrmbk 4814
+VPMINUQZrmbkz 4815
+VPMINUQZrmk 4816
+VPMINUQZrmkz 4817
+VPMINUQZrr 4818
+VPMINUQZrrk 4819
+VPMINUQZrrkz 4820
+VPMINUWYrm 4821
+VPMINUWYrr 4822
+VPMINUWZ 4823
+VPMINUWZrm 4824
+VPMINUWZrmk 4825
+VPMINUWZrmkz 4826
+VPMINUWZrr 4827
+VPMINUWZrrk 4828
+VPMINUWZrrkz 4829
+VPMINUWrm 4830
+VPMINUWrr 4831
+VPMOVB 4832
+VPMOVD 4833
+VPMOVDBZ 4834
+VPMOVDBZmr 4835
+VPMOVDBZmrk 4836
+VPMOVDBZrr 4837
+VPMOVDBZrrk 4838
+VPMOVDBZrrkz 4839
+VPMOVDWZ 4840
+VPMOVDWZmr 4841
+VPMOVDWZmrk 4842
+VPMOVDWZrr 4843
+VPMOVDWZrrk 4844
+VPMOVDWZrrkz 4845
+VPMOVM 4846
+VPMOVMSKBYrr 4847
+VPMOVMSKBrr 4848
+VPMOVQ 4849
+VPMOVQBZ 4850
+VPMOVQBZmr 4851
+VPMOVQBZmrk 4852
+VPMOVQBZrr 4853
+VPMOVQBZrrk 4854
+VPMOVQBZrrkz 4855
+VPMOVQDZ 4856
+VPMOVQDZmr 4857
+VPMOVQDZmrk 4858
+VPMOVQDZrr 4859
+VPMOVQDZrrk 4860
+VPMOVQDZrrkz 4861
+VPMOVQWZ 4862
+VPMOVQWZmr 4863
+VPMOVQWZmrk 4864
+VPMOVQWZrr 4865
+VPMOVQWZrrk 4866
+VPMOVQWZrrkz 4867
+VPMOVSDBZ 4868
+VPMOVSDBZmr 4869
+VPMOVSDBZmrk 4870
+VPMOVSDBZrr 4871
+VPMOVSDBZrrk 4872
+VPMOVSDBZrrkz 4873
+VPMOVSDWZ 4874
+VPMOVSDWZmr 4875
+VPMOVSDWZmrk 4876
+VPMOVSDWZrr 4877
+VPMOVSDWZrrk 4878
+VPMOVSDWZrrkz 4879
+VPMOVSQBZ 4880
+VPMOVSQBZmr 4881
+VPMOVSQBZmrk 4882
+VPMOVSQBZrr 4883
+VPMOVSQBZrrk 4884
+VPMOVSQBZrrkz 4885
+VPMOVSQDZ 4886
+VPMOVSQDZmr 4887
+VPMOVSQDZmrk 4888
+VPMOVSQDZrr 4889
+VPMOVSQDZrrk 4890
+VPMOVSQDZrrkz 4891
+VPMOVSQWZ 4892
+VPMOVSQWZmr 4893
+VPMOVSQWZmrk 4894
+VPMOVSQWZrr 4895
+VPMOVSQWZrrk 4896
+VPMOVSQWZrrkz 4897
+VPMOVSWBZ 4898
+VPMOVSWBZmr 4899
+VPMOVSWBZmrk 4900
+VPMOVSWBZrr 4901
+VPMOVSWBZrrk 4902
+VPMOVSWBZrrkz 4903
+VPMOVSXBDYrm 4904
+VPMOVSXBDYrr 4905
+VPMOVSXBDZ 4906
+VPMOVSXBDZrm 4907
+VPMOVSXBDZrmk 4908
+VPMOVSXBDZrmkz 4909
+VPMOVSXBDZrr 4910
+VPMOVSXBDZrrk 4911
+VPMOVSXBDZrrkz 4912
+VPMOVSXBDrm 4913
+VPMOVSXBDrr 4914
+VPMOVSXBQYrm 4915
+VPMOVSXBQYrr 4916
+VPMOVSXBQZ 4917
+VPMOVSXBQZrm 4918
+VPMOVSXBQZrmk 4919
+VPMOVSXBQZrmkz 4920
+VPMOVSXBQZrr 4921
+VPMOVSXBQZrrk 4922
+VPMOVSXBQZrrkz 4923
+VPMOVSXBQrm 4924
+VPMOVSXBQrr 4925
+VPMOVSXBWYrm 4926
+VPMOVSXBWYrr 4927
+VPMOVSXBWZ 4928
+VPMOVSXBWZrm 4929
+VPMOVSXBWZrmk 4930
+VPMOVSXBWZrmkz 4931
+VPMOVSXBWZrr 4932
+VPMOVSXBWZrrk 4933
+VPMOVSXBWZrrkz 4934
+VPMOVSXBWrm 4935
+VPMOVSXBWrr 4936
+VPMOVSXDQYrm 4937
+VPMOVSXDQYrr 4938
+VPMOVSXDQZ 4939
+VPMOVSXDQZrm 4940
+VPMOVSXDQZrmk 4941
+VPMOVSXDQZrmkz 4942
+VPMOVSXDQZrr 4943
+VPMOVSXDQZrrk 4944
+VPMOVSXDQZrrkz 4945
+VPMOVSXDQrm 4946
+VPMOVSXDQrr 4947
+VPMOVSXWDYrm 4948
+VPMOVSXWDYrr 4949
+VPMOVSXWDZ 4950
+VPMOVSXWDZrm 4951
+VPMOVSXWDZrmk 4952
+VPMOVSXWDZrmkz 4953
+VPMOVSXWDZrr 4954
+VPMOVSXWDZrrk 4955
+VPMOVSXWDZrrkz 4956
+VPMOVSXWDrm 4957
+VPMOVSXWDrr 4958
+VPMOVSXWQYrm 4959
+VPMOVSXWQYrr 4960
+VPMOVSXWQZ 4961
+VPMOVSXWQZrm 4962
+VPMOVSXWQZrmk 4963
+VPMOVSXWQZrmkz 4964
+VPMOVSXWQZrr 4965
+VPMOVSXWQZrrk 4966
+VPMOVSXWQZrrkz 4967
+VPMOVSXWQrm 4968
+VPMOVSXWQrr 4969
+VPMOVUSDBZ 4970
+VPMOVUSDBZmr 4971
+VPMOVUSDBZmrk 4972
+VPMOVUSDBZrr 4973
+VPMOVUSDBZrrk 4974
+VPMOVUSDBZrrkz 4975
+VPMOVUSDWZ 4976
+VPMOVUSDWZmr 4977
+VPMOVUSDWZmrk 4978
+VPMOVUSDWZrr 4979
+VPMOVUSDWZrrk 4980
+VPMOVUSDWZrrkz 4981
+VPMOVUSQBZ 4982
+VPMOVUSQBZmr 4983
+VPMOVUSQBZmrk 4984
+VPMOVUSQBZrr 4985
+VPMOVUSQBZrrk 4986
+VPMOVUSQBZrrkz 4987
+VPMOVUSQDZ 4988
+VPMOVUSQDZmr 4989
+VPMOVUSQDZmrk 4990
+VPMOVUSQDZrr 4991
+VPMOVUSQDZrrk 4992
+VPMOVUSQDZrrkz 4993
+VPMOVUSQWZ 4994
+VPMOVUSQWZmr 4995
+VPMOVUSQWZmrk 4996
+VPMOVUSQWZrr 4997
+VPMOVUSQWZrrk 4998
+VPMOVUSQWZrrkz 4999
+VPMOVUSWBZ 5000
+VPMOVUSWBZmr 5001
+VPMOVUSWBZmrk 5002
+VPMOVUSWBZrr 5003
+VPMOVUSWBZrrk 5004
+VPMOVUSWBZrrkz 5005
+VPMOVW 5006
+VPMOVWBZ 5007
+VPMOVWBZmr 5008
+VPMOVWBZmrk 5009
+VPMOVWBZrr 5010
+VPMOVWBZrrk 5011
+VPMOVWBZrrkz 5012
+VPMOVZXBDYrm 5013
+VPMOVZXBDYrr 5014
+VPMOVZXBDZ 5015
+VPMOVZXBDZrm 5016
+VPMOVZXBDZrmk 5017
+VPMOVZXBDZrmkz 5018
+VPMOVZXBDZrr 5019
+VPMOVZXBDZrrk 5020
+VPMOVZXBDZrrkz 5021
+VPMOVZXBDrm 5022
+VPMOVZXBDrr 5023
+VPMOVZXBQYrm 5024
+VPMOVZXBQYrr 5025
+VPMOVZXBQZ 5026
+VPMOVZXBQZrm 5027
+VPMOVZXBQZrmk 5028
+VPMOVZXBQZrmkz 5029
+VPMOVZXBQZrr 5030
+VPMOVZXBQZrrk 5031
+VPMOVZXBQZrrkz 5032
+VPMOVZXBQrm 5033
+VPMOVZXBQrr 5034
+VPMOVZXBWYrm 5035
+VPMOVZXBWYrr 5036
+VPMOVZXBWZ 5037
+VPMOVZXBWZrm 5038
+VPMOVZXBWZrmk 5039
+VPMOVZXBWZrmkz 5040
+VPMOVZXBWZrr 5041
+VPMOVZXBWZrrk 5042
+VPMOVZXBWZrrkz 5043
+VPMOVZXBWrm 5044
+VPMOVZXBWrr 5045
+VPMOVZXDQYrm 5046
+VPMOVZXDQYrr 5047
+VPMOVZXDQZ 5048
+VPMOVZXDQZrm 5049
+VPMOVZXDQZrmk 5050
+VPMOVZXDQZrmkz 5051
+VPMOVZXDQZrr 5052
+VPMOVZXDQZrrk 5053
+VPMOVZXDQZrrkz 5054
+VPMOVZXDQrm 5055
+VPMOVZXDQrr 5056
+VPMOVZXWDYrm 5057
+VPMOVZXWDYrr 5058
+VPMOVZXWDZ 5059
+VPMOVZXWDZrm 5060
+VPMOVZXWDZrmk 5061
+VPMOVZXWDZrmkz 5062
+VPMOVZXWDZrr 5063
+VPMOVZXWDZrrk 5064
+VPMOVZXWDZrrkz 5065
+VPMOVZXWDrm 5066
+VPMOVZXWDrr 5067
+VPMOVZXWQYrm 5068
+VPMOVZXWQYrr 5069
+VPMOVZXWQZ 5070
+VPMOVZXWQZrm 5071
+VPMOVZXWQZrmk 5072
+VPMOVZXWQZrmkz 5073
+VPMOVZXWQZrr 5074
+VPMOVZXWQZrrk 5075
+VPMOVZXWQZrrkz 5076
+VPMOVZXWQrm 5077
+VPMOVZXWQrr 5078
+VPMULDQYrm 5079
+VPMULDQYrr 5080
+VPMULDQZ 5081
+VPMULDQZrm 5082
+VPMULDQZrmb 5083
+VPMULDQZrmbk 5084
+VPMULDQZrmbkz 5085
+VPMULDQZrmk 5086
+VPMULDQZrmkz 5087
+VPMULDQZrr 5088
+VPMULDQZrrk 5089
+VPMULDQZrrkz 5090
+VPMULDQrm 5091
+VPMULDQrr 5092
+VPMULHRSWYrm 5093
+VPMULHRSWYrr 5094
+VPMULHRSWZ 5095
+VPMULHRSWZrm 5096
+VPMULHRSWZrmk 5097
+VPMULHRSWZrmkz 5098
+VPMULHRSWZrr 5099
+VPMULHRSWZrrk 5100
+VPMULHRSWZrrkz 5101
+VPMULHRSWrm 5102
+VPMULHRSWrr 5103
+VPMULHUWYrm 5104
+VPMULHUWYrr 5105
+VPMULHUWZ 5106
+VPMULHUWZrm 5107
+VPMULHUWZrmk 5108
+VPMULHUWZrmkz 5109
+VPMULHUWZrr 5110
+VPMULHUWZrrk 5111
+VPMULHUWZrrkz 5112
+VPMULHUWrm 5113
+VPMULHUWrr 5114
+VPMULHWYrm 5115
+VPMULHWYrr 5116
+VPMULHWZ 5117
+VPMULHWZrm 5118
+VPMULHWZrmk 5119
+VPMULHWZrmkz 5120
+VPMULHWZrr 5121
+VPMULHWZrrk 5122
+VPMULHWZrrkz 5123
+VPMULHWrm 5124
+VPMULHWrr 5125
+VPMULLDYrm 5126
+VPMULLDYrr 5127
+VPMULLDZ 5128
+VPMULLDZrm 5129
+VPMULLDZrmb 5130
+VPMULLDZrmbk 5131
+VPMULLDZrmbkz 5132
+VPMULLDZrmk 5133
+VPMULLDZrmkz 5134
+VPMULLDZrr 5135
+VPMULLDZrrk 5136
+VPMULLDZrrkz 5137
+VPMULLDrm 5138
+VPMULLDrr 5139
+VPMULLQZ 5140
+VPMULLQZrm 5141
+VPMULLQZrmb 5142
+VPMULLQZrmbk 5143
+VPMULLQZrmbkz 5144
+VPMULLQZrmk 5145
+VPMULLQZrmkz 5146
+VPMULLQZrr 5147
+VPMULLQZrrk 5148
+VPMULLQZrrkz 5149
+VPMULLWYrm 5150
+VPMULLWYrr 5151
+VPMULLWZ 5152
+VPMULLWZrm 5153
+VPMULLWZrmk 5154
+VPMULLWZrmkz 5155
+VPMULLWZrr 5156
+VPMULLWZrrk 5157
+VPMULLWZrrkz 5158
+VPMULLWrm 5159
+VPMULLWrr 5160
+VPMULTISHIFTQBZ 5161
+VPMULTISHIFTQBZrm 5162
+VPMULTISHIFTQBZrmb 5163
+VPMULTISHIFTQBZrmbk 5164
+VPMULTISHIFTQBZrmbkz 5165
+VPMULTISHIFTQBZrmk 5166
+VPMULTISHIFTQBZrmkz 5167
+VPMULTISHIFTQBZrr 5168
+VPMULTISHIFTQBZrrk 5169
+VPMULTISHIFTQBZrrkz 5170
+VPMULUDQYrm 5171
+VPMULUDQYrr 5172
+VPMULUDQZ 5173
+VPMULUDQZrm 5174
+VPMULUDQZrmb 5175
+VPMULUDQZrmbk 5176
+VPMULUDQZrmbkz 5177
+VPMULUDQZrmk 5178
+VPMULUDQZrmkz 5179
+VPMULUDQZrr 5180
+VPMULUDQZrrk 5181
+VPMULUDQZrrkz 5182
+VPMULUDQrm 5183
+VPMULUDQrr 5184
+VPOPCNTBZ 5185
+VPOPCNTBZrm 5186
+VPOPCNTBZrmk 5187
+VPOPCNTBZrmkz 5188
+VPOPCNTBZrr 5189
+VPOPCNTBZrrk 5190
+VPOPCNTBZrrkz 5191
+VPOPCNTDZ 5192
+VPOPCNTDZrm 5193
+VPOPCNTDZrmb 5194
+VPOPCNTDZrmbk 5195
+VPOPCNTDZrmbkz 5196
+VPOPCNTDZrmk 5197
+VPOPCNTDZrmkz 5198
+VPOPCNTDZrr 5199
+VPOPCNTDZrrk 5200
+VPOPCNTDZrrkz 5201
+VPOPCNTQZ 5202
+VPOPCNTQZrm 5203
+VPOPCNTQZrmb 5204
+VPOPCNTQZrmbk 5205
+VPOPCNTQZrmbkz 5206
+VPOPCNTQZrmk 5207
+VPOPCNTQZrmkz 5208
+VPOPCNTQZrr 5209
+VPOPCNTQZrrk 5210
+VPOPCNTQZrrkz 5211
+VPOPCNTWZ 5212
+VPOPCNTWZrm 5213
+VPOPCNTWZrmk 5214
+VPOPCNTWZrmkz 5215
+VPOPCNTWZrr 5216
+VPOPCNTWZrrk 5217
+VPOPCNTWZrrkz 5218
+VPORDZ 5219
+VPORDZrm 5220
+VPORDZrmb 5221
+VPORDZrmbk 5222
+VPORDZrmbkz 5223
+VPORDZrmk 5224
+VPORDZrmkz 5225
+VPORDZrr 5226
+VPORDZrrk 5227
+VPORDZrrkz 5228
+VPORQZ 5229
+VPORQZrm 5230
+VPORQZrmb 5231
+VPORQZrmbk 5232
+VPORQZrmbkz 5233
+VPORQZrmk 5234
+VPORQZrmkz 5235
+VPORQZrr 5236
+VPORQZrrk 5237
+VPORQZrrkz 5238
+VPORYrm 5239
+VPORYrr 5240
+VPORrm 5241
+VPORrr 5242
+VPPERMrmr 5243
+VPPERMrrm 5244
+VPPERMrrr 5245
+VPPERMrrr_REV 5246
+VPROLDZ 5247
+VPROLDZmbi 5248
+VPROLDZmbik 5249
+VPROLDZmbikz 5250
+VPROLDZmi 5251
+VPROLDZmik 5252
+VPROLDZmikz 5253
+VPROLDZri 5254
+VPROLDZrik 5255
+VPROLDZrikz 5256
+VPROLQZ 5257
+VPROLQZmbi 5258
+VPROLQZmbik 5259
+VPROLQZmbikz 5260
+VPROLQZmi 5261
+VPROLQZmik 5262
+VPROLQZmikz 5263
+VPROLQZri 5264
+VPROLQZrik 5265
+VPROLQZrikz 5266
+VPROLVDZ 5267
+VPROLVDZrm 5268
+VPROLVDZrmb 5269
+VPROLVDZrmbk 5270
+VPROLVDZrmbkz 5271
+VPROLVDZrmk 5272
+VPROLVDZrmkz 5273
+VPROLVDZrr 5274
+VPROLVDZrrk 5275
+VPROLVDZrrkz 5276
+VPROLVQZ 5277
+VPROLVQZrm 5278
+VPROLVQZrmb 5279
+VPROLVQZrmbk 5280
+VPROLVQZrmbkz 5281
+VPROLVQZrmk 5282
+VPROLVQZrmkz 5283
+VPROLVQZrr 5284
+VPROLVQZrrk 5285
+VPROLVQZrrkz 5286
+VPRORDZ 5287
+VPRORDZmbi 5288
+VPRORDZmbik 5289
+VPRORDZmbikz 5290
+VPRORDZmi 5291
+VPRORDZmik 5292
+VPRORDZmikz 5293
+VPRORDZri 5294
+VPRORDZrik 5295
+VPRORDZrikz 5296
+VPRORQZ 5297
+VPRORQZmbi 5298
+VPRORQZmbik 5299
+VPRORQZmbikz 5300
+VPRORQZmi 5301
+VPRORQZmik 5302
+VPRORQZmikz 5303
+VPRORQZri 5304
+VPRORQZrik 5305
+VPRORQZrikz 5306
+VPRORVDZ 5307
+VPRORVDZrm 5308
+VPRORVDZrmb 5309
+VPRORVDZrmbk 5310
+VPRORVDZrmbkz 5311
+VPRORVDZrmk 5312
+VPRORVDZrmkz 5313
+VPRORVDZrr 5314
+VPRORVDZrrk 5315
+VPRORVDZrrkz 5316
+VPRORVQZ 5317
+VPRORVQZrm 5318
+VPRORVQZrmb 5319
+VPRORVQZrmbk 5320
+VPRORVQZrmbkz 5321
+VPRORVQZrmk 5322
+VPRORVQZrmkz 5323
+VPRORVQZrr 5324
+VPRORVQZrrk 5325
+VPRORVQZrrkz 5326
+VPROTBmi 5327
+VPROTBmr 5328
+VPROTBri 5329
+VPROTBrm 5330
+VPROTBrr 5331
+VPROTBrr_REV 5332
+VPROTDmi 5333
+VPROTDmr 5334
+VPROTDri 5335
+VPROTDrm 5336
+VPROTDrr 5337
+VPROTDrr_REV 5338
+VPROTQmi 5339
+VPROTQmr 5340
+VPROTQri 5341
+VPROTQrm 5342
+VPROTQrr 5343
+VPROTQrr_REV 5344
+VPROTWmi 5345
+VPROTWmr 5346
+VPROTWri 5347
+VPROTWrm 5348
+VPROTWrr 5349
+VPROTWrr_REV 5350
+VPSADBWYrm 5351
+VPSADBWYrr 5352
+VPSADBWZ 5353
+VPSADBWZrm 5354
+VPSADBWZrr 5355
+VPSADBWrm 5356
+VPSADBWrr 5357
+VPSCATTERDDZ 5358
+VPSCATTERDDZmr 5359
+VPSCATTERDQZ 5360
+VPSCATTERDQZmr 5361
+VPSCATTERQDZ 5362
+VPSCATTERQDZmr 5363
+VPSCATTERQQZ 5364
+VPSCATTERQQZmr 5365
+VPSHABmr 5366
+VPSHABrm 5367
+VPSHABrr 5368
+VPSHABrr_REV 5369
+VPSHADmr 5370
+VPSHADrm 5371
+VPSHADrr 5372
+VPSHADrr_REV 5373
+VPSHAQmr 5374
+VPSHAQrm 5375
+VPSHAQrr 5376
+VPSHAQrr_REV 5377
+VPSHAWmr 5378
+VPSHAWrm 5379
+VPSHAWrr 5380
+VPSHAWrr_REV 5381
+VPSHLBmr 5382
+VPSHLBrm 5383
+VPSHLBrr 5384
+VPSHLBrr_REV 5385
+VPSHLDDZ 5386
+VPSHLDDZrmbi 5387
+VPSHLDDZrmbik 5388
+VPSHLDDZrmbikz 5389
+VPSHLDDZrmi 5390
+VPSHLDDZrmik 5391
+VPSHLDDZrmikz 5392
+VPSHLDDZrri 5393
+VPSHLDDZrrik 5394
+VPSHLDDZrrikz 5395
+VPSHLDQZ 5396
+VPSHLDQZrmbi 5397
+VPSHLDQZrmbik 5398
+VPSHLDQZrmbikz 5399
+VPSHLDQZrmi 5400
+VPSHLDQZrmik 5401
+VPSHLDQZrmikz 5402
+VPSHLDQZrri 5403
+VPSHLDQZrrik 5404
+VPSHLDQZrrikz 5405
+VPSHLDVDZ 5406
+VPSHLDVDZm 5407
+VPSHLDVDZmb 5408
+VPSHLDVDZmbk 5409
+VPSHLDVDZmbkz 5410
+VPSHLDVDZmk 5411
+VPSHLDVDZmkz 5412
+VPSHLDVDZr 5413
+VPSHLDVDZrk 5414
+VPSHLDVDZrkz 5415
+VPSHLDVQZ 5416
+VPSHLDVQZm 5417
+VPSHLDVQZmb 5418
+VPSHLDVQZmbk 5419
+VPSHLDVQZmbkz 5420
+VPSHLDVQZmk 5421
+VPSHLDVQZmkz 5422
+VPSHLDVQZr 5423
+VPSHLDVQZrk 5424
+VPSHLDVQZrkz 5425
+VPSHLDVWZ 5426
+VPSHLDVWZm 5427
+VPSHLDVWZmk 5428
+VPSHLDVWZmkz 5429
+VPSHLDVWZr 5430
+VPSHLDVWZrk 5431
+VPSHLDVWZrkz 5432
+VPSHLDWZ 5433
+VPSHLDWZrmi 5434
+VPSHLDWZrmik 5435
+VPSHLDWZrmikz 5436
+VPSHLDWZrri 5437
+VPSHLDWZrrik 5438
+VPSHLDWZrrikz 5439
+VPSHLDmr 5440
+VPSHLDrm 5441
+VPSHLDrr 5442
+VPSHLDrr_REV 5443
+VPSHLQmr 5444
+VPSHLQrm 5445
+VPSHLQrr 5446
+VPSHLQrr_REV 5447
+VPSHLWmr 5448
+VPSHLWrm 5449
+VPSHLWrr 5450
+VPSHLWrr_REV 5451
+VPSHRDDZ 5452
+VPSHRDDZrmbi 5453
+VPSHRDDZrmbik 5454
+VPSHRDDZrmbikz 5455
+VPSHRDDZrmi 5456
+VPSHRDDZrmik 5457
+VPSHRDDZrmikz 5458
+VPSHRDDZrri 5459
+VPSHRDDZrrik 5460
+VPSHRDDZrrikz 5461
+VPSHRDQZ 5462
+VPSHRDQZrmbi 5463
+VPSHRDQZrmbik 5464
+VPSHRDQZrmbikz 5465
+VPSHRDQZrmi 5466
+VPSHRDQZrmik 5467
+VPSHRDQZrmikz 5468
+VPSHRDQZrri 5469
+VPSHRDQZrrik 5470
+VPSHRDQZrrikz 5471
+VPSHRDVDZ 5472
+VPSHRDVDZm 5473
+VPSHRDVDZmb 5474
+VPSHRDVDZmbk 5475
+VPSHRDVDZmbkz 5476
+VPSHRDVDZmk 5477
+VPSHRDVDZmkz 5478
+VPSHRDVDZr 5479
+VPSHRDVDZrk 5480
+VPSHRDVDZrkz 5481
+VPSHRDVQZ 5482
+VPSHRDVQZm 5483
+VPSHRDVQZmb 5484
+VPSHRDVQZmbk 5485
+VPSHRDVQZmbkz 5486
+VPSHRDVQZmk 5487
+VPSHRDVQZmkz 5488
+VPSHRDVQZr 5489
+VPSHRDVQZrk 5490
+VPSHRDVQZrkz 5491
+VPSHRDVWZ 5492
+VPSHRDVWZm 5493
+VPSHRDVWZmk 5494
+VPSHRDVWZmkz 5495
+VPSHRDVWZr 5496
+VPSHRDVWZrk 5497
+VPSHRDVWZrkz 5498
+VPSHRDWZ 5499
+VPSHRDWZrmi 5500
+VPSHRDWZrmik 5501
+VPSHRDWZrmikz 5502
+VPSHRDWZrri 5503
+VPSHRDWZrrik 5504
+VPSHRDWZrrikz 5505
+VPSHUFBITQMBZ 5506
+VPSHUFBITQMBZrm 5507
+VPSHUFBITQMBZrmk 5508
+VPSHUFBITQMBZrr 5509
+VPSHUFBITQMBZrrk 5510
+VPSHUFBYrm 5511
+VPSHUFBYrr 5512
+VPSHUFBZ 5513
+VPSHUFBZrm 5514
+VPSHUFBZrmk 5515
+VPSHUFBZrmkz 5516
+VPSHUFBZrr 5517
+VPSHUFBZrrk 5518
+VPSHUFBZrrkz 5519
+VPSHUFBrm 5520
+VPSHUFBrr 5521
+VPSHUFDYmi 5522
+VPSHUFDYri 5523
+VPSHUFDZ 5524
+VPSHUFDZmbi 5525
+VPSHUFDZmbik 5526
+VPSHUFDZmbikz 5527
+VPSHUFDZmi 5528
+VPSHUFDZmik 5529
+VPSHUFDZmikz 5530
+VPSHUFDZri 5531
+VPSHUFDZrik 5532
+VPSHUFDZrikz 5533
+VPSHUFDmi 5534
+VPSHUFDri 5535
+VPSHUFHWYmi 5536
+VPSHUFHWYri 5537
+VPSHUFHWZ 5538
+VPSHUFHWZmi 5539
+VPSHUFHWZmik 5540
+VPSHUFHWZmikz 5541
+VPSHUFHWZri 5542
+VPSHUFHWZrik 5543
+VPSHUFHWZrikz 5544
+VPSHUFHWmi 5545
+VPSHUFHWri 5546
+VPSHUFLWYmi 5547
+VPSHUFLWYri 5548
+VPSHUFLWZ 5549
+VPSHUFLWZmi 5550
+VPSHUFLWZmik 5551
+VPSHUFLWZmikz 5552
+VPSHUFLWZri 5553
+VPSHUFLWZrik 5554
+VPSHUFLWZrikz 5555
+VPSHUFLWmi 5556
+VPSHUFLWri 5557
+VPSIGNBYrm 5558
+VPSIGNBYrr 5559
+VPSIGNBrm 5560
+VPSIGNBrr 5561
+VPSIGNDYrm 5562
+VPSIGNDYrr 5563
+VPSIGNDrm 5564
+VPSIGNDrr 5565
+VPSIGNWYrm 5566
+VPSIGNWYrr 5567
+VPSIGNWrm 5568
+VPSIGNWrr 5569
+VPSLLDQYri 5570
+VPSLLDQZ 5571
+VPSLLDQZmi 5572
+VPSLLDQZri 5573
+VPSLLDQri 5574
+VPSLLDYri 5575
+VPSLLDYrm 5576
+VPSLLDYrr 5577
+VPSLLDZ 5578
+VPSLLDZmbi 5579
+VPSLLDZmbik 5580
+VPSLLDZmbikz 5581
+VPSLLDZmi 5582
+VPSLLDZmik 5583
+VPSLLDZmikz 5584
+VPSLLDZri 5585
+VPSLLDZrik 5586
+VPSLLDZrikz 5587
+VPSLLDZrm 5588
+VPSLLDZrmk 5589
+VPSLLDZrmkz 5590
+VPSLLDZrr 5591
+VPSLLDZrrk 5592
+VPSLLDZrrkz 5593
+VPSLLDri 5594
+VPSLLDrm 5595
+VPSLLDrr 5596
+VPSLLQYri 5597
+VPSLLQYrm 5598
+VPSLLQYrr 5599
+VPSLLQZ 5600
+VPSLLQZmbi 5601
+VPSLLQZmbik 5602
+VPSLLQZmbikz 5603
+VPSLLQZmi 5604
+VPSLLQZmik 5605
+VPSLLQZmikz 5606
+VPSLLQZri 5607
+VPSLLQZrik 5608
+VPSLLQZrikz 5609
+VPSLLQZrm 5610
+VPSLLQZrmk 5611
+VPSLLQZrmkz 5612
+VPSLLQZrr 5613
+VPSLLQZrrk 5614
+VPSLLQZrrkz 5615
+VPSLLQri 5616
+VPSLLQrm 5617
+VPSLLQrr 5618
+VPSLLVDYrm 5619
+VPSLLVDYrr 5620
+VPSLLVDZ 5621
+VPSLLVDZrm 5622
+VPSLLVDZrmb 5623
+VPSLLVDZrmbk 5624
+VPSLLVDZrmbkz 5625
+VPSLLVDZrmk 5626
+VPSLLVDZrmkz 5627
+VPSLLVDZrr 5628
+VPSLLVDZrrk 5629
+VPSLLVDZrrkz 5630
+VPSLLVDrm 5631
+VPSLLVDrr 5632
+VPSLLVQYrm 5633
+VPSLLVQYrr 5634
+VPSLLVQZ 5635
+VPSLLVQZrm 5636
+VPSLLVQZrmb 5637
+VPSLLVQZrmbk 5638
+VPSLLVQZrmbkz 5639
+VPSLLVQZrmk 5640
+VPSLLVQZrmkz 5641
+VPSLLVQZrr 5642
+VPSLLVQZrrk 5643
+VPSLLVQZrrkz 5644
+VPSLLVQrm 5645
+VPSLLVQrr 5646
+VPSLLVWZ 5647
+VPSLLVWZrm 5648
+VPSLLVWZrmk 5649
+VPSLLVWZrmkz 5650
+VPSLLVWZrr 5651
+VPSLLVWZrrk 5652
+VPSLLVWZrrkz 5653
+VPSLLWYri 5654
+VPSLLWYrm 5655
+VPSLLWYrr 5656
+VPSLLWZ 5657
+VPSLLWZmi 5658
+VPSLLWZmik 5659
+VPSLLWZmikz 5660
+VPSLLWZri 5661
+VPSLLWZrik 5662
+VPSLLWZrikz 5663
+VPSLLWZrm 5664
+VPSLLWZrmk 5665
+VPSLLWZrmkz 5666
+VPSLLWZrr 5667
+VPSLLWZrrk 5668
+VPSLLWZrrkz 5669
+VPSLLWri 5670
+VPSLLWrm 5671
+VPSLLWrr 5672
+VPSRADYri 5673
+VPSRADYrm 5674
+VPSRADYrr 5675
+VPSRADZ 5676
+VPSRADZmbi 5677
+VPSRADZmbik 5678
+VPSRADZmbikz 5679
+VPSRADZmi 5680
+VPSRADZmik 5681
+VPSRADZmikz 5682
+VPSRADZri 5683
+VPSRADZrik 5684
+VPSRADZrikz 5685
+VPSRADZrm 5686
+VPSRADZrmk 5687
+VPSRADZrmkz 5688
+VPSRADZrr 5689
+VPSRADZrrk 5690
+VPSRADZrrkz 5691
+VPSRADri 5692
+VPSRADrm 5693
+VPSRADrr 5694
+VPSRAQZ 5695
+VPSRAQZmbi 5696
+VPSRAQZmbik 5697
+VPSRAQZmbikz 5698
+VPSRAQZmi 5699
+VPSRAQZmik 5700
+VPSRAQZmikz 5701
+VPSRAQZri 5702
+VPSRAQZrik 5703
+VPSRAQZrikz 5704
+VPSRAQZrm 5705
+VPSRAQZrmk 5706
+VPSRAQZrmkz 5707
+VPSRAQZrr 5708
+VPSRAQZrrk 5709
+VPSRAQZrrkz 5710
+VPSRAVDYrm 5711
+VPSRAVDYrr 5712
+VPSRAVDZ 5713
+VPSRAVDZrm 5714
+VPSRAVDZrmb 5715
+VPSRAVDZrmbk 5716
+VPSRAVDZrmbkz 5717
+VPSRAVDZrmk 5718
+VPSRAVDZrmkz 5719
+VPSRAVDZrr 5720
+VPSRAVDZrrk 5721
+VPSRAVDZrrkz 5722
+VPSRAVDrm 5723
+VPSRAVDrr 5724
+VPSRAVQZ 5725
+VPSRAVQZrm 5726
+VPSRAVQZrmb 5727
+VPSRAVQZrmbk 5728
+VPSRAVQZrmbkz 5729
+VPSRAVQZrmk 5730
+VPSRAVQZrmkz 5731
+VPSRAVQZrr 5732
+VPSRAVQZrrk 5733
+VPSRAVQZrrkz 5734
+VPSRAVWZ 5735
+VPSRAVWZrm 5736
+VPSRAVWZrmk 5737
+VPSRAVWZrmkz 5738
+VPSRAVWZrr 5739
+VPSRAVWZrrk 5740
+VPSRAVWZrrkz 5741
+VPSRAWYri 5742
+VPSRAWYrm 5743
+VPSRAWYrr 5744
+VPSRAWZ 5745
+VPSRAWZmi 5746
+VPSRAWZmik 5747
+VPSRAWZmikz 5748
+VPSRAWZri 5749
+VPSRAWZrik 5750
+VPSRAWZrikz 5751
+VPSRAWZrm 5752
+VPSRAWZrmk 5753
+VPSRAWZrmkz 5754
+VPSRAWZrr 5755
+VPSRAWZrrk 5756
+VPSRAWZrrkz 5757
+VPSRAWri 5758
+VPSRAWrm 5759
+VPSRAWrr 5760
+VPSRLDQYri 5761
+VPSRLDQZ 5762
+VPSRLDQZmi 5763
+VPSRLDQZri 5764
+VPSRLDQri 5765
+VPSRLDYri 5766
+VPSRLDYrm 5767
+VPSRLDYrr 5768
+VPSRLDZ 5769
+VPSRLDZmbi 5770
+VPSRLDZmbik 5771
+VPSRLDZmbikz 5772
+VPSRLDZmi 5773
+VPSRLDZmik 5774
+VPSRLDZmikz 5775
+VPSRLDZri 5776
+VPSRLDZrik 5777
+VPSRLDZrikz 5778
+VPSRLDZrm 5779
+VPSRLDZrmk 5780
+VPSRLDZrmkz 5781
+VPSRLDZrr 5782
+VPSRLDZrrk 5783
+VPSRLDZrrkz 5784
+VPSRLDri 5785
+VPSRLDrm 5786
+VPSRLDrr 5787
+VPSRLQYri 5788
+VPSRLQYrm 5789
+VPSRLQYrr 5790
+VPSRLQZ 5791
+VPSRLQZmbi 5792
+VPSRLQZmbik 5793
+VPSRLQZmbikz 5794
+VPSRLQZmi 5795
+VPSRLQZmik 5796
+VPSRLQZmikz 5797
+VPSRLQZri 5798
+VPSRLQZrik 5799
+VPSRLQZrikz 5800
+VPSRLQZrm 5801
+VPSRLQZrmk 5802
+VPSRLQZrmkz 5803
+VPSRLQZrr 5804
+VPSRLQZrrk 5805
+VPSRLQZrrkz 5806
+VPSRLQri 5807
+VPSRLQrm 5808
+VPSRLQrr 5809
+VPSRLVDYrm 5810
+VPSRLVDYrr 5811
+VPSRLVDZ 5812
+VPSRLVDZrm 5813
+VPSRLVDZrmb 5814
+VPSRLVDZrmbk 5815
+VPSRLVDZrmbkz 5816
+VPSRLVDZrmk 5817
+VPSRLVDZrmkz 5818
+VPSRLVDZrr 5819
+VPSRLVDZrrk 5820
+VPSRLVDZrrkz 5821
+VPSRLVDrm 5822
+VPSRLVDrr 5823
+VPSRLVQYrm 5824
+VPSRLVQYrr 5825
+VPSRLVQZ 5826
+VPSRLVQZrm 5827
+VPSRLVQZrmb 5828
+VPSRLVQZrmbk 5829
+VPSRLVQZrmbkz 5830
+VPSRLVQZrmk 5831
+VPSRLVQZrmkz 5832
+VPSRLVQZrr 5833
+VPSRLVQZrrk 5834
+VPSRLVQZrrkz 5835
+VPSRLVQrm 5836
+VPSRLVQrr 5837
+VPSRLVWZ 5838
+VPSRLVWZrm 5839
+VPSRLVWZrmk 5840
+VPSRLVWZrmkz 5841
+VPSRLVWZrr 5842
+VPSRLVWZrrk 5843
+VPSRLVWZrrkz 5844
+VPSRLWYri 5845
+VPSRLWYrm 5846
+VPSRLWYrr 5847
+VPSRLWZ 5848
+VPSRLWZmi 5849
+VPSRLWZmik 5850
+VPSRLWZmikz 5851
+VPSRLWZri 5852
+VPSRLWZrik 5853
+VPSRLWZrikz 5854
+VPSRLWZrm 5855
+VPSRLWZrmk 5856
+VPSRLWZrmkz 5857
+VPSRLWZrr 5858
+VPSRLWZrrk 5859
+VPSRLWZrrkz 5860
+VPSRLWri 5861
+VPSRLWrm 5862
+VPSRLWrr 5863
+VPSUBBYrm 5864
+VPSUBBYrr 5865
+VPSUBBZ 5866
+VPSUBBZrm 5867
+VPSUBBZrmk 5868
+VPSUBBZrmkz 5869
+VPSUBBZrr 5870
+VPSUBBZrrk 5871
+VPSUBBZrrkz 5872
+VPSUBBrm 5873
+VPSUBBrr 5874
+VPSUBDYrm 5875
+VPSUBDYrr 5876
+VPSUBDZ 5877
+VPSUBDZrm 5878
+VPSUBDZrmb 5879
+VPSUBDZrmbk 5880
+VPSUBDZrmbkz 5881
+VPSUBDZrmk 5882
+VPSUBDZrmkz 5883
+VPSUBDZrr 5884
+VPSUBDZrrk 5885
+VPSUBDZrrkz 5886
+VPSUBDrm 5887
+VPSUBDrr 5888
+VPSUBQYrm 5889
+VPSUBQYrr 5890
+VPSUBQZ 5891
+VPSUBQZrm 5892
+VPSUBQZrmb 5893
+VPSUBQZrmbk 5894
+VPSUBQZrmbkz 5895
+VPSUBQZrmk 5896
+VPSUBQZrmkz 5897
+VPSUBQZrr 5898
+VPSUBQZrrk 5899
+VPSUBQZrrkz 5900
+VPSUBQrm 5901
+VPSUBQrr 5902
+VPSUBSBYrm 5903
+VPSUBSBYrr 5904
+VPSUBSBZ 5905
+VPSUBSBZrm 5906
+VPSUBSBZrmk 5907
+VPSUBSBZrmkz 5908
+VPSUBSBZrr 5909
+VPSUBSBZrrk 5910
+VPSUBSBZrrkz 5911
+VPSUBSBrm 5912
+VPSUBSBrr 5913
+VPSUBSWYrm 5914
+VPSUBSWYrr 5915
+VPSUBSWZ 5916
+VPSUBSWZrm 5917
+VPSUBSWZrmk 5918
+VPSUBSWZrmkz 5919
+VPSUBSWZrr 5920
+VPSUBSWZrrk 5921
+VPSUBSWZrrkz 5922
+VPSUBSWrm 5923
+VPSUBSWrr 5924
+VPSUBUSBYrm 5925
+VPSUBUSBYrr 5926
+VPSUBUSBZ 5927
+VPSUBUSBZrm 5928
+VPSUBUSBZrmk 5929
+VPSUBUSBZrmkz 5930
+VPSUBUSBZrr 5931
+VPSUBUSBZrrk 5932
+VPSUBUSBZrrkz 5933
+VPSUBUSBrm 5934
+VPSUBUSBrr 5935
+VPSUBUSWYrm 5936
+VPSUBUSWYrr 5937
+VPSUBUSWZ 5938
+VPSUBUSWZrm 5939
+VPSUBUSWZrmk 5940
+VPSUBUSWZrmkz 5941
+VPSUBUSWZrr 5942
+VPSUBUSWZrrk 5943
+VPSUBUSWZrrkz 5944
+VPSUBUSWrm 5945
+VPSUBUSWrr 5946
+VPSUBWYrm 5947
+VPSUBWYrr 5948
+VPSUBWZ 5949
+VPSUBWZrm 5950
+VPSUBWZrmk 5951
+VPSUBWZrmkz 5952
+VPSUBWZrr 5953
+VPSUBWZrrk 5954
+VPSUBWZrrkz 5955
+VPSUBWrm 5956
+VPSUBWrr 5957
+VPTERNLOGDZ 5958
+VPTERNLOGDZrmbi 5959
+VPTERNLOGDZrmbik 5960
+VPTERNLOGDZrmbikz 5961
+VPTERNLOGDZrmi 5962
+VPTERNLOGDZrmik 5963
+VPTERNLOGDZrmikz 5964
+VPTERNLOGDZrri 5965
+VPTERNLOGDZrrik 5966
+VPTERNLOGDZrrikz 5967
+VPTERNLOGQZ 5968
+VPTERNLOGQZrmbi 5969
+VPTERNLOGQZrmbik 5970
+VPTERNLOGQZrmbikz 5971
+VPTERNLOGQZrmi 5972
+VPTERNLOGQZrmik 5973
+VPTERNLOGQZrmikz 5974
+VPTERNLOGQZrri 5975
+VPTERNLOGQZrrik 5976
+VPTERNLOGQZrrikz 5977
+VPTESTMBZ 5978
+VPTESTMBZrm 5979
+VPTESTMBZrmk 5980
+VPTESTMBZrr 5981
+VPTESTMBZrrk 5982
+VPTESTMDZ 5983
+VPTESTMDZrm 5984
+VPTESTMDZrmb 5985
+VPTESTMDZrmbk 5986
+VPTESTMDZrmk 5987
+VPTESTMDZrr 5988
+VPTESTMDZrrk 5989
+VPTESTMQZ 5990
+VPTESTMQZrm 5991
+VPTESTMQZrmb 5992
+VPTESTMQZrmbk 5993
+VPTESTMQZrmk 5994
+VPTESTMQZrr 5995
+VPTESTMQZrrk 5996
+VPTESTMWZ 5997
+VPTESTMWZrm 5998
+VPTESTMWZrmk 5999
+VPTESTMWZrr 6000
+VPTESTMWZrrk 6001
+VPTESTNMBZ 6002
+VPTESTNMBZrm 6003
+VPTESTNMBZrmk 6004
+VPTESTNMBZrr 6005
+VPTESTNMBZrrk 6006
+VPTESTNMDZ 6007
+VPTESTNMDZrm 6008
+VPTESTNMDZrmb 6009
+VPTESTNMDZrmbk 6010
+VPTESTNMDZrmk 6011
+VPTESTNMDZrr 6012
+VPTESTNMDZrrk 6013
+VPTESTNMQZ 6014
+VPTESTNMQZrm 6015
+VPTESTNMQZrmb 6016
+VPTESTNMQZrmbk 6017
+VPTESTNMQZrmk 6018
+VPTESTNMQZrr 6019
+VPTESTNMQZrrk 6020
+VPTESTNMWZ 6021
+VPTESTNMWZrm 6022
+VPTESTNMWZrmk 6023
+VPTESTNMWZrr 6024
+VPTESTNMWZrrk 6025
+VPTESTYrm 6026
+VPTESTYrr 6027
+VPTESTrm 6028
+VPTESTrr 6029
+VPUNPCKHBWYrm 6030
+VPUNPCKHBWYrr 6031
+VPUNPCKHBWZ 6032
+VPUNPCKHBWZrm 6033
+VPUNPCKHBWZrmk 6034
+VPUNPCKHBWZrmkz 6035
+VPUNPCKHBWZrr 6036
+VPUNPCKHBWZrrk 6037
+VPUNPCKHBWZrrkz 6038
+VPUNPCKHBWrm 6039
+VPUNPCKHBWrr 6040
+VPUNPCKHDQYrm 6041
+VPUNPCKHDQYrr 6042
+VPUNPCKHDQZ 6043
+VPUNPCKHDQZrm 6044
+VPUNPCKHDQZrmb 6045
+VPUNPCKHDQZrmbk 6046
+VPUNPCKHDQZrmbkz 6047
+VPUNPCKHDQZrmk 6048
+VPUNPCKHDQZrmkz 6049
+VPUNPCKHDQZrr 6050
+VPUNPCKHDQZrrk 6051
+VPUNPCKHDQZrrkz 6052
+VPUNPCKHDQrm 6053
+VPUNPCKHDQrr 6054
+VPUNPCKHQDQYrm 6055
+VPUNPCKHQDQYrr 6056
+VPUNPCKHQDQZ 6057
+VPUNPCKHQDQZrm 6058
+VPUNPCKHQDQZrmb 6059
+VPUNPCKHQDQZrmbk 6060
+VPUNPCKHQDQZrmbkz 6061
+VPUNPCKHQDQZrmk 6062
+VPUNPCKHQDQZrmkz 6063
+VPUNPCKHQDQZrr 6064
+VPUNPCKHQDQZrrk 6065
+VPUNPCKHQDQZrrkz 6066
+VPUNPCKHQDQrm 6067
+VPUNPCKHQDQrr 6068
+VPUNPCKHWDYrm 6069
+VPUNPCKHWDYrr 6070
+VPUNPCKHWDZ 6071
+VPUNPCKHWDZrm 6072
+VPUNPCKHWDZrmk 6073
+VPUNPCKHWDZrmkz 6074
+VPUNPCKHWDZrr 6075
+VPUNPCKHWDZrrk 6076
+VPUNPCKHWDZrrkz 6077
+VPUNPCKHWDrm 6078
+VPUNPCKHWDrr 6079
+VPUNPCKLBWYrm 6080
+VPUNPCKLBWYrr 6081
+VPUNPCKLBWZ 6082
+VPUNPCKLBWZrm 6083
+VPUNPCKLBWZrmk 6084
+VPUNPCKLBWZrmkz 6085
+VPUNPCKLBWZrr 6086
+VPUNPCKLBWZrrk 6087
+VPUNPCKLBWZrrkz 6088
+VPUNPCKLBWrm 6089
+VPUNPCKLBWrr 6090
+VPUNPCKLDQYrm 6091
+VPUNPCKLDQYrr 6092
+VPUNPCKLDQZ 6093
+VPUNPCKLDQZrm 6094
+VPUNPCKLDQZrmb 6095
+VPUNPCKLDQZrmbk 6096
+VPUNPCKLDQZrmbkz 6097
+VPUNPCKLDQZrmk 6098
+VPUNPCKLDQZrmkz 6099
+VPUNPCKLDQZrr 6100
+VPUNPCKLDQZrrk 6101
+VPUNPCKLDQZrrkz 6102
+VPUNPCKLDQrm 6103
+VPUNPCKLDQrr 6104
+VPUNPCKLQDQYrm 6105
+VPUNPCKLQDQYrr 6106
+VPUNPCKLQDQZ 6107
+VPUNPCKLQDQZrm 6108
+VPUNPCKLQDQZrmb 6109
+VPUNPCKLQDQZrmbk 6110
+VPUNPCKLQDQZrmbkz 6111
+VPUNPCKLQDQZrmk 6112
+VPUNPCKLQDQZrmkz 6113
+VPUNPCKLQDQZrr 6114
+VPUNPCKLQDQZrrk 6115
+VPUNPCKLQDQZrrkz 6116
+VPUNPCKLQDQrm 6117
+VPUNPCKLQDQrr 6118
+VPUNPCKLWDYrm 6119
+VPUNPCKLWDYrr 6120
+VPUNPCKLWDZ 6121
+VPUNPCKLWDZrm 6122
+VPUNPCKLWDZrmk 6123
+VPUNPCKLWDZrmkz 6124
+VPUNPCKLWDZrr 6125
+VPUNPCKLWDZrrk 6126
+VPUNPCKLWDZrrkz 6127
+VPUNPCKLWDrm 6128
+VPUNPCKLWDrr 6129
+VPXORDZ 6130
+VPXORDZrm 6131
+VPXORDZrmb 6132
+VPXORDZrmbk 6133
+VPXORDZrmbkz 6134
+VPXORDZrmk 6135
+VPXORDZrmkz 6136
+VPXORDZrr 6137
+VPXORDZrrk 6138
+VPXORDZrrkz 6139
+VPXORQZ 6140
+VPXORQZrm 6141
+VPXORQZrmb 6142
+VPXORQZrmbk 6143
+VPXORQZrmbkz 6144
+VPXORQZrmk 6145
+VPXORQZrmkz 6146
+VPXORQZrr 6147
+VPXORQZrrk 6148
+VPXORQZrrkz 6149
+VPXORYrm 6150
+VPXORYrr 6151
+VPXORrm 6152
+VPXORrr 6153
+VRANGEPDZ 6154
+VRANGEPDZrmbi 6155
+VRANGEPDZrmbik 6156
+VRANGEPDZrmbikz 6157
+VRANGEPDZrmi 6158
+VRANGEPDZrmik 6159
+VRANGEPDZrmikz 6160
+VRANGEPDZrri 6161
+VRANGEPDZrrib 6162
+VRANGEPDZrribk 6163
+VRANGEPDZrribkz 6164
+VRANGEPDZrrik 6165
+VRANGEPDZrrikz 6166
+VRANGEPSZ 6167
+VRANGEPSZrmbi 6168
+VRANGEPSZrmbik 6169
+VRANGEPSZrmbikz 6170
+VRANGEPSZrmi 6171
+VRANGEPSZrmik 6172
+VRANGEPSZrmikz 6173
+VRANGEPSZrri 6174
+VRANGEPSZrrib 6175
+VRANGEPSZrribk 6176
+VRANGEPSZrribkz 6177
+VRANGEPSZrrik 6178
+VRANGEPSZrrikz 6179
+VRANGESDZrmi 6180
+VRANGESDZrmik 6181
+VRANGESDZrmikz 6182
+VRANGESDZrri 6183
+VRANGESDZrrib 6184
+VRANGESDZrribk 6185
+VRANGESDZrribkz 6186
+VRANGESDZrrik 6187
+VRANGESDZrrikz 6188
+VRANGESSZrmi 6189
+VRANGESSZrmik 6190
+VRANGESSZrmikz 6191
+VRANGESSZrri 6192
+VRANGESSZrrib 6193
+VRANGESSZrribk 6194
+VRANGESSZrribkz 6195
+VRANGESSZrrik 6196
+VRANGESSZrrikz 6197
+VRCP 6198
+VRCPBF 6199
+VRCPPHZ 6200
+VRCPPHZm 6201
+VRCPPHZmb 6202
+VRCPPHZmbk 6203
+VRCPPHZmbkz 6204
+VRCPPHZmk 6205
+VRCPPHZmkz 6206
+VRCPPHZr 6207
+VRCPPHZrk 6208
+VRCPPHZrkz 6209
+VRCPPSYm 6210
+VRCPPSYr 6211
+VRCPPSm 6212
+VRCPPSr 6213
+VRCPSHZrm 6214
+VRCPSHZrmk 6215
+VRCPSHZrmkz 6216
+VRCPSHZrr 6217
+VRCPSHZrrk 6218
+VRCPSHZrrkz 6219
+VRCPSSm 6220
+VRCPSSm_Int 6221
+VRCPSSr 6222
+VRCPSSr_Int 6223
+VREDUCEBF 6224
+VREDUCEPDZ 6225
+VREDUCEPDZrmbi 6226
+VREDUCEPDZrmbik 6227
+VREDUCEPDZrmbikz 6228
+VREDUCEPDZrmi 6229
+VREDUCEPDZrmik 6230
+VREDUCEPDZrmikz 6231
+VREDUCEPDZrri 6232
+VREDUCEPDZrrib 6233
+VREDUCEPDZrribk 6234
+VREDUCEPDZrribkz 6235
+VREDUCEPDZrrik 6236
+VREDUCEPDZrrikz 6237
+VREDUCEPHZ 6238
+VREDUCEPHZrmbi 6239
+VREDUCEPHZrmbik 6240
+VREDUCEPHZrmbikz 6241
+VREDUCEPHZrmi 6242
+VREDUCEPHZrmik 6243
+VREDUCEPHZrmikz 6244
+VREDUCEPHZrri 6245
+VREDUCEPHZrrib 6246
+VREDUCEPHZrribk 6247
+VREDUCEPHZrribkz 6248
+VREDUCEPHZrrik 6249
+VREDUCEPHZrrikz 6250
+VREDUCEPSZ 6251
+VREDUCEPSZrmbi 6252
+VREDUCEPSZrmbik 6253
+VREDUCEPSZrmbikz 6254
+VREDUCEPSZrmi 6255
+VREDUCEPSZrmik 6256
+VREDUCEPSZrmikz 6257
+VREDUCEPSZrri 6258
+VREDUCEPSZrrib 6259
+VREDUCEPSZrribk 6260
+VREDUCEPSZrribkz 6261
+VREDUCEPSZrrik 6262
+VREDUCEPSZrrikz 6263
+VREDUCESDZrmi 6264
+VREDUCESDZrmik 6265
+VREDUCESDZrmikz 6266
+VREDUCESDZrri 6267
+VREDUCESDZrrib 6268
+VREDUCESDZrribk 6269
+VREDUCESDZrribkz 6270
+VREDUCESDZrrik 6271
+VREDUCESDZrrikz 6272
+VREDUCESHZrmi 6273
+VREDUCESHZrmik 6274
+VREDUCESHZrmikz 6275
+VREDUCESHZrri 6276
+VREDUCESHZrrib 6277
+VREDUCESHZrribk 6278
+VREDUCESHZrribkz 6279
+VREDUCESHZrrik 6280
+VREDUCESHZrrikz 6281
+VREDUCESSZrmi 6282
+VREDUCESSZrmik 6283
+VREDUCESSZrmikz 6284
+VREDUCESSZrri 6285
+VREDUCESSZrrib 6286
+VREDUCESSZrribk 6287
+VREDUCESSZrribkz 6288
+VREDUCESSZrrik 6289
+VREDUCESSZrrikz 6290
+VRNDSCALEBF 6291
+VRNDSCALEPDZ 6292
+VRNDSCALEPDZrmbi 6293
+VRNDSCALEPDZrmbik 6294
+VRNDSCALEPDZrmbikz 6295
+VRNDSCALEPDZrmi 6296
+VRNDSCALEPDZrmik 6297
+VRNDSCALEPDZrmikz 6298
+VRNDSCALEPDZrri 6299
+VRNDSCALEPDZrrib 6300
+VRNDSCALEPDZrribk 6301
+VRNDSCALEPDZrribkz 6302
+VRNDSCALEPDZrrik 6303
+VRNDSCALEPDZrrikz 6304
+VRNDSCALEPHZ 6305
+VRNDSCALEPHZrmbi 6306
+VRNDSCALEPHZrmbik 6307
+VRNDSCALEPHZrmbikz 6308
+VRNDSCALEPHZrmi 6309
+VRNDSCALEPHZrmik 6310
+VRNDSCALEPHZrmikz 6311
+VRNDSCALEPHZrri 6312
+VRNDSCALEPHZrrib 6313
+VRNDSCALEPHZrribk 6314
+VRNDSCALEPHZrribkz 6315
+VRNDSCALEPHZrrik 6316
+VRNDSCALEPHZrrikz 6317
+VRNDSCALEPSZ 6318
+VRNDSCALEPSZrmbi 6319
+VRNDSCALEPSZrmbik 6320
+VRNDSCALEPSZrmbikz 6321
+VRNDSCALEPSZrmi 6322
+VRNDSCALEPSZrmik 6323
+VRNDSCALEPSZrmikz 6324
+VRNDSCALEPSZrri 6325
+VRNDSCALEPSZrrib 6326
+VRNDSCALEPSZrribk 6327
+VRNDSCALEPSZrribkz 6328
+VRNDSCALEPSZrrik 6329
+VRNDSCALEPSZrrikz 6330
+VRNDSCALESDZrmi 6331
+VRNDSCALESDZrmi_Int 6332
+VRNDSCALESDZrmik_Int 6333
+VRNDSCALESDZrmikz_Int 6334
+VRNDSCALESDZrri 6335
+VRNDSCALESDZrri_Int 6336
+VRNDSCALESDZrrib_Int 6337
+VRNDSCALESDZrribk_Int 6338
+VRNDSCALESDZrribkz_Int 6339
+VRNDSCALESDZrrik_Int 6340
+VRNDSCALESDZrrikz_Int 6341
+VRNDSCALESHZrmi 6342
+VRNDSCALESHZrmi_Int 6343
+VRNDSCALESHZrmik_Int 6344
+VRNDSCALESHZrmikz_Int 6345
+VRNDSCALESHZrri 6346
+VRNDSCALESHZrri_Int 6347
+VRNDSCALESHZrrib_Int 6348
+VRNDSCALESHZrribk_Int 6349
+VRNDSCALESHZrribkz_Int 6350
+VRNDSCALESHZrrik_Int 6351
+VRNDSCALESHZrrikz_Int 6352
+VRNDSCALESSZrmi 6353
+VRNDSCALESSZrmi_Int 6354
+VRNDSCALESSZrmik_Int 6355
+VRNDSCALESSZrmikz_Int 6356
+VRNDSCALESSZrri 6357
+VRNDSCALESSZrri_Int 6358
+VRNDSCALESSZrrib_Int 6359
+VRNDSCALESSZrribk_Int 6360
+VRNDSCALESSZrribkz_Int 6361
+VRNDSCALESSZrrik_Int 6362
+VRNDSCALESSZrrikz_Int 6363
+VROUNDPDYmi 6364
+VROUNDPDYri 6365
+VROUNDPDmi 6366
+VROUNDPDri 6367
+VROUNDPSYmi 6368
+VROUNDPSYri 6369
+VROUNDPSmi 6370
+VROUNDPSri 6371
+VROUNDSDmi 6372
+VROUNDSDmi_Int 6373
+VROUNDSDri 6374
+VROUNDSDri_Int 6375
+VROUNDSSmi 6376
+VROUNDSSmi_Int 6377
+VROUNDSSri 6378
+VROUNDSSri_Int 6379
+VRSQRT 6380
+VRSQRTBF 6381
+VRSQRTPHZ 6382
+VRSQRTPHZm 6383
+VRSQRTPHZmb 6384
+VRSQRTPHZmbk 6385
+VRSQRTPHZmbkz 6386
+VRSQRTPHZmk 6387
+VRSQRTPHZmkz 6388
+VRSQRTPHZr 6389
+VRSQRTPHZrk 6390
+VRSQRTPHZrkz 6391
+VRSQRTPSYm 6392
+VRSQRTPSYr 6393
+VRSQRTPSm 6394
+VRSQRTPSr 6395
+VRSQRTSHZrm 6396
+VRSQRTSHZrmk 6397
+VRSQRTSHZrmkz 6398
+VRSQRTSHZrr 6399
+VRSQRTSHZrrk 6400
+VRSQRTSHZrrkz 6401
+VRSQRTSSm 6402
+VRSQRTSSm_Int 6403
+VRSQRTSSr 6404
+VRSQRTSSr_Int 6405
+VSCALEFBF 6406
+VSCALEFPDZ 6407
+VSCALEFPDZrm 6408
+VSCALEFPDZrmb 6409
+VSCALEFPDZrmbk 6410
+VSCALEFPDZrmbkz 6411
+VSCALEFPDZrmk 6412
+VSCALEFPDZrmkz 6413
+VSCALEFPDZrr 6414
+VSCALEFPDZrrb 6415
+VSCALEFPDZrrbk 6416
+VSCALEFPDZrrbkz 6417
+VSCALEFPDZrrk 6418
+VSCALEFPDZrrkz 6419
+VSCALEFPHZ 6420
+VSCALEFPHZrm 6421
+VSCALEFPHZrmb 6422
+VSCALEFPHZrmbk 6423
+VSCALEFPHZrmbkz 6424
+VSCALEFPHZrmk 6425
+VSCALEFPHZrmkz 6426
+VSCALEFPHZrr 6427
+VSCALEFPHZrrb 6428
+VSCALEFPHZrrbk 6429
+VSCALEFPHZrrbkz 6430
+VSCALEFPHZrrk 6431
+VSCALEFPHZrrkz 6432
+VSCALEFPSZ 6433
+VSCALEFPSZrm 6434
+VSCALEFPSZrmb 6435
+VSCALEFPSZrmbk 6436
+VSCALEFPSZrmbkz 6437
+VSCALEFPSZrmk 6438
+VSCALEFPSZrmkz 6439
+VSCALEFPSZrr 6440
+VSCALEFPSZrrb 6441
+VSCALEFPSZrrbk 6442
+VSCALEFPSZrrbkz 6443
+VSCALEFPSZrrk 6444
+VSCALEFPSZrrkz 6445
+VSCALEFSDZrm 6446
+VSCALEFSDZrmk 6447
+VSCALEFSDZrmkz 6448
+VSCALEFSDZrr 6449
+VSCALEFSDZrrb_Int 6450
+VSCALEFSDZrrbk_Int 6451
+VSCALEFSDZrrbkz_Int 6452
+VSCALEFSDZrrk 6453
+VSCALEFSDZrrkz 6454
+VSCALEFSHZrm 6455
+VSCALEFSHZrmk 6456
+VSCALEFSHZrmkz 6457
+VSCALEFSHZrr 6458
+VSCALEFSHZrrb_Int 6459
+VSCALEFSHZrrbk_Int 6460
+VSCALEFSHZrrbkz_Int 6461
+VSCALEFSHZrrk 6462
+VSCALEFSHZrrkz 6463
+VSCALEFSSZrm 6464
+VSCALEFSSZrmk 6465
+VSCALEFSSZrmkz 6466
+VSCALEFSSZrr 6467
+VSCALEFSSZrrb_Int 6468
+VSCALEFSSZrrbk_Int 6469
+VSCALEFSSZrrbkz_Int 6470
+VSCALEFSSZrrk 6471
+VSCALEFSSZrrkz 6472
+VSCATTERDPDZ 6473
+VSCATTERDPDZmr 6474
+VSCATTERDPSZ 6475
+VSCATTERDPSZmr 6476
+VSCATTERPF 6477
+VSCATTERQPDZ 6478
+VSCATTERQPDZmr 6479
+VSCATTERQPSZ 6480
+VSCATTERQPSZmr 6481
+VSHA 6482
+VSHUFF 6483
+VSHUFI 6484
+VSHUFPDYrmi 6485
+VSHUFPDYrri 6486
+VSHUFPDZ 6487
+VSHUFPDZrmbi 6488
+VSHUFPDZrmbik 6489
+VSHUFPDZrmbikz 6490
+VSHUFPDZrmi 6491
+VSHUFPDZrmik 6492
+VSHUFPDZrmikz 6493
+VSHUFPDZrri 6494
+VSHUFPDZrrik 6495
+VSHUFPDZrrikz 6496
+VSHUFPDrmi 6497
+VSHUFPDrri 6498
+VSHUFPSYrmi 6499
+VSHUFPSYrri 6500
+VSHUFPSZ 6501
+VSHUFPSZrmbi 6502
+VSHUFPSZrmbik 6503
+VSHUFPSZrmbikz 6504
+VSHUFPSZrmi 6505
+VSHUFPSZrmik 6506
+VSHUFPSZrmikz 6507
+VSHUFPSZrri 6508
+VSHUFPSZrrik 6509
+VSHUFPSZrrikz 6510
+VSHUFPSrmi 6511
+VSHUFPSrri 6512
+VSM 6513
+VSQRTBF 6514
+VSQRTPDYm 6515
+VSQRTPDYr 6516
+VSQRTPDZ 6517
+VSQRTPDZm 6518
+VSQRTPDZmb 6519
+VSQRTPDZmbk 6520
+VSQRTPDZmbkz 6521
+VSQRTPDZmk 6522
+VSQRTPDZmkz 6523
+VSQRTPDZr 6524
+VSQRTPDZrb 6525
+VSQRTPDZrbk 6526
+VSQRTPDZrbkz 6527
+VSQRTPDZrk 6528
+VSQRTPDZrkz 6529
+VSQRTPDm 6530
+VSQRTPDr 6531
+VSQRTPHZ 6532
+VSQRTPHZm 6533
+VSQRTPHZmb 6534
+VSQRTPHZmbk 6535
+VSQRTPHZmbkz 6536
+VSQRTPHZmk 6537
+VSQRTPHZmkz 6538
+VSQRTPHZr 6539
+VSQRTPHZrb 6540
+VSQRTPHZrbk 6541
+VSQRTPHZrbkz 6542
+VSQRTPHZrk 6543
+VSQRTPHZrkz 6544
+VSQRTPSYm 6545
+VSQRTPSYr 6546
+VSQRTPSZ 6547
+VSQRTPSZm 6548
+VSQRTPSZmb 6549
+VSQRTPSZmbk 6550
+VSQRTPSZmbkz 6551
+VSQRTPSZmk 6552
+VSQRTPSZmkz 6553
+VSQRTPSZr 6554
+VSQRTPSZrb 6555
+VSQRTPSZrbk 6556
+VSQRTPSZrbkz 6557
+VSQRTPSZrk 6558
+VSQRTPSZrkz 6559
+VSQRTPSm 6560
+VSQRTPSr 6561
+VSQRTSDZm 6562
+VSQRTSDZm_Int 6563
+VSQRTSDZmk_Int 6564
+VSQRTSDZmkz_Int 6565
+VSQRTSDZr 6566
+VSQRTSDZr_Int 6567
+VSQRTSDZrb_Int 6568
+VSQRTSDZrbk_Int 6569
+VSQRTSDZrbkz_Int 6570
+VSQRTSDZrk_Int 6571
+VSQRTSDZrkz_Int 6572
+VSQRTSDm 6573
+VSQRTSDm_Int 6574
+VSQRTSDr 6575
+VSQRTSDr_Int 6576
+VSQRTSHZm 6577
+VSQRTSHZm_Int 6578
+VSQRTSHZmk_Int 6579
+VSQRTSHZmkz_Int 6580
+VSQRTSHZr 6581
+VSQRTSHZr_Int 6582
+VSQRTSHZrb_Int 6583
+VSQRTSHZrbk_Int 6584
+VSQRTSHZrbkz_Int 6585
+VSQRTSHZrk_Int 6586
+VSQRTSHZrkz_Int 6587
+VSQRTSSZm 6588
+VSQRTSSZm_Int 6589
+VSQRTSSZmk_Int 6590
+VSQRTSSZmkz_Int 6591
+VSQRTSSZr 6592
+VSQRTSSZr_Int 6593
+VSQRTSSZrb_Int 6594
+VSQRTSSZrbk_Int 6595
+VSQRTSSZrbkz_Int 6596
+VSQRTSSZrk_Int 6597
+VSQRTSSZrkz_Int 6598
+VSQRTSSm 6599
+VSQRTSSm_Int 6600
+VSQRTSSr 6601
+VSQRTSSr_Int 6602
+VSTMXCSR 6603
+VSUBBF 6604
+VSUBPDYrm 6605
+VSUBPDYrr 6606
+VSUBPDZ 6607
+VSUBPDZrm 6608
+VSUBPDZrmb 6609
+VSUBPDZrmbk 6610
+VSUBPDZrmbkz 6611
+VSUBPDZrmk 6612
+VSUBPDZrmkz 6613
+VSUBPDZrr 6614
+VSUBPDZrrb 6615
+VSUBPDZrrbk 6616
+VSUBPDZrrbkz 6617
+VSUBPDZrrk 6618
+VSUBPDZrrkz 6619
+VSUBPDrm 6620
+VSUBPDrr 6621
+VSUBPHZ 6622
+VSUBPHZrm 6623
+VSUBPHZrmb 6624
+VSUBPHZrmbk 6625
+VSUBPHZrmbkz 6626
+VSUBPHZrmk 6627
+VSUBPHZrmkz 6628
+VSUBPHZrr 6629
+VSUBPHZrrb 6630
+VSUBPHZrrbk 6631
+VSUBPHZrrbkz 6632
+VSUBPHZrrk 6633
+VSUBPHZrrkz 6634
+VSUBPSYrm 6635
+VSUBPSYrr 6636
+VSUBPSZ 6637
+VSUBPSZrm 6638
+VSUBPSZrmb 6639
+VSUBPSZrmbk 6640
+VSUBPSZrmbkz 6641
+VSUBPSZrmk 6642
+VSUBPSZrmkz 6643
+VSUBPSZrr 6644
+VSUBPSZrrb 6645
+VSUBPSZrrbk 6646
+VSUBPSZrrbkz 6647
+VSUBPSZrrk 6648
+VSUBPSZrrkz 6649
+VSUBPSrm 6650
+VSUBPSrr 6651
+VSUBSDZrm 6652
+VSUBSDZrm_Int 6653
+VSUBSDZrmk_Int 6654
+VSUBSDZrmkz_Int 6655
+VSUBSDZrr 6656
+VSUBSDZrr_Int 6657
+VSUBSDZrrb_Int 6658
+VSUBSDZrrbk_Int 6659
+VSUBSDZrrbkz_Int 6660
+VSUBSDZrrk_Int 6661
+VSUBSDZrrkz_Int 6662
+VSUBSDrm 6663
+VSUBSDrm_Int 6664
+VSUBSDrr 6665
+VSUBSDrr_Int 6666
+VSUBSHZrm 6667
+VSUBSHZrm_Int 6668
+VSUBSHZrmk_Int 6669
+VSUBSHZrmkz_Int 6670
+VSUBSHZrr 6671
+VSUBSHZrr_Int 6672
+VSUBSHZrrb_Int 6673
+VSUBSHZrrbk_Int 6674
+VSUBSHZrrbkz_Int 6675
+VSUBSHZrrk_Int 6676
+VSUBSHZrrkz_Int 6677
+VSUBSSZrm 6678
+VSUBSSZrm_Int 6679
+VSUBSSZrmk_Int 6680
+VSUBSSZrmkz_Int 6681
+VSUBSSZrr 6682
+VSUBSSZrr_Int 6683
+VSUBSSZrrb_Int 6684
+VSUBSSZrrbk_Int 6685
+VSUBSSZrrbkz_Int 6686
+VSUBSSZrrk_Int 6687
+VSUBSSZrrkz_Int 6688
+VSUBSSrm 6689
+VSUBSSrm_Int 6690
+VSUBSSrr 6691
+VSUBSSrr_Int 6692
+VTESTPDYrm 6693
+VTESTPDYrr 6694
+VTESTPDrm 6695
+VTESTPDrr 6696
+VTESTPSYrm 6697
+VTESTPSYrr 6698
+VTESTPSrm 6699
+VTESTPSrr 6700
+VUCOMISDZrm 6701
+VUCOMISDZrm_Int 6702
+VUCOMISDZrr 6703
+VUCOMISDZrr_Int 6704
+VUCOMISDZrrb 6705
+VUCOMISDrm 6706
+VUCOMISDrm_Int 6707
+VUCOMISDrr 6708
+VUCOMISDrr_Int 6709
+VUCOMISHZrm 6710
+VUCOMISHZrm_Int 6711
+VUCOMISHZrr 6712
+VUCOMISHZrr_Int 6713
+VUCOMISHZrrb 6714
+VUCOMISSZrm 6715
+VUCOMISSZrm_Int 6716
+VUCOMISSZrr 6717
+VUCOMISSZrr_Int 6718
+VUCOMISSZrrb 6719
+VUCOMISSrm 6720
+VUCOMISSrm_Int 6721
+VUCOMISSrr 6722
+VUCOMISSrr_Int 6723
+VUCOMXSDZrm 6724
+VUCOMXSDZrm_Int 6725
+VUCOMXSDZrr 6726
+VUCOMXSDZrr_Int 6727
+VUCOMXSDZrrb_Int 6728
+VUCOMXSHZrm 6729
+VUCOMXSHZrm_Int 6730
+VUCOMXSHZrr 6731
+VUCOMXSHZrr_Int 6732
+VUCOMXSHZrrb_Int 6733
+VUCOMXSSZrm 6734
+VUCOMXSSZrm_Int 6735
+VUCOMXSSZrr 6736
+VUCOMXSSZrr_Int 6737
+VUCOMXSSZrrb_Int 6738
+VUNPCKHPDYrm 6739
+VUNPCKHPDYrr 6740
+VUNPCKHPDZ 6741
+VUNPCKHPDZrm 6742
+VUNPCKHPDZrmb 6743
+VUNPCKHPDZrmbk 6744
+VUNPCKHPDZrmbkz 6745
+VUNPCKHPDZrmk 6746
+VUNPCKHPDZrmkz 6747
+VUNPCKHPDZrr 6748
+VUNPCKHPDZrrk 6749
+VUNPCKHPDZrrkz 6750
+VUNPCKHPDrm 6751
+VUNPCKHPDrr 6752
+VUNPCKHPSYrm 6753
+VUNPCKHPSYrr 6754
+VUNPCKHPSZ 6755
+VUNPCKHPSZrm 6756
+VUNPCKHPSZrmb 6757
+VUNPCKHPSZrmbk 6758
+VUNPCKHPSZrmbkz 6759
+VUNPCKHPSZrmk 6760
+VUNPCKHPSZrmkz 6761
+VUNPCKHPSZrr 6762
+VUNPCKHPSZrrk 6763
+VUNPCKHPSZrrkz 6764
+VUNPCKHPSrm 6765
+VUNPCKHPSrr 6766
+VUNPCKLPDYrm 6767
+VUNPCKLPDYrr 6768
+VUNPCKLPDZ 6769
+VUNPCKLPDZrm 6770
+VUNPCKLPDZrmb 6771
+VUNPCKLPDZrmbk 6772
+VUNPCKLPDZrmbkz 6773
+VUNPCKLPDZrmk 6774
+VUNPCKLPDZrmkz 6775
+VUNPCKLPDZrr 6776
+VUNPCKLPDZrrk 6777
+VUNPCKLPDZrrkz 6778
+VUNPCKLPDrm 6779
+VUNPCKLPDrr 6780
+VUNPCKLPSYrm 6781
+VUNPCKLPSYrr 6782
+VUNPCKLPSZ 6783
+VUNPCKLPSZrm 6784
+VUNPCKLPSZrmb 6785
+VUNPCKLPSZrmbk 6786
+VUNPCKLPSZrmbkz 6787
+VUNPCKLPSZrmk 6788
+VUNPCKLPSZrmkz 6789
+VUNPCKLPSZrr 6790
+VUNPCKLPSZrrk 6791
+VUNPCKLPSZrrkz 6792
+VUNPCKLPSrm 6793
+VUNPCKLPSrr 6794
+VXORPDYrm 6795
+VXORPDYrr 6796
+VXORPDZ 6797
+VXORPDZrm 6798
+VXORPDZrmb 6799
+VXORPDZrmbk 6800
+VXORPDZrmbkz 6801
+VXORPDZrmk 6802
+VXORPDZrmkz 6803
+VXORPDZrr 6804
+VXORPDZrrk 6805
+VXORPDZrrkz 6806
+VXORPDrm 6807
+VXORPDrr 6808
+VXORPSYrm 6809
+VXORPSYrr 6810
+VXORPSZ 6811
+VXORPSZrm 6812
+VXORPSZrmb 6813
+VXORPSZrmbk 6814
+VXORPSZrmbkz 6815
+VXORPSZrmk 6816
+VXORPSZrmkz 6817
+VXORPSZrr 6818
+VXORPSZrrk 6819
+VXORPSZrrkz 6820
+VXORPSrm 6821
+VXORPSrr 6822
+VZEROALL 6823
+VZEROUPPER 6824
+V_SET 6825
+V_SETALLONES 6826
+WAIT 6827
+WBINVD 6828
+WBNOINVD 6829
+WRFLAGS 6830
+WRFSBASE 6831
+WRGSBASE 6832
+WRMSR 6833
+WRMSRLIST 6834
+WRMSRNS 6835
+WRMSRNSir 6836
+WRMSRNSir_EVEX 6837
+WRPKRUr 6838
+WRSSD 6839
+WRSSD_EVEX 6840
+WRSSQ 6841
+WRSSQ_EVEX 6842
+WRUSSD 6843
+WRUSSD_EVEX 6844
+WRUSSQ 6845
+WRUSSQ_EVEX 6846
+XABORT 6847
+XABORT_DEF 6848
+XACQUIRE_PREFIX 6849
+XADD 6850
+XAM_F 6851
+XAM_Fp 6852
+XBEGIN 6853
+XCHG 6854
+XCH_F 6855
+XCRYPTCBC 6856
+XCRYPTCFB 6857
+XCRYPTCTR 6858
+XCRYPTECB 6859
+XCRYPTOFB 6860
+XEND 6861
+XGETBV 6862
+XLAT 6863
+XOR 6864
+XORPDrm 6865
+XORPDrr 6866
+XORPSrm 6867
+XORPSrr 6868
+XRELEASE_PREFIX 6869
+XRESLDTRK 6870
+XRSTOR 6871
+XRSTORS 6872
+XSAVE 6873
+XSAVEC 6874
+XSAVEOPT 6875
+XSAVES 6876
+XSETBV 6877
+XSHA 6878
+XSTORE 6879
+XSUSLDTRK 6880
+XTEST 6881
+Immediate 6882
+CImmediate 6883
+FPImmediate 6884
+MBB 6885
+FrameIndex 6886
+ConstantPoolIndex 6887
+TargetIndex 6888
+JumpTableIndex 6889
+ExternalSymbol 6890
+GlobalAddress 6891
+BlockAddress 6892
+RegisterMask 6893
+RegisterLiveOut 6894
+Metadata 6895
+MCSymbol 6896
+CFIIndex 6897
+IntrinsicID 6898
+Predicate 6899
+ShuffleMask 6900
+PhyReg_GR8 6901
+PhyReg_GRH8 6902
+PhyReg_GR8_NOREX2 6903
+PhyReg_GR8_NOREX 6904
+PhyReg_GR8_ABCD_H 6905
+PhyReg_GR8_ABCD_L 6906
+PhyReg_GRH16 6907
+PhyReg_GR16 6908
+PhyReg_GR16_NOREX2 6909
+PhyReg_GR16_NOREX 6910
+PhyReg_VK1 6911
+PhyReg_VK16 6912
+PhyReg_VK2 6913
+PhyReg_VK4 6914
+PhyReg_VK8 6915
+PhyReg_VK16WM 6916
+PhyReg_VK1WM 6917
+PhyReg_VK2WM 6918
+PhyReg_VK4WM 6919
+PhyReg_VK8WM 6920
+PhyReg_SEGMENT_REG 6921
+PhyReg_GR16_ABCD 6922
+PhyReg_FPCCR 6923
+PhyReg_FR16X 6924
+PhyReg_FR16 6925
+PhyReg_VK16PAIR 6926
+PhyReg_VK1PAIR 6927
+PhyReg_VK2PAIR 6928
+PhyReg_VK4PAIR 6929
+PhyReg_VK8PAIR 6930
+PhyReg_VK1PAIR_with_sub_mask_0_in_VK1WM 6931
+PhyReg_LOW32_ADDR_ACCESS_RBP 6932
+PhyReg_LOW32_ADDR_ACCESS 6933
+PhyReg_LOW32_ADDR_ACCESS_RBP_with_sub_8bit 6934
+PhyReg_FR32X 6935
+PhyReg_GR32 6936
+PhyReg_GR32_NOSP 6937
+PhyReg_LOW32_ADDR_ACCESS_RBP_with_sub_16bit_in_GR16_NOREX2 6938
+PhyReg_DEBUG_REG 6939
+PhyReg_FR32 6940
+PhyReg_GR32_NOREX2 6941
+PhyReg_GR32_NOREX2_NOSP 6942
+PhyReg_LOW32_ADDR_ACCESS_RBP_with_sub_16bit_in_GR16_NOREX 6943
+PhyReg_GR32_NOREX 6944
+PhyReg_VK32 6945
+PhyReg_GR32_NOREX_NOSP 6946
+PhyReg_RFP32 6947
+PhyReg_VK32WM 6948
+PhyReg_GR32_ABCD 6949
+PhyReg_GR32_TC 6950
+PhyReg_GR32_ABCD_and_GR32_TC 6951
+PhyReg_GR32_AD 6952
+PhyReg_GR32_ArgRef 6953
+PhyReg_GR32_BPSP 6954
+PhyReg_GR32_BSI 6955
+PhyReg_GR32_CB 6956
+PhyReg_GR32_DC 6957
+PhyReg_GR32_DIBP 6958
+PhyReg_GR32_SIDI 6959
+PhyReg_LOW32_ADDR_ACCESS_RBP_with_sub_32bit 6960
+PhyReg_CCR 6961
+PhyReg_DFCCR 6962
+PhyReg_GR32_ABCD_and_GR32_BSI 6963
+PhyReg_GR32_AD_and_GR32_ArgRef 6964
+PhyReg_GR32_ArgRef_and_GR32_CB 6965
+PhyReg_GR32_BPSP_and_GR32_DIBP 6966
+PhyReg_GR32_BPSP_and_GR32_TC 6967
+PhyReg_GR32_BSI_and_GR32_SIDI 6968
+PhyReg_GR32_DIBP_and_GR32_SIDI 6969
+PhyReg_LOW32_ADDR_ACCESS_RBP_with_sub_8bit_with_sub_32bit 6970
+PhyReg_LOW32_ADDR_ACCESS_with_sub_32bit 6971
+PhyReg_RFP64 6972
+PhyReg_GR64 6973
+PhyReg_FR64X 6974
+PhyReg_GR64_with_sub_8bit 6975
+PhyReg_GR64_NOSP 6976
+PhyReg_GR64_NOREX2 6977
+PhyReg_CONTROL_REG 6978
+PhyReg_FR64 6979
+PhyReg_GR64_with_sub_16bit_in_GR16_NOREX2 6980
+PhyReg_GR64_NOREX2_NOSP 6981
+PhyReg_GR64PLTSafe 6982
+PhyReg_GR64_TC 6983
+PhyReg_GR64_NOREX 6984
+PhyReg_GR64_TCW64 6985
+PhyReg_GR64_TC_with_sub_8bit 6986
+PhyReg_GR64_NOREX2_NOSP_and_GR64_TC 6987
+PhyReg_GR64_TCW64_with_sub_8bit 6988
+PhyReg_GR64_TC_and_GR64_TCW64 6989
+PhyReg_GR64_with_sub_16bit_in_GR16_NOREX 6990
+PhyReg_VK64 6991
+PhyReg_VR64 6992
+PhyReg_GR64PLTSafe_and_GR64_TC 6993
+PhyReg_GR64_NOREX2_NOSP_and_GR64_TCW64 6994
+PhyReg_GR64_NOREX_NOSP 6995
+PhyReg_GR64_NOREX_and_GR64_TC 6996
+PhyReg_GR64_TCW64_and_GR64_TC_with_sub_8bit 6997
+PhyReg_VK64WM 6998
+PhyReg_GR64_TC_and_GR64_NOREX2_NOSP_and_GR64_TCW64 6999
+PhyReg_GR64_TC_and_GR64_with_sub_16bit_in_GR16_NOREX 7000
+PhyReg_GR64PLTSafe_and_GR64_TCW64 7001
+PhyReg_GR64_NOREX_and_GR64PLTSafe_and_GR64_TC 7002
+PhyReg_GR64_NOREX_and_GR64_TCW64 7003
+PhyReg_GR64_ABCD 7004
+PhyReg_GR64_with_sub_32bit_in_GR32_TC 7005
+PhyReg_GR64_with_sub_32bit_in_GR32_ABCD_and_GR32_TC 7006
+PhyReg_GR64_AD 7007
+PhyReg_GR64_ArgRef 7008
+PhyReg_GR64_and_LOW32_ADDR_ACCESS_RBP 7009
+PhyReg_GR64_with_sub_32bit_in_GR32_ArgRef 7010
+PhyReg_GR64_with_sub_32bit_in_GR32_BPSP 7011
+PhyReg_GR64_with_sub_32bit_in_GR32_BSI 7012
+PhyReg_GR64_with_sub_32bit_in_GR32_CB 7013
+PhyReg_GR64_with_sub_32bit_in_GR32_DIBP 7014
+PhyReg_GR64_with_sub_32bit_in_GR32_SIDI 7015
+PhyReg_GR64_A 7016
+PhyReg_GR64_ArgRef_and_GR64_TC 7017
+PhyReg_GR64_and_LOW32_ADDR_ACCESS 7018
+PhyReg_GR64_with_sub_32bit_in_GR32_ABCD_and_GR32_BSI 7019
+PhyReg_GR64_with_sub_32bit_in_GR32_AD_and_GR32_ArgRef 7020
+PhyReg_GR64_with_sub_32bit_in_GR32_ArgRef_and_GR32_CB 7021
+PhyReg_GR64_with_sub_32bit_in_GR32_BPSP_and_GR32_DIBP 7022
+PhyReg_GR64_with_sub_32bit_in_GR32_BPSP_and_GR32_TC 7023
+PhyReg_GR64_with_sub_32bit_in_GR32_BSI_and_GR32_SIDI 7024
+PhyReg_GR64_with_sub_32bit_in_GR32_DIBP_and_GR32_SIDI 7025
+PhyReg_RST 7026
+PhyReg_RFP80 7027
+PhyReg_RFP80_7 7028
+PhyReg_VR128X 7029
+PhyReg_VR128 7030
+PhyReg_VR256X 7031
+PhyReg_VR256 7032
+PhyReg_VR512 7033
+PhyReg_VR512_0_15 7034
+PhyReg_TILE 7035
+PhyReg_TILEPAIR 7036
+VirtReg_GR8 7037
+VirtReg_GRH8 7038
+VirtReg_GR8_NOREX2 7039
+VirtReg_GR8_NOREX 7040
+VirtReg_GR8_ABCD_H 7041
+VirtReg_GR8_ABCD_L 7042
+VirtReg_GRH16 7043
+VirtReg_GR16 7044
+VirtReg_GR16_NOREX2 7045
+VirtReg_GR16_NOREX 7046
+VirtReg_VK1 7047
+VirtReg_VK16 7048
+VirtReg_VK2 7049
+VirtReg_VK4 7050
+VirtReg_VK8 7051
+VirtReg_VK16WM 7052
+VirtReg_VK1WM 7053
+VirtReg_VK2WM 7054
+VirtReg_VK4WM 7055
+VirtReg_VK8WM 7056
+VirtReg_SEGMENT_REG 7057
+VirtReg_GR16_ABCD 7058
+VirtReg_FPCCR 7059
+VirtReg_FR16X 7060
+VirtReg_FR16 7061
+VirtReg_VK16PAIR 7062
+VirtReg_VK1PAIR 7063
+VirtReg_VK2PAIR 7064
+VirtReg_VK4PAIR 7065
+VirtReg_VK8PAIR 7066
+VirtReg_VK1PAIR_with_sub_mask_0_in_VK1WM 7067
+VirtReg_LOW32_ADDR_ACCESS_RBP 7068
+VirtReg_LOW32_ADDR_ACCESS 7069
+VirtReg_LOW32_ADDR_ACCESS_RBP_with_sub_8bit 7070
+VirtReg_FR32X 7071
+VirtReg_GR32 7072
+VirtReg_GR32_NOSP 7073
+VirtReg_LOW32_ADDR_ACCESS_RBP_with_sub_16bit_in_GR16_NOREX2 7074
+VirtReg_DEBUG_REG 7075
+VirtReg_FR32 7076
+VirtReg_GR32_NOREX2 7077
+VirtReg_GR32_NOREX2_NOSP 7078
+VirtReg_LOW32_ADDR_ACCESS_RBP_with_sub_16bit_in_GR16_NOREX 7079
+VirtReg_GR32_NOREX 7080
+VirtReg_VK32 7081
+VirtReg_GR32_NOREX_NOSP 7082
+VirtReg_RFP32 7083
+VirtReg_VK32WM 7084
+VirtReg_GR32_ABCD 7085
+VirtReg_GR32_TC 7086
+VirtReg_GR32_ABCD_and_GR32_TC 7087
+VirtReg_GR32_AD 7088
+VirtReg_GR32_ArgRef 7089
+VirtReg_GR32_BPSP 7090
+VirtReg_GR32_BSI 7091
+VirtReg_GR32_CB 7092
+VirtReg_GR32_DC 7093
+VirtReg_GR32_DIBP 7094
+VirtReg_GR32_SIDI 7095
+VirtReg_LOW32_ADDR_ACCESS_RBP_with_sub_32bit 7096
+VirtReg_CCR 7097
+VirtReg_DFCCR 7098
+VirtReg_GR32_ABCD_and_GR32_BSI 7099
+VirtReg_GR32_AD_and_GR32_ArgRef 7100
+VirtReg_GR32_ArgRef_and_GR32_CB 7101
+VirtReg_GR32_BPSP_and_GR32_DIBP 7102
+VirtReg_GR32_BPSP_and_GR32_TC 7103
+VirtReg_GR32_BSI_and_GR32_SIDI 7104
+VirtReg_GR32_DIBP_and_GR32_SIDI 7105
+VirtReg_LOW32_ADDR_ACCESS_RBP_with_sub_8bit_with_sub_32bit 7106
+VirtReg_LOW32_ADDR_ACCESS_with_sub_32bit 7107
+VirtReg_RFP64 7108
+VirtReg_GR64 7109
+VirtReg_FR64X 7110
+VirtReg_GR64_with_sub_8bit 7111
+VirtReg_GR64_NOSP 7112
+VirtReg_GR64_NOREX2 7113
+VirtReg_CONTROL_REG 7114
+VirtReg_FR64 7115
+VirtReg_GR64_with_sub_16bit_in_GR16_NOREX2 7116
+VirtReg_GR64_NOREX2_NOSP 7117
+VirtReg_GR64PLTSafe 7118
+VirtReg_GR64_TC 7119
+VirtReg_GR64_NOREX 7120
+VirtReg_GR64_TCW64 7121
+VirtReg_GR64_TC_with_sub_8bit 7122
+VirtReg_GR64_NOREX2_NOSP_and_GR64_TC 7123
+VirtReg_GR64_TCW64_with_sub_8bit 7124
+VirtReg_GR64_TC_and_GR64_TCW64 7125
+VirtReg_GR64_with_sub_16bit_in_GR16_NOREX 7126
+VirtReg_VK64 7127
+VirtReg_VR64 7128
+VirtReg_GR64PLTSafe_and_GR64_TC 7129
+VirtReg_GR64_NOREX2_NOSP_and_GR64_TCW64 7130
+VirtReg_GR64_NOREX_NOSP 7131
+VirtReg_GR64_NOREX_and_GR64_TC 7132
+VirtReg_GR64_TCW64_and_GR64_TC_with_sub_8bit 7133
+VirtReg_VK64WM 7134
+VirtReg_GR64_TC_and_GR64_NOREX2_NOSP_and_GR64_TCW64 7135
+VirtReg_GR64_TC_and_GR64_with_sub_16bit_in_GR16_NOREX 7136
+VirtReg_GR64PLTSafe_and_GR64_TCW64 7137
+VirtReg_GR64_NOREX_and_GR64PLTSafe_and_GR64_TC 7138
+VirtReg_GR64_NOREX_and_GR64_TCW64 7139
+VirtReg_GR64_ABCD 7140
+VirtReg_GR64_with_sub_32bit_in_GR32_TC 7141
+VirtReg_GR64_with_sub_32bit_in_GR32_ABCD_and_GR32_TC 7142
+VirtReg_GR64_AD 7143
+VirtReg_GR64_ArgRef 7144
+VirtReg_GR64_and_LOW32_ADDR_ACCESS_RBP 7145
+VirtReg_GR64_with_sub_32bit_in_GR32_ArgRef 7146
+VirtReg_GR64_with_sub_32bit_in_GR32_BPSP 7147
+VirtReg_GR64_with_sub_32bit_in_GR32_BSI 7148
+VirtReg_GR64_with_sub_32bit_in_GR32_CB 7149
+VirtReg_GR64_with_sub_32bit_in_GR32_DIBP 7150
+VirtReg_GR64_with_sub_32bit_in_GR32_SIDI 7151
+VirtReg_GR64_A 7152
+VirtReg_GR64_ArgRef_and_GR64_TC 7153
+VirtReg_GR64_and_LOW32_ADDR_ACCESS 7154
+VirtReg_GR64_with_sub_32bit_in_GR32_ABCD_and_GR32_BSI 7155
+VirtReg_GR64_with_sub_32bit_in_GR32_AD_and_GR32_ArgRef 7156
+VirtReg_GR64_with_sub_32bit_in_GR32_ArgRef_and_GR32_CB 7157
+VirtReg_GR64_with_sub_32bit_in_GR32_BPSP_and_GR32_DIBP 7158
+VirtReg_GR64_with_sub_32bit_in_GR32_BPSP_and_GR32_TC 7159
+VirtReg_GR64_with_sub_32bit_in_GR32_BSI_and_GR32_SIDI 7160
+VirtReg_GR64_with_sub_32bit_in_GR32_DIBP_and_GR32_SIDI 7161
+VirtReg_RST 7162
+VirtReg_RFP80 7163
+VirtReg_RFP80_7 7164
+VirtReg_VR128X 7165
+VirtReg_VR128 7166
+VirtReg_VR256X 7167
+VirtReg_VR256 7168
+VirtReg_VR512 7169
+VirtReg_VR512_0_15 7170
+VirtReg_TILE 7171
+VirtReg_TILEPAIR 7172
diff --git a/llvm/test/tools/llvm-ir2vec/triplets.mir b/llvm/test/tools/llvm-ir2vec/triplets.mir
new file mode 100644
index 0000000..274984a
--- /dev/null
+++ b/llvm/test/tools/llvm-ir2vec/triplets.mir
@@ -0,0 +1,61 @@
+# REQUIRES: x86_64-linux
+# RUN: llvm-ir2vec triplets --mode=mir %s -o %t1.log 2>&1
+# RUN: diff %S/output/reference_triplets.txt %t1.log
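+# The reference file holds the expected (head, tail, relation) triplets for the
+# two machine functions defined below.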
+
+--- |
+ target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+ target triple = "x86_64-unknown-linux-gnu"
+
+ define dso_local noundef i32 @add_function(i32 noundef %a, i32 noundef %b) {
+ entry:
+ %sum = add nsw i32 %a, %b
+ ret i32 %sum
+ }
+
+ define dso_local noundef i32 @mul_function(i32 noundef %x, i32 noundef %y) {
+ entry:
+ %product = mul nsw i32 %x, %y
+ ret i32 %product
+ }
+...
+---
+name: add_function
+alignment: 16
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr32 }
+ - { id: 1, class: gr32 }
+ - { id: 2, class: gr32 }
+liveins:
+ - { reg: '$edi', virtual-reg: '%0' }
+ - { reg: '$esi', virtual-reg: '%1' }
+body: |
+ bb.0.entry:
+ liveins: $edi, $esi
+
+ %1:gr32 = COPY $esi
+ %0:gr32 = COPY $edi
+ %2:gr32 = nsw ADD32rr %0, %1, implicit-def dead $eflags
+ $eax = COPY %2
+ RET 0, $eax
+
+---
+name: mul_function
+alignment: 16
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gr32 }
+ - { id: 1, class: gr32 }
+ - { id: 2, class: gr32 }
+liveins:
+ - { reg: '$edi', virtual-reg: '%0' }
+ - { reg: '$esi', virtual-reg: '%1' }
+body: |
+ bb.0.entry:
+ liveins: $edi, $esi
+
+ %1:gr32 = COPY $esi
+ %0:gr32 = COPY $edi
+ %2:gr32 = nsw IMUL32rr %0, %1, implicit-def dead $eflags
+ $eax = COPY %2
+ RET 0, $eax
diff --git a/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp b/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp
index a723d37..7402782 100644
--- a/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp
+++ b/llvm/tools/llvm-ir2vec/llvm-ir2vec.cpp
@@ -19,12 +19,22 @@
/// Generates numeric triplets (head, tail, relation) for vocabulary
/// training. Output format: MAX_RELATION=N header followed by
/// head\ttail\trelation lines. Relations: 0=Type, 1=Next, 2+=Arg0,Arg1,...
-/// Usage: llvm-ir2vec triplets input.bc -o train2id.txt
+///
+/// For LLVM IR:
+/// llvm-ir2vec triplets input.bc -o train2id.txt
+///
+/// For Machine IR:
+/// llvm-ir2vec triplets -mode=mir input.mir -o train2id.txt
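+///
+/// Example output (values are purely illustrative):
+///   MAX_RELATION=3
+///   102\t7\t2
+///   102\t205\t1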
///
/// 2. Entity Mappings (entities):
/// Generates entity mappings for vocabulary training.
/// Output format: <total_entities> header followed by entity\tid lines.
-/// Usage: llvm-ir2vec entities input.bc -o entity2id.txt
+///
+/// For LLVM IR:
+/// llvm-ir2vec entities input.bc -o entity2id.txt
+///
+/// For Machine IR:
+/// llvm-ir2vec entities -mode=mir input.mir -o entity2id.txt
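+///
+/// Example output (entity string keys are placeholders):
+///   2
+///   <entity-0>\t0
+///   <entity-1>\t1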
///
/// 3. Embedding Generation (embeddings):
/// Generates IR2Vec/MIR2Vec embeddings using a trained vocabulary.
@@ -67,6 +77,8 @@
#include "llvm/CodeGen/MIRParser/MIRParser.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/WithColor.h"
@@ -106,11 +118,10 @@ static cl::SubCommand
"Generate embeddings using trained vocabulary");
// Common options
-static cl::opt<std::string>
- InputFilename(cl::Positional,
- cl::desc("<input bitcode file or '-' for stdin>"),
- cl::init("-"), cl::sub(TripletsSubCmd),
- cl::sub(EmbeddingsSubCmd), cl::cat(CommonCategory));
+static cl::opt<std::string> InputFilename(
+ cl::Positional, cl::desc("<input bitcode/MIR file or '-' for stdin>"),
+ cl::init("-"), cl::sub(TripletsSubCmd), cl::sub(EntitiesSubCmd),
+ cl::sub(EmbeddingsSubCmd), cl::cat(CommonCategory));
static cl::opt<std::string> OutputFilename("o", cl::desc("Output filename"),
cl::value_desc("filename"),
@@ -345,6 +356,12 @@ Error processModule(Module &M, raw_ostream &OS) {
namespace mir2vec {
+/// Relation types for MIR2Vec triplet generation
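+///
+/// Note that this numbering differs from the LLVM IR path (0=Type, 1=Next,
+/// 2+=Args): MIR triplets use 0 for the Next relation and MIRArgRelation + K
+/// for an instruction's K-th operand.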
+enum MIRRelationType {
+ MIRNextRelation = 0, ///< Sequential instruction relationship
+  MIRArgRelation = 1 ///< Instruction-to-operand relationship (MIRArgRelation + N)
+};
+
/// Helper class for MIR2Vec embedding generation
class MIR2VecTool {
private:
@@ -354,7 +371,7 @@ private:
public:
explicit MIR2VecTool(MachineModuleInfo &MMI) : MMI(MMI) {}
- /// Initialize MIR2Vec vocabulary
+ /// Initialize MIR2Vec vocabulary from file (for embeddings generation)
bool initializeVocabulary(const Module &M) {
MIR2VecVocabProvider Provider(MMI);
auto VocabOrErr = Provider.getVocabulary(M);
@@ -368,6 +385,146 @@ public:
return true;
}
+ /// Initialize vocabulary with layout information only.
+ /// This creates a minimal vocabulary with correct layout but no actual
+ /// embeddings. Sufficient for generating training data and entity mappings.
+ ///
+ /// Note: Requires target-specific information from the first machine function
+ /// to determine the vocabulary layout (number of opcodes, register classes).
+ ///
+ /// FIXME: Use --target option to get target info directly, avoiding the need
+ /// to parse machine functions for pre-training operations.
+ bool initializeVocabularyForLayout(const Module &M) {
+ for (const Function &F : M) {
+ if (F.isDeclaration())
+ continue;
+
+ MachineFunction *MF = MMI.getMachineFunction(F);
+ if (!MF)
+ continue;
+
+ const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
+ const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
+ const MachineRegisterInfo &MRI = MF->getRegInfo();
+
+ auto VocabOrErr =
+ MIRVocabulary::createDummyVocabForTest(TII, TRI, MRI, 1);
+ if (!VocabOrErr) {
+ WithColor::error(errs(), ToolName)
+ << "Failed to create dummy vocabulary - "
+ << toString(VocabOrErr.takeError()) << "\n";
+ return false;
+ }
+ Vocab = std::make_unique<MIRVocabulary>(std::move(*VocabOrErr));
+ return true;
+ }
+
+ WithColor::error(errs(), ToolName)
+ << "No machine functions found to initialize vocabulary\n";
+ return false;
+ }
+
+ /// Generate triplets for the module
+ /// Output format: MAX_RELATION=N header followed by relationships
+ void generateTriplets(const Module &M, raw_ostream &OS) const {
+ unsigned MaxRelation = MIRNextRelation; // Track maximum relation ID
+ std::string Relationships;
+ raw_string_ostream RelOS(Relationships);
+
+ for (const Function &F : M) {
+ if (F.isDeclaration())
+ continue;
+
+ MachineFunction *MF = MMI.getMachineFunction(F);
+ if (!MF) {
+ WithColor::warning(errs(), ToolName)
+ << "No MachineFunction for " << F.getName() << "\n";
+ continue;
+ }
+
+ unsigned FuncMaxRelation = generateTriplets(*MF, RelOS);
+ MaxRelation = std::max(MaxRelation, FuncMaxRelation);
+ }
+
+ RelOS.flush();
+
+ // Write metadata header followed by relationships
+ OS << "MAX_RELATION=" << MaxRelation << '\n';
+ OS << Relationships;
+ }
+
+ /// Generate triplets for a single machine function
+ /// Returns the maximum relation ID used in this function
+ unsigned generateTriplets(const MachineFunction &MF, raw_ostream &OS) const {
+ unsigned MaxRelation = MIRNextRelation;
+ unsigned PrevOpcode = 0;
+ bool HasPrevOpcode = false;
+
+ if (!Vocab) {
+ WithColor::error(errs(), ToolName)
+ << "MIR Vocabulary must be initialized for triplet generation.\n";
+ return MaxRelation;
+ }
+
+ for (const MachineBasicBlock &MBB : MF) {
+ for (const MachineInstr &MI : MBB) {
+ // Skip debug instructions
+ if (MI.isDebugInstr())
+ continue;
+
+ // Get opcode entity ID
+ unsigned OpcodeID = Vocab->getEntityIDForOpcode(MI.getOpcode());
+
+ // Add "Next" relationship with previous instruction
+ if (HasPrevOpcode) {
+ OS << PrevOpcode << '\t' << OpcodeID << '\t' << MIRNextRelation
+ << '\n';
+ LLVM_DEBUG(dbgs()
+ << Vocab->getStringKey(PrevOpcode) << '\t'
+ << Vocab->getStringKey(OpcodeID) << '\t' << "Next\n");
+ }
+
+ // Add "Arg" relationships for operands
+ unsigned ArgIndex = 0;
+ for (const MachineOperand &MO : MI.operands()) {
+ auto OperandID = Vocab->getEntityIDForMachineOperand(MO);
+ unsigned RelationID = MIRArgRelation + ArgIndex;
+ OS << OpcodeID << '\t' << OperandID << '\t' << RelationID << '\n';
+ LLVM_DEBUG({
+ std::string OperandStr = Vocab->getStringKey(OperandID);
+ dbgs() << Vocab->getStringKey(OpcodeID) << '\t' << OperandStr
+ << '\t' << "Arg" << ArgIndex << '\n';
+ });
+
+ ++ArgIndex;
+ }
+
+ // Update MaxRelation if there were operands
+ if (ArgIndex > 0)
+ MaxRelation = std::max(MaxRelation, MIRArgRelation + ArgIndex - 1);
+
+ PrevOpcode = OpcodeID;
+ HasPrevOpcode = true;
+ }
+ }
+
+ return MaxRelation;
+ }
+
+ /// Generate entity mappings with vocabulary
+ void generateEntityMappings(raw_ostream &OS) const {
+ if (!Vocab) {
+ WithColor::error(errs(), ToolName)
+ << "Vocabulary must be initialized for entity mappings.\n";
+ return;
+ }
+
+ const unsigned EntityCount = Vocab->getCanonicalSize();
+ OS << EntityCount << "\n";
+ for (unsigned EntityID = 0; EntityID < EntityCount; ++EntityID)
+ OS << Vocab->getStringKey(EntityID) << '\t' << EntityID << '\n';
+ }
+
/// Generate embeddings for all machine functions in the module
void generateEmbeddings(const Module &M, raw_ostream &OS) const {
if (!Vocab) {
@@ -538,38 +695,67 @@ int main(int argc, char **argv) {
return 1;
}
- // Create MIR2Vec tool and initialize vocabulary
+ // Create MIR2Vec tool
MIR2VecTool Tool(*MMI);
- if (!Tool.initializeVocabulary(*M))
- return 1;
+  // Initialize the vocabulary. For triplet/entity generation, only the layout
+  // is needed; for embedding generation, the full vocabulary is needed.
+  //
+  // Note: Unlike IR2Vec, MIR2Vec vocabulary initialization requires
+  // target-specific information to determine the vocabulary layout, so the
+  // vocabulary is always initialized here regardless of the subcommand.
+ if (TripletsSubCmd || EntitiesSubCmd) {
+ if (!Tool.initializeVocabularyForLayout(*M)) {
+ WithColor::error(errs(), ToolName)
+ << "Failed to initialize MIR2Vec vocabulary for layout.\n";
+ return 1;
+ }
+ } else {
+ if (!Tool.initializeVocabulary(*M)) {
+ WithColor::error(errs(), ToolName)
+ << "Failed to initialize MIR2Vec vocabulary.\n";
+ return 1;
+ }
+ }
+ assert(Tool.getVocabulary() &&
+ "MIR2Vec vocabulary should be initialized at this point");
LLVM_DEBUG(dbgs() << "MIR2Vec vocabulary loaded successfully.\n"
<< "Vocabulary dimension: "
<< Tool.getVocabulary()->getDimension() << "\n"
<< "Vocabulary size: "
<< Tool.getVocabulary()->getCanonicalSize() << "\n");
- // Generate embeddings based on subcommand
- if (!FunctionName.empty()) {
- // Process single function
- Function *F = M->getFunction(FunctionName);
- if (!F) {
- WithColor::error(errs(), ToolName)
- << "Function '" << FunctionName << "' not found\n";
- return 1;
- }
+ // Handle subcommands
+ if (TripletsSubCmd) {
+ Tool.generateTriplets(*M, OS);
+ } else if (EntitiesSubCmd) {
+ Tool.generateEntityMappings(OS);
+ } else if (EmbeddingsSubCmd) {
+ if (!FunctionName.empty()) {
+ // Process single function
+ Function *F = M->getFunction(FunctionName);
+ if (!F) {
+ WithColor::error(errs(), ToolName)
+ << "Function '" << FunctionName << "' not found\n";
+ return 1;
+ }
- MachineFunction *MF = MMI->getMachineFunction(*F);
- if (!MF) {
- WithColor::error(errs(), ToolName)
- << "No MachineFunction for " << FunctionName << "\n";
- return 1;
- }
+ MachineFunction *MF = MMI->getMachineFunction(*F);
+ if (!MF) {
+ WithColor::error(errs(), ToolName)
+ << "No MachineFunction for " << FunctionName << "\n";
+ return 1;
+ }
- Tool.generateEmbeddings(*MF, OS);
+ Tool.generateEmbeddings(*MF, OS);
+ } else {
+ // Process all functions
+ Tool.generateEmbeddings(*M, OS);
+ }
} else {
- // Process all functions
- Tool.generateEmbeddings(*M, OS);
+ WithColor::error(errs(), ToolName)
+ << "Please specify a subcommand: triplets, entities, or embeddings\n";
+ return 1;
}
return 0;
diff --git a/llvm/tools/llvm-lto2/llvm-lto2.cpp b/llvm/tools/llvm-lto2/llvm-lto2.cpp
index ff75bb5..399306f 100644
--- a/llvm/tools/llvm-lto2/llvm-lto2.cpp
+++ b/llvm/tools/llvm-lto2/llvm-lto2.cpp
@@ -111,6 +111,12 @@ static cl::opt<std::string> DTLTOCompiler(
"dtlto-compiler",
cl::desc("Compiler to use for DTLTO ThinLTO backend compilations."));
+static cl::list<std::string> DTLTOCompilerPrependArgs(
+ "dtlto-compiler-prepend-arg", cl::CommaSeparated,
+ cl::desc("Prepend arguments to pass to the remote compiler for backend "
+ "compilations."),
+ cl::value_desc("arg"));
+
static cl::list<std::string> DTLTOCompilerArgs(
"dtlto-compiler-arg", cl::CommaSeparated,
cl::desc("Arguments to pass to the remote compiler for backend "
@@ -371,6 +377,9 @@ static int run(int argc, char **argv) {
"with -dtlto-distributor\n";
auto DTLTODistributorArgsSV = llvm::to_vector<0>(llvm::map_range(
DTLTODistributorArgs, [](const std::string &S) { return StringRef(S); }));
+ auto DTLTOCompilerPrependArgsSV = llvm::to_vector<0>(
+ llvm::map_range(DTLTOCompilerPrependArgs,
+ [](const std::string &S) { return StringRef(S); }));
auto DTLTOCompilerArgsSV = llvm::to_vector<0>(llvm::map_range(
DTLTOCompilerArgs, [](const std::string &S) { return StringRef(S); }));
@@ -388,7 +397,7 @@ static int run(int argc, char **argv) {
llvm::heavyweight_hardware_concurrency(Threads),
/*OnWrite=*/{}, ThinLTOEmitIndexes, ThinLTOEmitImports, OutputFilename,
DTLTODistributor, DTLTODistributorArgsSV, DTLTOCompiler,
- DTLTOCompilerArgsSV, SaveTemps);
+ DTLTOCompilerPrependArgsSV, DTLTOCompilerArgsSV, SaveTemps);
} else
Backend = createInProcessThinBackend(
llvm::heavyweight_hardware_concurrency(Threads),
diff --git a/llvm/unittests/ADT/SmallVectorTest.cpp b/llvm/unittests/ADT/SmallVectorTest.cpp
index b216359..1a01f30 100644
--- a/llvm/unittests/ADT/SmallVectorTest.cpp
+++ b/llvm/unittests/ADT/SmallVectorTest.cpp
@@ -13,6 +13,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/Compiler.h"
+#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include <list>
#include <stdarg.h>
@@ -1156,6 +1157,17 @@ TEST(SmallVectorTest, InitializerList) {
EXPECT_TRUE(ArrayRef(V2).equals({4, 5, 3, 2}));
}
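+// A minimal container-like type whose begin()/end() are free functions found
+// via argument-dependent lookup (ADL); used below to exercise to_vector and
+// to_vector_of on such types.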
+namespace namespace_with_adl {
+struct MyVector {
+ std::vector<int> data;
+};
+
+std::vector<int>::const_iterator begin(const MyVector &V) {
+ return V.data.begin();
+}
+std::vector<int>::const_iterator end(const MyVector &V) { return V.data.end(); }
+} // namespace namespace_with_adl
+
TEST(SmallVectorTest, ToVector) {
{
std::vector<char> v = {'a', 'b', 'c'};
@@ -1173,6 +1185,15 @@ TEST(SmallVectorTest, ToVector) {
for (size_t I = 0; I < v.size(); ++I)
EXPECT_EQ(v[I], Vector[I]);
}
+ {
+ // Check that to_vector and to_vector_of work with types that require ADL
+    // for begin/end iterators.
+ namespace_with_adl::MyVector V = {{1, 2, 3}};
+ auto IntVector = to_vector(V);
+ EXPECT_THAT(IntVector, testing::ElementsAre(1, 2, 3));
+ IntVector = to_vector<3>(V);
+ EXPECT_THAT(IntVector, testing::ElementsAre(1, 2, 3));
+ }
}
struct To {
@@ -1231,6 +1252,15 @@ TEST(SmallVectorTest, ToVectorOf) {
for (size_t I = 0; I < StdVector.size(); ++I)
EXPECT_EQ(StdVector[I], Vector[I]);
}
+ {
+    // Check that to_vector_of works with types that require ADL for begin/end
+    // iterators.
+ namespace_with_adl::MyVector V = {{1, 2, 3}};
+ auto UnsignedVector = to_vector_of<unsigned>(V);
+ EXPECT_THAT(UnsignedVector, testing::ElementsAre(1u, 2u, 3u));
+ UnsignedVector = to_vector_of<unsigned, 3>(V);
+ EXPECT_THAT(UnsignedVector, testing::ElementsAre(1u, 2u, 3u));
+ }
}
template <class VectorT>
diff --git a/llvm/unittests/ExecutionEngine/JITLink/JITLinkTestUtils.h b/llvm/unittests/ExecutionEngine/JITLink/JITLinkTestUtils.h
index dc077f9..f03c82f 100644
--- a/llvm/unittests/ExecutionEngine/JITLink/JITLinkTestUtils.h
+++ b/llvm/unittests/ExecutionEngine/JITLink/JITLinkTestUtils.h
@@ -132,7 +132,7 @@ public:
: JITLinkContext(&JD), MJMM(std::move(MJMM)),
HandleFailed(std::move(HandleFailed)) {}
- ~MockJITLinkContext() {
+ ~MockJITLinkContext() override {
if (auto Err = MJMM->deallocate(std::move(FinalizedAllocs)))
notifyFailed(std::move(Err));
}
diff --git a/llvm/unittests/ExecutionEngine/Orc/EPCGenericMemoryAccessTest.cpp b/llvm/unittests/ExecutionEngine/Orc/EPCGenericMemoryAccessTest.cpp
index 2bcff33..a72dc5f 100644
--- a/llvm/unittests/ExecutionEngine/Orc/EPCGenericMemoryAccessTest.cpp
+++ b/llvm/unittests/ExecutionEngine/Orc/EPCGenericMemoryAccessTest.cpp
@@ -140,7 +140,7 @@ public:
MemAccess = std::make_unique<EPCGenericMemoryAccess>(*EPC, FAs);
}
- ~EPCGenericMemoryAccessTest() { cantFail(EPC->disconnect()); }
+ ~EPCGenericMemoryAccessTest() override { cantFail(EPC->disconnect()); }
protected:
std::shared_ptr<SelfExecutorProcessControl> EPC;
diff --git a/llvm/unittests/ExecutionEngine/Orc/JITLinkRedirectionManagerTest.cpp b/llvm/unittests/ExecutionEngine/Orc/JITLinkRedirectionManagerTest.cpp
index a57241b..018a678 100644
--- a/llvm/unittests/ExecutionEngine/Orc/JITLinkRedirectionManagerTest.cpp
+++ b/llvm/unittests/ExecutionEngine/Orc/JITLinkRedirectionManagerTest.cpp
@@ -18,7 +18,7 @@ static int finalTarget() { return 53; }
class JITLinkRedirectionManagerTest : public testing::Test {
public:
- ~JITLinkRedirectionManagerTest() {
+ ~JITLinkRedirectionManagerTest() override {
if (ES)
if (auto Err = ES->endSession())
ES->reportError(std::move(Err));
diff --git a/llvm/unittests/ExecutionEngine/Orc/ObjectLinkingLayerTest.cpp b/llvm/unittests/ExecutionEngine/Orc/ObjectLinkingLayerTest.cpp
index 5ff3e26..5deebec9 100644
--- a/llvm/unittests/ExecutionEngine/Orc/ObjectLinkingLayerTest.cpp
+++ b/llvm/unittests/ExecutionEngine/Orc/ObjectLinkingLayerTest.cpp
@@ -34,7 +34,7 @@ ArrayRef<char> BlockContent(BlockContentBytes);
class ObjectLinkingLayerTest : public testing::Test {
public:
- ~ObjectLinkingLayerTest() {
+ ~ObjectLinkingLayerTest() override {
if (auto Err = ES.endSession())
ES.reportError(std::move(Err));
}
@@ -161,7 +161,7 @@ TEST_F(ObjectLinkingLayerTest, HandleErrorDuringPostAllocationPass) {
// abandon the in-flight allocation and report an error.
class TestPlugin : public ObjectLinkingLayer::Plugin {
public:
- ~TestPlugin() { EXPECT_TRUE(ErrorReported); }
+ ~TestPlugin() override { EXPECT_TRUE(ErrorReported); }
void modifyPassConfig(MaterializationResponsibility &MR,
jitlink::LinkGraph &G,
@@ -215,7 +215,7 @@ TEST_F(ObjectLinkingLayerTest, AddAndRemovePlugins) {
TestPlugin(size_t &ActivationCount, bool &PluginDestroyed)
: ActivationCount(ActivationCount), PluginDestroyed(PluginDestroyed) {}
- ~TestPlugin() { PluginDestroyed = true; }
+ ~TestPlugin() override { PluginDestroyed = true; }
void modifyPassConfig(MaterializationResponsibility &MR,
jitlink::LinkGraph &G,
@@ -336,7 +336,7 @@ TEST(ObjectLinkingLayerSearchGeneratorTest, AbsoluteSymbolsObjectLayer) {
class CheckDefs : public ObjectLinkingLayer::Plugin {
public:
- ~CheckDefs() { EXPECT_TRUE(SawSymbolDef); }
+ ~CheckDefs() override { EXPECT_TRUE(SawSymbolDef); }
void modifyPassConfig(MaterializationResponsibility &MR,
jitlink::LinkGraph &G,
diff --git a/llvm/unittests/ExecutionEngine/Orc/OrcTestCommon.h b/llvm/unittests/ExecutionEngine/Orc/OrcTestCommon.h
index 469de2a..e22c35f 100644
--- a/llvm/unittests/ExecutionEngine/Orc/OrcTestCommon.h
+++ b/llvm/unittests/ExecutionEngine/Orc/OrcTestCommon.h
@@ -47,7 +47,7 @@ namespace orc {
// (5) V -- A JITDylib associated with ES.
class CoreAPIsBasedStandardTest : public testing::Test {
public:
- ~CoreAPIsBasedStandardTest() {
+ ~CoreAPIsBasedStandardTest() override {
if (auto Err = ES.endSession())
ES.reportError(std::move(Err));
}
diff --git a/llvm/unittests/ExecutionEngine/Orc/ReOptimizeLayerTest.cpp b/llvm/unittests/ExecutionEngine/Orc/ReOptimizeLayerTest.cpp
index 686d85d..ac591c1 100644
--- a/llvm/unittests/ExecutionEngine/Orc/ReOptimizeLayerTest.cpp
+++ b/llvm/unittests/ExecutionEngine/Orc/ReOptimizeLayerTest.cpp
@@ -26,7 +26,7 @@ using namespace llvm::jitlink;
class ReOptimizeLayerTest : public testing::Test {
public:
- ~ReOptimizeLayerTest() {
+ ~ReOptimizeLayerTest() override {
if (ES)
if (auto Err = ES->endSession())
ES->reportError(std::move(Err));
diff --git a/llvm/unittests/ExecutionEngine/Orc/ResourceTrackerTest.cpp b/llvm/unittests/ExecutionEngine/Orc/ResourceTrackerTest.cpp
index 0e5d1517..3591829b 100644
--- a/llvm/unittests/ExecutionEngine/Orc/ResourceTrackerTest.cpp
+++ b/llvm/unittests/ExecutionEngine/Orc/ResourceTrackerTest.cpp
@@ -50,7 +50,7 @@ public:
SimpleResourceManager(SimpleResourceManager &&) = delete;
SimpleResourceManager &operator=(SimpleResourceManager &&) = delete;
- ~SimpleResourceManager() { ES.deregisterResourceManager(*this); }
+ ~SimpleResourceManager() override { ES.deregisterResourceManager(*this); }
/// Set the HandleRemove function object.
void setHandleRemove(HandleRemoveFunction HandleRemove) {
diff --git a/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp b/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp
index be7537c..cd86646 100644
--- a/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp
+++ b/llvm/utils/TableGen/Basic/CodeGenIntrinsics.cpp
@@ -278,15 +278,21 @@ CodeGenIntrinsic::CodeGenIntrinsic(const Record *R,
TargetPrefix = R->getValueAsString("TargetPrefix");
Name = R->getValueAsString("LLVMName").str();
+ std::string DefaultName = "llvm." + EnumName.str();
+ llvm::replace(DefaultName, '_', '.');
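+  // For example, an enum name of "foo_bar_baz" (hypothetical) yields the
+  // default intrinsic name "llvm.foo.bar.baz".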
+
if (Name == "") {
// If an explicit name isn't specified, derive one from the DefName.
- Name = "llvm." + EnumName.str();
- llvm::replace(Name, '_', '.');
+ Name = std::move(DefaultName);
} else {
// Verify it starts with "llvm.".
if (!StringRef(Name).starts_with("llvm."))
PrintFatalError(DefLoc, "Intrinsic '" + DefName +
"'s name does not start with 'llvm.'!");
+
+ if (Name == DefaultName)
+      PrintNote(DefLoc, "Explicitly specified name matches the default name; "
+                        "consider dropping it");
}
// If TargetPrefix is specified, make sure that Name starts with
diff --git a/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn
index 29c6178..3c523ae 100644
--- a/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/lib/Headers/BUILD.gn
@@ -104,6 +104,9 @@ copy("Headers") {
"__clang_hip_runtime_wrapper.h",
"__clang_hip_stdlib.h",
"__clang_spirv_builtins.h",
+ "__float_float.h",
+ "__float_header_macro.h",
+ "__float_infinity_nan.h",
"__stdarg___gnuc_va_list.h",
"__stdarg___va_copy.h",
"__stdarg_header_macro.h",