Diffstat (limited to 'llvm/test')
-rw-r--r--llvm/test/Analysis/CostModel/AArch64/cast.ll68
-rw-r--r--llvm/test/Analysis/CostModel/AArch64/div.ll168
-rw-r--r--llvm/test/Analysis/CostModel/AArch64/rem.ll211
-rw-r--r--llvm/test/Analysis/CostModel/AArch64/shuffle-extract.ll12
-rw-r--r--llvm/test/Analysis/CostModel/X86/shuffle-two-src-codesize.ll2
-rw-r--r--llvm/test/Analysis/CostModel/X86/shuffle-two-src-latency.ll2
-rw-r--r--llvm/test/Analysis/CostModel/X86/shuffle-two-src-sizelatency.ll2
-rw-r--r--llvm/test/Analysis/CostModel/X86/shuffle-two-src.ll2
-rw-r--r--llvm/test/Analysis/CostModel/X86/vector-insert-value.ll36
-rw-r--r--llvm/test/Analysis/Lint/abi-attrs.ll106
-rw-r--r--llvm/test/Analysis/UniformityAnalysis/NVPTX/daorder.ll5
-rw-r--r--llvm/test/Analysis/UniformityAnalysis/NVPTX/diverge.ll16
-rw-r--r--llvm/test/Analysis/UniformityAnalysis/NVPTX/hidden_diverge.ll5
-rw-r--r--llvm/test/Analysis/UniformityAnalysis/NVPTX/irreducible.ll4
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-fast-isel-conversion-fallback.ll14
-rw-r--r--llvm/test/CodeGen/AArch64/atomicrmw-fadd.ll66
-rw-r--r--llvm/test/CodeGen/AArch64/atomicrmw-fmax.ll112
-rw-r--r--llvm/test/CodeGen/AArch64/atomicrmw-fmin.ll112
-rw-r--r--llvm/test/CodeGen/AArch64/atomicrmw-fsub.ll66
-rw-r--r--llvm/test/CodeGen/AArch64/bf16-instructions.ll1102
-rw-r--r--llvm/test/CodeGen/AArch64/bf16-v8-instructions.ll1692
-rw-r--r--llvm/test/CodeGen/AArch64/cvt-fp-int-fp.ll24
-rw-r--r--llvm/test/CodeGen/AArch64/hwasan-zero-ptr.ll65
-rw-r--r--llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir93
-rw-r--r--llvm/test/CodeGen/AArch64/round-fptosi-sat-scalar.ll32
-rw-r--r--llvm/test/CodeGen/AArch64/sme-intrinsics-state.ll18
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll110
-rw-r--r--llvm/test/CodeGen/AMDGPU/add64-low-32-bits-known-zero.ll193
-rw-r--r--llvm/test/CodeGen/AMDGPU/flat-scratch.ll108
-rw-r--r--llvm/test/CodeGen/AMDGPU/global-saddr-load.ll54
-rw-r--r--llvm/test/CodeGen/AMDGPU/minmax.ll478
-rw-r--r--llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-mov.ll120
-rw-r--r--llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-mov.mir235
-rw-r--r--llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-read.mir182
-rw-r--r--llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll144
-rw-r--r--llvm/test/CodeGen/AMDGPU/remat-physreg-copy-subreg-extract-already-live-at-def-issue120970.mir85
-rw-r--r--llvm/test/CodeGen/AMDGPU/sdiv64.ll117
-rw-r--r--llvm/test/CodeGen/AMDGPU/smed3.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/srem64.ll132
-rw-r--r--llvm/test/CodeGen/AMDGPU/sub64-low-32-bits-known-zero.ll193
-rw-r--r--llvm/test/CodeGen/AMDGPU/swdev502267-use-after-free-last-chance-recoloring-alloc-succeeds.mir8
-rw-r--r--llvm/test/CodeGen/AMDGPU/swdev503538-move-to-valu-stack-srd-physreg.ll23
-rw-r--r--llvm/test/CodeGen/AMDGPU/umed3.ll8
-rw-r--r--llvm/test/CodeGen/ARM/sink-store-pre-load-dependency.mir41
-rw-r--r--llvm/test/CodeGen/DirectX/BufferLoad-sm61.ll60
-rw-r--r--llvm/test/CodeGen/DirectX/BufferLoad.ll29
-rw-r--r--llvm/test/CodeGen/DirectX/HLSLControlFlowHint.ll98
-rw-r--r--llvm/test/CodeGen/DirectX/RawBufferLoad-error64.ll24
-rw-r--r--llvm/test/CodeGen/DirectX/RawBufferLoad.ll232
-rw-r--r--llvm/test/CodeGen/DirectX/ResourceAccess/load_typedbuffer.ll8
-rw-r--r--llvm/test/CodeGen/DirectX/ResourceAccess/store_typedbuffer.ll35
-rw-r--r--llvm/test/CodeGen/DirectX/ResourceGlobalElimination.ll10
-rw-r--r--llvm/test/CodeGen/DirectX/ShaderFlags/typed-uav-load-additional-formats.ll6
-rw-r--r--llvm/test/CodeGen/Hexagon/loopIdiom.ll75
-rw-r--r--llvm/test/CodeGen/NVPTX/b52037.ll5
-rw-r--r--llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll8
-rw-r--r--llvm/test/CodeGen/NVPTX/bug21465.ll6
-rw-r--r--llvm/test/CodeGen/NVPTX/bug22322.ll5
-rw-r--r--llvm/test/CodeGen/NVPTX/bug26185.ll13
-rw-r--r--llvm/test/CodeGen/NVPTX/call-with-alloca-buffer.ll6
-rw-r--r--llvm/test/CodeGen/NVPTX/cluster-dim.ll7
-rw-r--r--llvm/test/CodeGen/NVPTX/disjoint-or-addr.ll25
-rw-r--r--llvm/test/CodeGen/NVPTX/fabs-fneg-free.ll34
-rw-r--r--llvm/test/CodeGen/NVPTX/generic-to-nvvm.ll6
-rw-r--r--llvm/test/CodeGen/NVPTX/i1-array-global.ll6
-rw-r--r--llvm/test/CodeGen/NVPTX/i1-ext-load.ll6
-rw-r--r--llvm/test/CodeGen/NVPTX/i1-global.ll6
-rw-r--r--llvm/test/CodeGen/NVPTX/i1-param.ll6
-rw-r--r--llvm/test/CodeGen/NVPTX/intr-range.ll18
-rw-r--r--llvm/test/CodeGen/NVPTX/kernel-param-align.ll8
-rw-r--r--llvm/test/CodeGen/NVPTX/load-with-non-coherent-cache.ll59
-rw-r--r--llvm/test/CodeGen/NVPTX/local-stack-frame.ll4
-rw-r--r--llvm/test/CodeGen/NVPTX/lower-alloca.ll4
-rw-r--r--llvm/test/CodeGen/NVPTX/lower-args-gridconstant.ll113
-rw-r--r--llvm/test/CodeGen/NVPTX/lower-args.ll13
-rw-r--r--llvm/test/CodeGen/NVPTX/lower-byval-args.ll150
-rw-r--r--llvm/test/CodeGen/NVPTX/lower-ctor-dtor.ll6
-rw-r--r--llvm/test/CodeGen/NVPTX/lower-kernel-ptr-arg.ll10
-rw-r--r--llvm/test/CodeGen/NVPTX/maxclusterrank.ll5
-rw-r--r--llvm/test/CodeGen/NVPTX/noduplicate-syncthreads.ll5
-rw-r--r--llvm/test/CodeGen/NVPTX/noreturn.ll9
-rw-r--r--llvm/test/CodeGen/NVPTX/nvcl-param-align.ll5
-rw-r--r--llvm/test/CodeGen/NVPTX/nvvm-reflect-arch.ll4
-rw-r--r--llvm/test/CodeGen/NVPTX/nvvm-reflect-ocl.ll4
-rw-r--r--llvm/test/CodeGen/NVPTX/nvvm-reflect-opaque.ll6
-rw-r--r--llvm/test/CodeGen/NVPTX/nvvm-reflect.ll7
-rw-r--r--llvm/test/CodeGen/NVPTX/refl1.ll6
-rw-r--r--llvm/test/CodeGen/NVPTX/reg-copy.ll6
-rw-r--r--llvm/test/CodeGen/NVPTX/simple-call.ll8
-rw-r--r--llvm/test/CodeGen/NVPTX/surf-read-cuda.ll14
-rw-r--r--llvm/test/CodeGen/NVPTX/surf-read.ll7
-rw-r--r--llvm/test/CodeGen/NVPTX/surf-tex.py36
-rw-r--r--llvm/test/CodeGen/NVPTX/surf-write-cuda.ll10
-rw-r--r--llvm/test/CodeGen/NVPTX/surf-write.ll7
-rw-r--r--llvm/test/CodeGen/NVPTX/tex-read-cuda.ll13
-rw-r--r--llvm/test/CodeGen/NVPTX/tex-read.ll5
-rw-r--r--llvm/test/CodeGen/NVPTX/unreachable.ll5
-rw-r--r--llvm/test/CodeGen/NVPTX/variadics-backend.ll35
-rw-r--r--llvm/test/CodeGen/PowerPC/global-merge-aix-zero-size-struct.ll20
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/add-imm.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/combine-neg-abs.ll44
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/combine.ll3
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/double-zfa.ll34
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/freeze.ll61
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/iabs.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir3
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir7
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir3
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir11
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-const-rv64.mir3
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir44
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir13
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir32
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ext-rv64.mir3
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rotate-rv64.mir6
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv64.mir17
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv64.mir3
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll105
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/scmp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/ucmp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll13
-rw-r--r--llvm/test/CodeGen/RISCV/attributes.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/kcfi-isel-mir.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/kcfi-mir.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll23
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll214
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll18
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll1
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll294
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll294
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll458
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll108
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vand-vp.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll215
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll207
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll74
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll74
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll207
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll49
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll49
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll66
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll207
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir243
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vl-opt.mir20
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vlopt-volatile-ld.mir13
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll44
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll44
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vor-vp.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll24
-rw-r--r--llvm/test/CodeGen/SPIRV/AtomicCompareExchange.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/event-zero-const.ll8
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp-simple-hierarchy.ll24
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_const.ll5
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fun-ptr-addrcast.ll8
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_inline_assembly/inline_asm.ll24
-rw-r--r--llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_shader_clock/shader_clock.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/global-var-name-linkage.ll59
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/cross.ll8
-rw-r--r--llvm/test/CodeGen/SPIRV/hlsl-intrinsics/length.ll14
-rw-r--r--llvm/test/CodeGen/SPIRV/iaddcarry-builtin.ll10
-rw-r--r--llvm/test/CodeGen/SPIRV/image-unoptimized.ll4
-rw-r--r--llvm/test/CodeGen/SPIRV/isubborrow-builtin.ll10
-rw-r--r--llvm/test/CodeGen/SPIRV/keep-tracked-const.ll6
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/fshl.ll32
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/fshr.ll26
-rw-r--r--llvm/test/CodeGen/SPIRV/llvm-intrinsics/memset.ll8
-rw-r--r--llvm/test/CodeGen/SPIRV/logical-access-chain.ll10
-rw-r--r--llvm/test/CodeGen/SPIRV/opencl/degrees.ll10
-rw-r--r--llvm/test/CodeGen/SPIRV/opencl/radians.ll10
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/PtrCast-null-in-OpSpecConstantOp.ll6
-rw-r--r--llvm/test/CodeGen/SPIRV/pointers/struct-opaque-pointers.ll12
-rw-r--r--llvm/test/CodeGen/SPIRV/structurizer/HLSLControlFlowHint-pass-check.ll90
-rw-r--r--llvm/test/CodeGen/SPIRV/structurizer/HLSLControlFlowHint.ll91
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/SampledImage.ll8
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/cl-types.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll10
-rw-r--r--llvm/test/CodeGen/SPIRV/transcoding/sub_group_non_uniform_arithmetic.ll2
-rw-r--r--llvm/test/CodeGen/SPIRV/unnamed-global.ll8
-rw-r--r--llvm/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll457
-rw-r--r--llvm/test/CodeGen/X86/uint_to_half.ll198
-rw-r--r--llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll56
-rw-r--r--llvm/test/DebugInfo/NVPTX/debug-addr-class.ll4
-rw-r--r--llvm/test/DebugInfo/NVPTX/debug-info.ll8
-rw-r--r--llvm/test/DebugInfo/X86/dwarf5-debug-names-addr-tu-to-non-tu.ll83
-rw-r--r--llvm/test/ExecutionEngine/JITLink/AArch32/ELF_data_alignment.s10
-rw-r--r--llvm/test/ExecutionEngine/JITLink/AArch64/ELF_ehframe.s4
-rw-r--r--llvm/test/ExecutionEngine/JITLink/AArch64/MachO_compact_unwind.s3
-rw-r--r--llvm/test/ExecutionEngine/JITLink/AArch64/MachO_ehframe.s4
-rw-r--r--llvm/test/ExecutionEngine/JITLink/LoongArch/ELF_loongarch64_ehframe.s6
-rw-r--r--llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call.s12
-rw-r--r--llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call_rvc.s48
-rw-r--r--llvm/test/ExecutionEngine/JITLink/RISCV/anonymous_symbol.s3
-rw-r--r--llvm/test/ExecutionEngine/JITLink/ppc64/ELF_ppc64_ehframe.s10
-rw-r--r--llvm/test/ExecutionEngine/JITLink/ppc64/external_weak.s5
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/COFF_abs.s3
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_any.test5
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_associative.test21
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_exact_match.test5
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_intervene.test5
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_largest.test5
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_noduplicate.test5
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_offset.test5
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_same_size.test5
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_weak.s3
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/COFF_common_symbol.s3
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/COFF_duplicate_externals.test10
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/COFF_file_debug.s4
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/COFF_static_var.s4
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/COFF_weak_external.s4
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/ELF_debug_section_lifetime_is_NoAlloc.yaml3
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/ELF_ehframe_basic.s5
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/ELF_ehframe_large_static_personality_encodings.s5
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/LocalDependencyPropagation.s5
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/MachO-check-dwarf-filename.s5
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/MachO_compact_unwind.s3
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/MachO_cstring_section_alignment.s3
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/MachO_cstring_section_splitting.s5
-rw-r--r--llvm/test/ExecutionEngine/JITLink/x86-64/MachO_non_subsections_via_symbols.s3
-rw-r--r--llvm/test/Instrumentation/HWAddressSanitizer/zero-ptr.ll35
-rw-r--r--llvm/test/Instrumentation/TypeSanitizer/access-with-offset.ll2
-rw-r--r--llvm/test/Instrumentation/TypeSanitizer/alloca-only.ll2
-rw-r--r--llvm/test/Instrumentation/TypeSanitizer/alloca.ll2
-rw-r--r--llvm/test/Instrumentation/TypeSanitizer/anon.ll2
-rw-r--r--llvm/test/Instrumentation/TypeSanitizer/basic-nosan.ll2
-rw-r--r--llvm/test/Instrumentation/TypeSanitizer/basic.ll2
-rw-r--r--llvm/test/Instrumentation/TypeSanitizer/byval.ll2
-rw-r--r--llvm/test/Instrumentation/TypeSanitizer/globals.ll2
-rw-r--r--llvm/test/Instrumentation/TypeSanitizer/invalid-metadata.ll2
-rw-r--r--llvm/test/Instrumentation/TypeSanitizer/memintrinsics.ll2
-rw-r--r--llvm/test/Instrumentation/TypeSanitizer/nosanitize.ll2
-rw-r--r--llvm/test/Instrumentation/TypeSanitizer/sanitize-no-tbaa.ll2
-rw-r--r--llvm/test/Instrumentation/TypeSanitizer/swifterror.ll2
-rw-r--r--llvm/test/MC/AArch64/armv9.6a-rme-gpc3.s10
-rw-r--r--llvm/test/MC/AMDGPU/gfx1030_err.s6
-rw-r--r--llvm/test/MC/AMDGPU/gfx10_asm_smem_err.s2
-rw-r--r--llvm/test/MC/AMDGPU/gfx11_asm_mimg_err.s6
-rw-r--r--llvm/test/MC/AMDGPU/gfx11_asm_smem_err.s3
-rw-r--r--llvm/test/MC/AMDGPU/gfx11_asm_vop3.s144
-rw-r--r--llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s202
-rw-r--r--llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s174
-rw-r--r--llvm/test/MC/AMDGPU/gfx12_asm_mimg_err.s5
-rw-r--r--llvm/test/MC/AMDGPU/gfx12_asm_smem_err.s18
-rw-r--r--llvm/test/MC/AMDGPU/gfx12_asm_vop3.s144
-rw-r--r--llvm/test/MC/AMDGPU/gfx12_asm_vop3_aliases.s8
-rw-r--r--llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s218
-rw-r--r--llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s190
-rw-r--r--llvm/test/MC/Disassembler/AArch64/armv9.6a-rme-gpc3.txt12
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt198
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt200
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt180
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt202
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt217
-rw-r--r--llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt190
-rw-r--r--llvm/test/MC/RISCV/custom_reloc.s21
-rw-r--r--llvm/test/MC/RISCV/xqcicm-invalid.s152
-rw-r--r--llvm/test/MC/RISCV/xqcicm-valid.s123
-rw-r--r--llvm/test/ThinLTO/X86/memprof-recursive.ll141
-rw-r--r--llvm/test/Transforms/InstCombine/and-xor-or.ll59
-rw-r--r--llvm/test/Transforms/InstCombine/compare-signs.ll13
-rw-r--r--llvm/test/Transforms/InstCombine/icmp-add.ll13
-rw-r--r--llvm/test/Transforms/InstCombine/onehot_merge.ll16
-rw-r--r--llvm/test/Transforms/InstCombine/phi.ll11
-rw-r--r--llvm/test/Transforms/InstCombine/select-divrem.ll15
-rw-r--r--llvm/test/Transforms/InstCombine/select.ll34
-rw-r--r--llvm/test/Transforms/InstCombine/xor-and-or.ll12
-rw-r--r--llvm/test/Transforms/InstSimplify/const-fold-nvvm-f2i-d2i.ll1129
-rw-r--r--llvm/test/Transforms/InstSimplify/const-fold-nvvm-f2ll-d2ll.ll1129
-rw-r--r--llvm/test/Transforms/LoadStoreVectorizer/X86/massive_indirection.ll180
-rw-r--r--llvm/test/Transforms/LoopStrengthReduce/NVPTX/trunc.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/blend-costs.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll168
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/blend-any-of-reduction-cost.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reverse-load-store.ll8
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/pr109581-unused-blend.ll70
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll9
-rw-r--r--llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll10
-rw-r--r--llvm/test/Transforms/LoopVectorize/blend-in-header.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/if-pred-stores.ll18
-rw-r--r--llvm/test/Transforms/LoopVectorize/induction.ll25
-rw-r--r--llvm/test/Transforms/LoopVectorize/invariant-store-vectorization-2.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/pr37248.ll12
-rw-r--r--llvm/test/Transforms/LoopVectorize/pr55167-fold-tail-live-out.ll14
-rw-r--r--llvm/test/Transforms/LoopVectorize/reduction-small-size.ll6
-rw-r--r--llvm/test/Transforms/LoopVectorize/select-cmp.ll24
-rw-r--r--llvm/test/Transforms/LoopVectorize/single_early_exit.ll4
-rw-r--r--llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/uncountable-single-exit-loops.ll52
-rw-r--r--llvm/test/Transforms/LoopVectorize/uniform-blend.ll2
-rw-r--r--llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll539
-rw-r--r--llvm/test/Transforms/MemProfContextDisambiguation/recursive.ll159
-rw-r--r--llvm/test/Transforms/PhaseOrdering/AArch64/block_scaling_decompr_8bit.ll12
-rw-r--r--llvm/test/Transforms/PhaseOrdering/X86/hadd.ll18
-rw-r--r--llvm/test/Transforms/PhaseOrdering/X86/hsub.ll18
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AArch64/reduce-fadd.ll11
-rw-r--r--llvm/test/Transforms/SLPVectorizer/RISCV/long-gep-chains.ll76
-rw-r--r--llvm/test/Transforms/StraightLineStrengthReduce/NVPTX/speculative-slsr.ll6
-rw-r--r--llvm/test/Transforms/VectorCombine/X86/extract-binop-inseltpoison.ll29
-rw-r--r--llvm/test/Transforms/VectorCombine/X86/extract-binop.ll41
-rw-r--r--llvm/test/Transforms/VectorCombine/X86/shuffle-of-cmps.ll34
-rw-r--r--llvm/test/tools/UpdateTestChecks/lit.local.cfg16
-rw-r--r--llvm/test/tools/dxil-dis/fastmath.ll23
-rw-r--r--llvm/test/tools/llvm-cov/Inputs/mcdc-templates-merge.cpp51
-rw-r--r--llvm/test/tools/llvm-cov/Inputs/mcdc-templates-merge.proftext73
-rw-r--r--llvm/test/tools/llvm-cov/Inputs/mcdc-templates-merge.yaml105
-rw-r--r--llvm/test/tools/llvm-cov/branch-macros.test8
-rw-r--r--llvm/test/tools/llvm-cov/mcdc-templates-merge.test27
-rw-r--r--llvm/test/tools/llvm-gsymutil/ARM_AArch64/macho-merged-funcs-dwarf.yaml8
-rw-r--r--llvm/test/tools/llvm-mca/ARM/cortex-a57-memory-instructions.s6
-rw-r--r--llvm/test/tools/llvm-mca/RISCV/SiFiveP400/div.s1009
-rw-r--r--llvm/test/tools/llvm-mca/RISCV/SiFiveP400/mul-cpop.s60
-rw-r--r--llvm/test/tools/llvm-objcopy/MachO/strip-with-encryption-info.test217
-rw-r--r--llvm/test/tools/llvm-profgen/context-depth.test125
-rw-r--r--llvm/test/tools/llvm-profgen/recursion-compression-pseudoprobe.test20
338 files changed, 15158 insertions, 6462 deletions
diff --git a/llvm/test/Analysis/CostModel/AArch64/cast.ll b/llvm/test/Analysis/CostModel/AArch64/cast.ll
index c363b58..b82c2f1 100644
--- a/llvm/test/Analysis/CostModel/AArch64/cast.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/cast.ll
@@ -1,6 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64 %s | FileCheck --check-prefixes=CHECK,CHECK-NOFP16 %s
; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64 -mattr=+fullfp16 %s | FileCheck --check-prefixes=CHECK,CHECK-FP16 %s
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=aarch64 -mattr=+fullfp16,+bf16 %s | FileCheck --check-prefixes=CHECK,CHECK-FP16,CHECK-BF16 %s
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
@@ -1237,28 +1238,51 @@ define void @fp16cast() {
}
define void @bf16cast() {
-; CHECK-LABEL: 'bf16cast'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extf16f32 = fpext bfloat undef to float
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extv2f16f32 = fpext <2 x bfloat> undef to <2 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extv4f16f32 = fpext <4 x bfloat> undef to <4 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %extv8f16f32 = fpext <8 x bfloat> undef to <8 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %extv16f16f32 = fpext <16 x bfloat> undef to <16 x float>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extf16f64 = fpext bfloat undef to double
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extv2f16f64 = fpext <2 x bfloat> undef to <2 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %extv4f16f64 = fpext <4 x bfloat> undef to <4 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %extv8f16f64 = fpext <8 x bfloat> undef to <8 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %extv16f16f64 = fpext <16 x bfloat> undef to <16 x double>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %truncf16f32 = fptrunc float undef to bfloat
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %truncv2f16f32 = fptrunc <2 x float> undef to <2 x bfloat>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %truncv4f16f32 = fptrunc <4 x float> undef to <4 x bfloat>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 33 for instruction: %truncv8f16f32 = fptrunc <8 x float> undef to <8 x bfloat>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 66 for instruction: %truncv16f16f32 = fptrunc <16 x float> undef to <16 x bfloat>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %truncf16f64 = fptrunc double undef to bfloat
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %truncv2f16f64 = fptrunc <2 x double> undef to <2 x bfloat>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %truncv4f16f64 = fptrunc <4 x double> undef to <4 x bfloat>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %truncv8f16f64 = fptrunc <8 x double> undef to <8 x bfloat>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 54 for instruction: %truncv16f16f64 = fptrunc <16 x double> undef to <16 x bfloat>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; CHECK-NOFP16-LABEL: 'bf16cast'
+; CHECK-NOFP16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extf16f32 = fpext bfloat undef to float
+; CHECK-NOFP16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extv2f16f32 = fpext <2 x bfloat> undef to <2 x float>
+; CHECK-NOFP16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extv4f16f32 = fpext <4 x bfloat> undef to <4 x float>
+; CHECK-NOFP16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %extv8f16f32 = fpext <8 x bfloat> undef to <8 x float>
+; CHECK-NOFP16-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %extv16f16f32 = fpext <16 x bfloat> undef to <16 x float>
+; CHECK-NOFP16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %extf16f64 = fpext bfloat undef to double
+; CHECK-NOFP16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %extv2f16f64 = fpext <2 x bfloat> undef to <2 x double>
+; CHECK-NOFP16-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %extv4f16f64 = fpext <4 x bfloat> undef to <4 x double>
+; CHECK-NOFP16-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %extv8f16f64 = fpext <8 x bfloat> undef to <8 x double>
+; CHECK-NOFP16-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %extv16f16f64 = fpext <16 x bfloat> undef to <16 x double>
+; CHECK-NOFP16-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %truncf16f32 = fptrunc float undef to bfloat
+; CHECK-NOFP16-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %truncv2f16f32 = fptrunc <2 x float> undef to <2 x bfloat>
+; CHECK-NOFP16-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %truncv4f16f32 = fptrunc <4 x float> undef to <4 x bfloat>
+; CHECK-NOFP16-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %truncv8f16f32 = fptrunc <8 x float> undef to <8 x bfloat>
+; CHECK-NOFP16-NEXT: Cost Model: Found an estimated cost of 30 for instruction: %truncv16f16f32 = fptrunc <16 x float> undef to <16 x bfloat>
+; CHECK-NOFP16-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %truncf16f64 = fptrunc double undef to bfloat
+; CHECK-NOFP16-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %truncv2f16f64 = fptrunc <2 x double> undef to <2 x bfloat>
+; CHECK-NOFP16-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %truncv4f16f64 = fptrunc <4 x double> undef to <4 x bfloat>
+; CHECK-NOFP16-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %truncv8f16f64 = fptrunc <8 x double> undef to <8 x bfloat>
+; CHECK-NOFP16-NEXT: Cost Model: Found an estimated cost of 38 for instruction: %truncv16f16f64 = fptrunc <16 x double> undef to <16 x bfloat>
+; CHECK-NOFP16-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; CHECK-BF16-LABEL: 'bf16cast'
+; CHECK-BF16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extf16f32 = fpext bfloat undef to float
+; CHECK-BF16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extv2f16f32 = fpext <2 x bfloat> undef to <2 x float>
+; CHECK-BF16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %extv4f16f32 = fpext <4 x bfloat> undef to <4 x float>
+; CHECK-BF16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %extv8f16f32 = fpext <8 x bfloat> undef to <8 x float>
+; CHECK-BF16-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %extv16f16f32 = fpext <16 x bfloat> undef to <16 x float>
+; CHECK-BF16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %extf16f64 = fpext bfloat undef to double
+; CHECK-BF16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %extv2f16f64 = fpext <2 x bfloat> undef to <2 x double>
+; CHECK-BF16-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %extv4f16f64 = fpext <4 x bfloat> undef to <4 x double>
+; CHECK-BF16-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %extv8f16f64 = fpext <8 x bfloat> undef to <8 x double>
+; CHECK-BF16-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %extv16f16f64 = fpext <16 x bfloat> undef to <16 x double>
+; CHECK-BF16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %truncf16f32 = fptrunc float undef to bfloat
+; CHECK-BF16-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %truncv2f16f32 = fptrunc <2 x float> undef to <2 x bfloat>
+; CHECK-BF16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %truncv4f16f32 = fptrunc <4 x float> undef to <4 x bfloat>
+; CHECK-BF16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %truncv8f16f32 = fptrunc <8 x float> undef to <8 x bfloat>
+; CHECK-BF16-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %truncv16f16f32 = fptrunc <16 x float> undef to <16 x bfloat>
+; CHECK-BF16-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %truncf16f64 = fptrunc double undef to bfloat
+; CHECK-BF16-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %truncv2f16f64 = fptrunc <2 x double> undef to <2 x bfloat>
+; CHECK-BF16-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %truncv4f16f64 = fptrunc <4 x double> undef to <4 x bfloat>
+; CHECK-BF16-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %truncv8f16f64 = fptrunc <8 x double> undef to <8 x bfloat>
+; CHECK-BF16-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %truncv16f16f64 = fptrunc <16 x double> undef to <16 x bfloat>
+; CHECK-BF16-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
%extf16f32 = fpext bfloat undef to float
%extv2f16f32 = fpext <2 x bfloat> undef to <2 x float>
diff --git a/llvm/test/Analysis/CostModel/AArch64/div.ll b/llvm/test/Analysis/CostModel/AArch64/div.ll
index ada0be6..ef52d0d 100644
--- a/llvm/test/Analysis/CostModel/AArch64/div.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/div.ll
@@ -11,14 +11,20 @@ define i32 @sdiv() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V4i64 = sdiv <4 x i64> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V8i64 = sdiv <8 x i64> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = sdiv i32 undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i32 = sdiv <2 x i32> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i32 = sdiv <4 x i32> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %V8i32 = sdiv <8 x i32> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 192 for instruction: %V16i32 = sdiv <16 x i32> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = sdiv i16 undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i16 = sdiv <2 x i16> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i16 = sdiv <4 x i16> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %V8i16 = sdiv <8 x i16> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %V16i16 = sdiv <16 x i16> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 352 for instruction: %V32i16 = sdiv <32 x i16> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I8 = sdiv i8 undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i8 = sdiv <2 x i8> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i8 = sdiv <4 x i8> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %V8i8 = sdiv <8 x i8> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 168 for instruction: %V16i8 = sdiv <16 x i8> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 336 for instruction: %V32i8 = sdiv <32 x i8> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 672 for instruction: %V64i8 = sdiv <64 x i8> undef, undef
@@ -32,16 +38,22 @@ define i32 @sdiv() {
%V8i64 = sdiv <8 x i64> undef, undef
%I32 = sdiv i32 undef, undef
+ %V2i32 = sdiv <2 x i32> undef, undef
%V4i32 = sdiv <4 x i32> undef, undef
%V8i32 = sdiv <8 x i32> undef, undef
%V16i32 = sdiv <16 x i32> undef, undef
%I16 = sdiv i16 undef, undef
+ %V2i16 = sdiv <2 x i16> undef, undef
+ %V4i16 = sdiv <4 x i16> undef, undef
%V8i16 = sdiv <8 x i16> undef, undef
%V16i16 = sdiv <16 x i16> undef, undef
%V32i16 = sdiv <32 x i16> undef, undef
%I8 = sdiv i8 undef, undef
+ %V2i8 = sdiv <2 x i8> undef, undef
+ %V4i8 = sdiv <4 x i8> undef, undef
+ %V8i8 = sdiv <8 x i8> undef, undef
%V16i8 = sdiv <16 x i8> undef, undef
%V32i8 = sdiv <32 x i8> undef, undef
%V64i8 = sdiv <64 x i8> undef, undef
@@ -57,14 +69,20 @@ define i32 @udiv() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V4i64 = udiv <4 x i64> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V8i64 = udiv <8 x i64> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = udiv i32 undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i32 = udiv <2 x i32> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i32 = udiv <4 x i32> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %V8i32 = udiv <8 x i32> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 192 for instruction: %V16i32 = udiv <16 x i32> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = udiv i16 undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i16 = udiv <2 x i16> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i16 = udiv <4 x i16> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %V8i16 = udiv <8 x i16> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %V16i16 = udiv <16 x i16> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 352 for instruction: %V32i16 = udiv <32 x i16> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I8 = udiv i8 undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i8 = udiv <2 x i8> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i8 = udiv <4 x i8> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %V8i8 = udiv <8 x i8> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 168 for instruction: %V16i8 = udiv <16 x i8> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 336 for instruction: %V32i8 = udiv <32 x i8> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 672 for instruction: %V64i8 = udiv <64 x i8> undef, undef
@@ -78,16 +96,22 @@ define i32 @udiv() {
%V8i64 = udiv <8 x i64> undef, undef
%I32 = udiv i32 undef, undef
+ %V2i32 = udiv <2 x i32> undef, undef
%V4i32 = udiv <4 x i32> undef, undef
%V8i32 = udiv <8 x i32> undef, undef
%V16i32 = udiv <16 x i32> undef, undef
%I16 = udiv i16 undef, undef
+ %V2i16 = udiv <2 x i16> undef, undef
+ %V4i16 = udiv <4 x i16> undef, undef
%V8i16 = udiv <8 x i16> undef, undef
%V16i16 = udiv <16 x i16> undef, undef
%V32i16 = udiv <32 x i16> undef, undef
%I8 = udiv i8 undef, undef
+ %V2i8 = udiv <2 x i8> undef, undef
+ %V4i8 = udiv <4 x i8> undef, undef
+ %V8i8 = udiv <8 x i8> undef, undef
%V16i8 = udiv <16 x i8> undef, undef
%V32i8 = udiv <32 x i8> undef, undef
%V64i8 = udiv <64 x i8> undef, undef
@@ -103,14 +127,20 @@ define i32 @sdiv_const() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V4i64 = sdiv <4 x i64> undef, <i64 4, i64 5, i64 6, i64 7>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V8i64 = sdiv <8 x i64> undef, <i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = sdiv i32 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i32 = sdiv <2 x i32> undef, <i32 4, i32 5>
; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i32 = sdiv <4 x i32> undef, <i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %V8i32 = sdiv <8 x i32> undef, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
; CHECK-NEXT: Cost Model: Found an estimated cost of 192 for instruction: %V16i32 = sdiv <16 x i32> undef, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = sdiv i16 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i16 = sdiv <2 x i16> undef, <i16 4, i16 5>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i16 = sdiv <4 x i16> undef, <i16 4, i16 5, i16 6, i16 7>
; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %V8i16 = sdiv <8 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>
; CHECK-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %V16i16 = sdiv <16 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 352 for instruction: %V32i16 = sdiv <32 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I8 = sdiv i8 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i8 = sdiv <2 x i8> undef, <i8 4, i8 5>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i8 = sdiv <4 x i8> undef, <i8 4, i8 5, i8 6, i8 7>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %V8i8 = sdiv <8 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11>
; CHECK-NEXT: Cost Model: Found an estimated cost of 168 for instruction: %V16i8 = sdiv <16 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 336 for instruction: %V32i8 = sdiv <32 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 672 for instruction: %V64i8 = sdiv <64 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
@@ -124,16 +154,22 @@ define i32 @sdiv_const() {
%V8i64 = sdiv <8 x i64> undef, <i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11>
%I32 = sdiv i32 undef, 7
+ %V2i32 = sdiv <2 x i32> undef, <i32 4, i32 5>
%V4i32 = sdiv <4 x i32> undef, <i32 4, i32 5, i32 6, i32 7>
%V8i32 = sdiv <8 x i32> undef, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
%V16i32 = sdiv <16 x i32> undef, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
%I16 = sdiv i16 undef, 7
+ %V2i16 = sdiv <2 x i16> undef, <i16 4, i16 5>
+ %V4i16 = sdiv <4 x i16> undef, <i16 4, i16 5, i16 6, i16 7>
%V8i16 = sdiv <8 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>
%V16i16 = sdiv <16 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>
%V32i16 = sdiv <32 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>
%I8 = sdiv i8 undef, 7
+ %V2i8 = sdiv <2 x i8> undef, <i8 4, i8 5>
+ %V4i8 = sdiv <4 x i8> undef, <i8 4, i8 5, i8 6, i8 7>
+ %V8i8 = sdiv <8 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11>
%V16i8 = sdiv <16 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
%V32i8 = sdiv <32 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
%V64i8 = sdiv <64 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
@@ -149,14 +185,20 @@ define i32 @udiv_const() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V4i64 = udiv <4 x i64> undef, <i64 4, i64 5, i64 6, i64 7>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V8i64 = udiv <8 x i64> undef, <i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = udiv i32 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i32 = udiv <2 x i32> undef, <i32 4, i32 5>
; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i32 = udiv <4 x i32> undef, <i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %V8i32 = udiv <8 x i32> undef, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
; CHECK-NEXT: Cost Model: Found an estimated cost of 192 for instruction: %V16i32 = udiv <16 x i32> undef, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = udiv i16 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i16 = udiv <2 x i16> undef, <i16 4, i16 5>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i16 = udiv <4 x i16> undef, <i16 4, i16 5, i16 6, i16 7>
; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %V8i16 = udiv <8 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>
; CHECK-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %V16i16 = udiv <16 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 352 for instruction: %V32i16 = udiv <32 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I8 = udiv i8 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i8 = udiv <2 x i8> undef, <i8 4, i8 5>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i8 = udiv <4 x i8> undef, <i8 4, i8 5, i8 6, i8 7>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %V8i8 = udiv <8 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11>
; CHECK-NEXT: Cost Model: Found an estimated cost of 168 for instruction: %V16i8 = udiv <16 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 336 for instruction: %V32i8 = udiv <32 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 672 for instruction: %V64i8 = udiv <64 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
@@ -171,16 +213,22 @@ define i32 @udiv_const() {
%V8i64 = udiv <8 x i64> undef, <i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11>
%I32 = udiv i32 undef, 7
+ %V2i32 = udiv <2 x i32> undef, <i32 4, i32 5>
%V4i32 = udiv <4 x i32> undef, <i32 4, i32 5, i32 6, i32 7>
%V8i32 = udiv <8 x i32> undef, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
%V16i32 = udiv <16 x i32> undef, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
%I16 = udiv i16 undef, 7
+ %V2i16 = udiv <2 x i16> undef, <i16 4, i16 5>
+ %V4i16 = udiv <4 x i16> undef, <i16 4, i16 5, i16 6, i16 7>
%V8i16 = udiv <8 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>
%V16i16 = udiv <16 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>
%V32i16 = udiv <32 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>
%I8 = udiv i8 undef, 7
+ %V2i8 = udiv <2 x i8> undef, <i8 4, i8 5>
+ %V4i8 = udiv <4 x i8> undef, <i8 4, i8 5, i8 6, i8 7>
+ %V8i8 = udiv <8 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11>
%V16i8 = udiv <16 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
%V32i8 = udiv <32 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
%V64i8 = udiv <64 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
@@ -196,14 +244,20 @@ define i32 @sdiv_uniformconst() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4i64 = sdiv <4 x i64> undef, splat (i64 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8i64 = sdiv <8 x i64> undef, splat (i64 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = sdiv i32 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V2i32 = sdiv <2 x i32> undef, splat (i32 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4i32 = sdiv <4 x i32> undef, splat (i32 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8i32 = sdiv <8 x i32> undef, splat (i32 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V16i32 = sdiv <16 x i32> undef, splat (i32 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = sdiv i16 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V2i16 = sdiv <2 x i16> undef, splat (i16 7)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4i16 = sdiv <4 x i16> undef, splat (i16 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8i16 = sdiv <8 x i16> undef, splat (i16 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V16i16 = sdiv <16 x i16> undef, splat (i16 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %V32i16 = sdiv <32 x i16> undef, splat (i16 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I8 = sdiv i8 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V2i8 = sdiv <2 x i8> undef, splat (i8 7)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4i8 = sdiv <4 x i8> undef, splat (i8 7)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8i8 = sdiv <8 x i8> undef, splat (i8 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V16i8 = sdiv <16 x i8> undef, splat (i8 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %V32i8 = sdiv <32 x i8> undef, splat (i8 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V64i8 = sdiv <64 x i8> undef, splat (i8 7)
@@ -217,16 +271,22 @@ define i32 @sdiv_uniformconst() {
%V8i64 = sdiv <8 x i64> undef, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7>
%I32 = sdiv i32 undef, 7
+ %V2i32 = sdiv <2 x i32> undef, <i32 7, i32 7>
%V4i32 = sdiv <4 x i32> undef, <i32 7, i32 7, i32 7, i32 7>
%V8i32 = sdiv <8 x i32> undef, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
%V16i32 = sdiv <16 x i32> undef, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
%I16 = sdiv i16 undef, 7
+ %V2i16 = sdiv <2 x i16> undef, <i16 7, i16 7>
+ %V4i16 = sdiv <4 x i16> undef, <i16 7, i16 7, i16 7, i16 7>
%V8i16 = sdiv <8 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
%V16i16 = sdiv <16 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
%V32i16 = sdiv <32 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
%I8 = sdiv i8 undef, 7
+ %V2i8 = sdiv <2 x i8> undef, <i8 7, i8 7>
+ %V4i8 = sdiv <4 x i8> undef, <i8 7, i8 7, i8 7, i8 7>
+ %V8i8 = sdiv <8 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
%V16i8 = sdiv <16 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
%V32i8 = sdiv <32 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
%V64i8 = sdiv <64 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
@@ -242,14 +302,20 @@ define i32 @udiv_uniformconst() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4i64 = udiv <4 x i64> undef, splat (i64 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8i64 = udiv <8 x i64> undef, splat (i64 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = udiv i32 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V2i32 = udiv <2 x i32> undef, splat (i32 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4i32 = udiv <4 x i32> undef, splat (i32 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8i32 = udiv <8 x i32> undef, splat (i32 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V16i32 = udiv <16 x i32> undef, splat (i32 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = udiv i16 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V2i16 = udiv <2 x i16> undef, splat (i16 7)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4i16 = udiv <4 x i16> undef, splat (i16 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8i16 = udiv <8 x i16> undef, splat (i16 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V16i16 = udiv <16 x i16> undef, splat (i16 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %V32i16 = udiv <32 x i16> undef, splat (i16 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I8 = udiv i8 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V2i8 = udiv <2 x i8> undef, splat (i8 7)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4i8 = udiv <4 x i8> undef, splat (i8 7)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8i8 = udiv <8 x i8> undef, splat (i8 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V16i8 = udiv <16 x i8> undef, splat (i8 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %V32i8 = udiv <32 x i8> undef, splat (i8 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V64i8 = udiv <64 x i8> undef, splat (i8 7)
@@ -263,16 +329,22 @@ define i32 @udiv_uniformconst() {
%V8i64 = udiv <8 x i64> undef, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7>
%I32 = udiv i32 undef, 7
+ %V2i32 = udiv <2 x i32> undef, <i32 7, i32 7>
%V4i32 = udiv <4 x i32> undef, <i32 7, i32 7, i32 7, i32 7>
%V8i32 = udiv <8 x i32> undef, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
%V16i32 = udiv <16 x i32> undef, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
%I16 = udiv i16 undef, 7
+ %V2i16 = udiv <2 x i16> undef, <i16 7, i16 7>
+ %V4i16 = udiv <4 x i16> undef, <i16 7, i16 7, i16 7, i16 7>
%V8i16 = udiv <8 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
%V16i16 = udiv <16 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
%V32i16 = udiv <32 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
%I8 = udiv i8 undef, 7
+ %V2i8 = udiv <2 x i8> undef, <i8 7, i8 7>
+ %V4i8 = udiv <4 x i8> undef, <i8 7, i8 7, i8 7, i8 7>
+ %V8i8 = udiv <8 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
%V16i8 = udiv <16 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
%V32i8 = udiv <32 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
%V64i8 = udiv <64 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
@@ -288,14 +360,20 @@ define i32 @sdiv_constpow2() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V4i64 = sdiv <4 x i64> undef, <i64 2, i64 4, i64 8, i64 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V8i64 = sdiv <8 x i64> undef, <i64 2, i64 4, i64 8, i64 16, i64 32, i64 64, i64 128, i64 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I32 = sdiv i32 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i32 = sdiv <2 x i32> undef, <i32 2, i32 4>
; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i32 = sdiv <4 x i32> undef, <i32 2, i32 4, i32 8, i32 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %V8i32 = sdiv <8 x i32> undef, <i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 192 for instruction: %V16i32 = sdiv <16 x i32> undef, <i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I16 = sdiv i16 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i16 = sdiv <2 x i16> undef, <i16 2, i16 4>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i16 = sdiv <4 x i16> undef, <i16 2, i16 4, i16 8, i16 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %V8i16 = sdiv <8 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %V16i16 = sdiv <16 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 352 for instruction: %V32i16 = sdiv <32 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I8 = sdiv i8 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i8 = sdiv <2 x i8> undef, <i8 2, i8 4>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i8 = sdiv <4 x i8> undef, <i8 2, i8 4, i8 8, i8 16>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %V8i8 = sdiv <8 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 168 for instruction: %V16i8 = sdiv <16 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 336 for instruction: %V32i8 = sdiv <32 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 672 for instruction: %V64i8 = sdiv <64 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
@@ -309,16 +387,22 @@ define i32 @sdiv_constpow2() {
%V8i64 = sdiv <8 x i64> undef, <i64 2, i64 4, i64 8, i64 16, i64 32, i64 64, i64 128, i64 256>
%I32 = sdiv i32 undef, 16
+ %V2i32 = sdiv <2 x i32> undef, <i32 2, i32 4>
%V4i32 = sdiv <4 x i32> undef, <i32 2, i32 4, i32 8, i32 16>
%V8i32 = sdiv <8 x i32> undef, <i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256>
%V16i32 = sdiv <16 x i32> undef, <i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256>
%I16 = sdiv i16 undef, 16
+ %V2i16 = sdiv <2 x i16> undef, <i16 2, i16 4>
+ %V4i16 = sdiv <4 x i16> undef, <i16 2, i16 4, i16 8, i16 16>
%V8i16 = sdiv <8 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
%V16i16 = sdiv <16 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
%V32i16 = sdiv <32 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
%I8 = sdiv i8 undef, 16
+ %V2i8 = sdiv <2 x i8> undef, <i8 2, i8 4>
+ %V4i8 = sdiv <4 x i8> undef, <i8 2, i8 4, i8 8, i8 16>
+ %V8i8 = sdiv <8 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
%V16i8 = sdiv <16 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
%V32i8 = sdiv <32 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
%V64i8 = sdiv <64 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
@@ -334,14 +418,20 @@ define i32 @udiv_constpow2() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V4i64 = udiv <4 x i64> undef, <i64 2, i64 4, i64 8, i64 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V8i64 = udiv <8 x i64> undef, <i64 2, i64 4, i64 8, i64 16, i64 32, i64 64, i64 128, i64 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = udiv i32 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i32 = udiv <2 x i32> undef, <i32 2, i32 4>
; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i32 = udiv <4 x i32> undef, <i32 2, i32 4, i32 8, i32 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %V8i32 = udiv <8 x i32> undef, <i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 192 for instruction: %V16i32 = udiv <16 x i32> undef, <i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = udiv i16 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i16 = udiv <2 x i16> undef, <i16 2, i16 4>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i16 = udiv <4 x i16> undef, <i16 2, i16 4, i16 8, i16 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %V8i16 = udiv <8 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %V16i16 = udiv <16 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 352 for instruction: %V32i16 = udiv <32 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I8 = udiv i8 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i8 = udiv <2 x i8> undef, <i8 2, i8 4>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i8 = udiv <4 x i8> undef, <i8 2, i8 4, i8 8, i8 16>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %V8i8 = udiv <8 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 168 for instruction: %V16i8 = udiv <16 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 336 for instruction: %V32i8 = udiv <32 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 672 for instruction: %V64i8 = udiv <64 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
@@ -355,16 +445,22 @@ define i32 @udiv_constpow2() {
%V8i64 = udiv <8 x i64> undef, <i64 2, i64 4, i64 8, i64 16, i64 32, i64 64, i64 128, i64 256>
%I32 = udiv i32 undef, 16
+ %V2i32 = udiv <2 x i32> undef, <i32 2, i32 4>
%V4i32 = udiv <4 x i32> undef, <i32 2, i32 4, i32 8, i32 16>
%V8i32 = udiv <8 x i32> undef, <i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256>
%V16i32 = udiv <16 x i32> undef, <i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256>
%I16 = udiv i16 undef, 16
+ %V2i16 = udiv <2 x i16> undef, <i16 2, i16 4>
+ %V4i16 = udiv <4 x i16> undef, <i16 2, i16 4, i16 8, i16 16>
%V8i16 = udiv <8 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
%V16i16 = udiv <16 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
%V32i16 = udiv <32 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
%I8 = udiv i8 undef, 16
+ %V2i8 = udiv <2 x i8> undef, <i8 2, i8 4>
+ %V4i8 = udiv <4 x i8> undef, <i8 2, i8 4, i8 8, i8 16>
+ %V8i8 = udiv <8 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
%V16i8 = udiv <16 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
%V32i8 = udiv <32 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
%V64i8 = udiv <64 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
@@ -380,14 +476,20 @@ define i32 @sdiv_uniformconstpow2() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 30 for instruction: %V4i64 = sdiv <4 x i64> undef, splat (i64 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 60 for instruction: %V8i64 = sdiv <8 x i64> undef, splat (i64 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I32 = sdiv i32 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %V2i32 = sdiv <2 x i32> undef, splat (i32 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %V4i32 = sdiv <4 x i32> undef, splat (i32 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 54 for instruction: %V8i32 = sdiv <8 x i32> undef, splat (i32 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 108 for instruction: %V16i32 = sdiv <16 x i32> undef, splat (i32 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I16 = sdiv i16 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %V2i16 = sdiv <2 x i16> undef, splat (i16 16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %V4i16 = sdiv <4 x i16> undef, splat (i16 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 51 for instruction: %V8i16 = sdiv <8 x i16> undef, splat (i16 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 102 for instruction: %V16i16 = sdiv <16 x i16> undef, splat (i16 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 204 for instruction: %V32i16 = sdiv <32 x i16> undef, splat (i16 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %I8 = sdiv i8 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %V2i8 = sdiv <2 x i8> undef, splat (i8 16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 27 for instruction: %V4i8 = sdiv <4 x i8> undef, splat (i8 16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 51 for instruction: %V8i8 = sdiv <8 x i8> undef, splat (i8 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 99 for instruction: %V16i8 = sdiv <16 x i8> undef, splat (i8 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 198 for instruction: %V32i8 = sdiv <32 x i8> undef, splat (i8 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 396 for instruction: %V64i8 = sdiv <64 x i8> undef, splat (i8 16)
@@ -401,16 +503,22 @@ define i32 @sdiv_uniformconstpow2() {
%V8i64 = sdiv <8 x i64> undef, <i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16>
%I32 = sdiv i32 undef, 16
+ %V2i32 = sdiv <2 x i32> undef, <i32 16, i32 16>
%V4i32 = sdiv <4 x i32> undef, <i32 16, i32 16, i32 16, i32 16>
%V8i32 = sdiv <8 x i32> undef, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
%V16i32 = sdiv <16 x i32> undef, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
%I16 = sdiv i16 undef, 16
+ %V2i16 = sdiv <2 x i16> undef, <i16 16, i16 16>
+ %V4i16 = sdiv <4 x i16> undef, <i16 16, i16 16, i16 16, i16 16>
%V8i16 = sdiv <8 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
%V16i16 = sdiv <16 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
%V32i16 = sdiv <32 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
%I8 = sdiv i8 undef, 16
+ %V2i8 = sdiv <2 x i8> undef, <i8 16, i8 16>
+ %V4i8 = sdiv <4 x i8> undef, <i8 16, i8 16, i8 16, i8 16>
+ %V8i8 = sdiv <8 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16>
%V16i8 = sdiv <16 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16>
%V32i8 = sdiv <32 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16>
%V64i8 = sdiv <64 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16>
@@ -426,14 +534,20 @@ define i32 @udiv_uniformconstpow2() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4i64 = udiv <4 x i64> undef, splat (i64 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8i64 = udiv <8 x i64> undef, splat (i64 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = udiv i32 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V2i32 = udiv <2 x i32> undef, splat (i32 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4i32 = udiv <4 x i32> undef, splat (i32 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8i32 = udiv <8 x i32> undef, splat (i32 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V16i32 = udiv <16 x i32> undef, splat (i32 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = udiv i16 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V2i16 = udiv <2 x i16> undef, splat (i16 16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4i16 = udiv <4 x i16> undef, splat (i16 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8i16 = udiv <8 x i16> undef, splat (i16 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V16i16 = udiv <16 x i16> undef, splat (i16 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %V32i16 = udiv <32 x i16> undef, splat (i16 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I8 = udiv i8 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V2i8 = udiv <2 x i8> undef, splat (i8 16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4i8 = udiv <4 x i8> undef, splat (i8 16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8i8 = udiv <8 x i8> undef, splat (i8 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V16i8 = udiv <16 x i8> undef, splat (i8 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %V32i8 = udiv <32 x i8> undef, splat (i8 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V64i8 = udiv <64 x i8> undef, splat (i8 16)
@@ -447,16 +561,22 @@ define i32 @udiv_uniformconstpow2() {
%V8i64 = udiv <8 x i64> undef, <i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16>
%I32 = udiv i32 undef, 16
+ %V2i32 = udiv <2 x i32> undef, <i32 16, i32 16>
%V4i32 = udiv <4 x i32> undef, <i32 16, i32 16, i32 16, i32 16>
%V8i32 = udiv <8 x i32> undef, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
%V16i32 = udiv <16 x i32> undef, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
%I16 = udiv i16 undef, 16
+ %V2i16 = udiv <2 x i16> undef, <i16 16, i16 16>
+ %V4i16 = udiv <4 x i16> undef, <i16 16, i16 16, i16 16, i16 16>
%V8i16 = udiv <8 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
%V16i16 = udiv <16 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
%V32i16 = udiv <32 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
%I8 = udiv i8 undef, 16
+ %V2i8 = udiv <2 x i8> undef, <i8 16, i8 16>
+ %V4i8 = udiv <4 x i8> undef, <i8 16, i8 16, i8 16, i8 16>
+ %V8i8 = udiv <8 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16>
%V16i8 = udiv <16 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16>
%V32i8 = udiv <32 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16>
%V64i8 = udiv <64 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16>
@@ -472,14 +592,20 @@ define i32 @sdiv_constnegpow2() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V4i64 = sdiv <4 x i64> undef, <i64 -2, i64 -4, i64 -8, i64 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V8i64 = sdiv <8 x i64> undef, <i64 -2, i64 -4, i64 -8, i64 -16, i64 -32, i64 -64, i64 -128, i64 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = sdiv i32 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i32 = sdiv <2 x i32> undef, <i32 -2, i32 -4>
; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i32 = sdiv <4 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %V8i32 = sdiv <8 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 192 for instruction: %V16i32 = sdiv <16 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256, i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = sdiv i16 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i16 = sdiv <2 x i16> undef, <i16 -2, i16 -4>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i16 = sdiv <4 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %V8i16 = sdiv <8 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %V16i16 = sdiv <16 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 352 for instruction: %V32i16 = sdiv <32 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I8 = sdiv i8 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i8 = sdiv <2 x i8> undef, <i8 -2, i8 -4>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i8 = sdiv <4 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %V8i8 = sdiv <8 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 168 for instruction: %V16i8 = sdiv <16 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 336 for instruction: %V32i8 = sdiv <32 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 672 for instruction: %V64i8 = sdiv <64 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
@@ -493,16 +619,22 @@ define i32 @sdiv_constnegpow2() {
%V8i64 = sdiv <8 x i64> undef, <i64 -2, i64 -4, i64 -8, i64 -16, i64 -32, i64 -64, i64 -128, i64 -256>
%I32 = sdiv i32 undef, -16
+ %V2i32 = sdiv <2 x i32> undef, <i32 -2, i32 -4>
%V4i32 = sdiv <4 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16>
%V8i32 = sdiv <8 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256>
%V16i32 = sdiv <16 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256, i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256>
%I16 = sdiv i16 undef, -16
+ %V2i16 = sdiv <2 x i16> undef, <i16 -2, i16 -4>
+ %V4i16 = sdiv <4 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16>
%V8i16 = sdiv <8 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
%V16i16 = sdiv <16 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
%V32i16 = sdiv <32 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
%I8 = sdiv i8 undef, -16
+ %V2i8 = sdiv <2 x i8> undef, <i8 -2, i8 -4>
+ %V4i8 = sdiv <4 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16>
+ %V8i8 = sdiv <8 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
%V16i8 = sdiv <16 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
%V32i8 = sdiv <32 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
%V64i8 = sdiv <64 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
@@ -518,14 +650,20 @@ define i32 @udiv_constnegpow2() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V4i64 = udiv <4 x i64> undef, <i64 -2, i64 -4, i64 -8, i64 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V8i64 = udiv <8 x i64> undef, <i64 -2, i64 -4, i64 -8, i64 -16, i64 -32, i64 -64, i64 -128, i64 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = udiv i32 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i32 = udiv <2 x i32> undef, <i32 -2, i32 -4>
; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i32 = udiv <4 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 96 for instruction: %V8i32 = udiv <8 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 192 for instruction: %V16i32 = udiv <16 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256, i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = udiv i16 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i16 = udiv <2 x i16> undef, <i16 -2, i16 -4>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i16 = udiv <4 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %V8i16 = udiv <8 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %V16i16 = udiv <16 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 352 for instruction: %V32i16 = udiv <32 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I8 = udiv i8 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V2i8 = udiv <2 x i8> undef, <i8 -2, i8 -4>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 48 for instruction: %V4i8 = udiv <4 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %V8i8 = udiv <8 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 168 for instruction: %V16i8 = udiv <16 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 336 for instruction: %V32i8 = udiv <32 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 672 for instruction: %V64i8 = udiv <64 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
@@ -539,16 +677,22 @@ define i32 @udiv_constnegpow2() {
%V8i64 = udiv <8 x i64> undef, <i64 -2, i64 -4, i64 -8, i64 -16, i64 -32, i64 -64, i64 -128, i64 -256>
%I32 = udiv i32 undef, -16
+ %V2i32 = udiv <2 x i32> undef, <i32 -2, i32 -4>
%V4i32 = udiv <4 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16>
%V8i32 = udiv <8 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256>
%V16i32 = udiv <16 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256, i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256>
%I16 = udiv i16 undef, -16
+ %V2i16 = udiv <2 x i16> undef, <i16 -2, i16 -4>
+ %V4i16 = udiv <4 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16>
%V8i16 = udiv <8 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
%V16i16 = udiv <16 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
%V32i16 = udiv <32 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
%I8 = udiv i8 undef, -16
+ %V2i8 = udiv <2 x i8> undef, <i8 -2, i8 -4>
+ %V4i8 = udiv <4 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16>
+ %V8i8 = udiv <8 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
%V16i8 = udiv <16 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
%V32i8 = udiv <32 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
%V64i8 = udiv <64 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
@@ -564,14 +708,20 @@ define i32 @sdiv_uniformconstnegpow2() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4i64 = sdiv <4 x i64> undef, splat (i64 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8i64 = sdiv <8 x i64> undef, splat (i64 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = sdiv i32 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V2i32 = sdiv <2 x i32> undef, splat (i32 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4i32 = sdiv <4 x i32> undef, splat (i32 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8i32 = sdiv <8 x i32> undef, splat (i32 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V16i32 = sdiv <16 x i32> undef, splat (i32 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = sdiv i16 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V2i16 = sdiv <2 x i16> undef, splat (i16 -16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4i16 = sdiv <4 x i16> undef, splat (i16 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8i16 = sdiv <8 x i16> undef, splat (i16 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V16i16 = sdiv <16 x i16> undef, splat (i16 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %V32i16 = sdiv <32 x i16> undef, splat (i16 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I8 = sdiv i8 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V2i8 = sdiv <2 x i8> undef, splat (i8 -16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4i8 = sdiv <4 x i8> undef, splat (i8 -16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8i8 = sdiv <8 x i8> undef, splat (i8 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V16i8 = sdiv <16 x i8> undef, splat (i8 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %V32i8 = sdiv <32 x i8> undef, splat (i8 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V64i8 = sdiv <64 x i8> undef, splat (i8 -16)
@@ -585,16 +735,22 @@ define i32 @sdiv_uniformconstnegpow2() {
%V8i64 = sdiv <8 x i64> undef, <i64 -16, i64 -16, i64 -16, i64 -16, i64 -16, i64 -16, i64 -16, i64 -16>
%I32 = sdiv i32 undef, -16
+ %V2i32 = sdiv <2 x i32> undef, <i32 -16, i32 -16>
%V4i32 = sdiv <4 x i32> undef, <i32 -16, i32 -16, i32 -16, i32 -16>
%V8i32 = sdiv <8 x i32> undef, <i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16>
%V16i32 = sdiv <16 x i32> undef, <i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16>
%I16 = sdiv i16 undef, -16
+ %V2i16 = sdiv <2 x i16> undef, <i16 -16, i16 -16>
+ %V4i16 = sdiv <4 x i16> undef, <i16 -16, i16 -16, i16 -16, i16 -16>
%V8i16 = sdiv <8 x i16> undef, <i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16>
%V16i16 = sdiv <16 x i16> undef, <i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16>
%V32i16 = sdiv <32 x i16> undef, <i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16>
%I8 = sdiv i8 undef, -16
+ %V2i8 = sdiv <2 x i8> undef, <i8 -16, i8 -16>
+ %V4i8 = sdiv <4 x i8> undef, <i8 -16, i8 -16, i8 -16, i8 -16>
+ %V8i8 = sdiv <8 x i8> undef, <i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16>
%V16i8 = sdiv <16 x i8> undef, <i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16>
%V32i8 = sdiv <32 x i8> undef, <i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16>
%V64i8 = sdiv <64 x i8> undef, <i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16>
@@ -610,14 +766,20 @@ define i32 @udiv_uniformconstnegpow2() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4i64 = udiv <4 x i64> undef, splat (i64 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8i64 = udiv <8 x i64> undef, splat (i64 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I32 = udiv i32 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V2i32 = udiv <2 x i32> undef, splat (i32 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V4i32 = udiv <4 x i32> undef, splat (i32 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8i32 = udiv <8 x i32> undef, splat (i32 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V16i32 = udiv <16 x i32> undef, splat (i32 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I16 = udiv i16 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V2i16 = udiv <2 x i16> undef, splat (i16 -16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4i16 = udiv <4 x i16> undef, splat (i16 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V8i16 = udiv <8 x i16> undef, splat (i16 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 80 for instruction: %V16i16 = udiv <16 x i16> undef, splat (i16 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %V32i16 = udiv <32 x i16> undef, splat (i16 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %I8 = udiv i8 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V2i8 = udiv <2 x i8> undef, splat (i8 -16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %V4i8 = udiv <4 x i8> undef, splat (i8 -16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 40 for instruction: %V8i8 = udiv <8 x i8> undef, splat (i8 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V16i8 = udiv <16 x i8> undef, splat (i8 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 160 for instruction: %V32i8 = udiv <32 x i8> undef, splat (i8 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 320 for instruction: %V64i8 = udiv <64 x i8> undef, splat (i8 -16)
@@ -631,16 +793,22 @@ define i32 @udiv_uniformconstnegpow2() {
%V8i64 = udiv <8 x i64> undef, <i64 -16, i64 -16, i64 -16, i64 -16, i64 -16, i64 -16, i64 -16, i64 -16>
%I32 = udiv i32 undef, -16
+ %V2i32 = udiv <2 x i32> undef, <i32 -16, i32 -16>
%V4i32 = udiv <4 x i32> undef, <i32 -16, i32 -16, i32 -16, i32 -16>
%V8i32 = udiv <8 x i32> undef, <i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16>
%V16i32 = udiv <16 x i32> undef, <i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16>
%I16 = udiv i16 undef, -16
+ %V2i16 = udiv <2 x i16> undef, <i16 -16, i16 -16>
+ %V4i16 = udiv <4 x i16> undef, <i16 -16, i16 -16, i16 -16, i16 -16>
%V8i16 = udiv <8 x i16> undef, <i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16>
%V16i16 = udiv <16 x i16> undef, <i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16>
%V32i16 = udiv <32 x i16> undef, <i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16>
%I8 = udiv i8 undef, -16
+ %V2i8 = udiv <2 x i8> undef, <i8 -16, i8 -16>
+ %V4i8 = udiv <4 x i8> undef, <i8 -16, i8 -16, i8 -16, i8 -16>
+ %V8i8 = udiv <8 x i8> undef, <i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16>
%V16i8 = udiv <16 x i8> undef, <i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16>
%V32i8 = udiv <32 x i8> undef, <i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16>
%V64i8 = udiv <64 x i8> undef, <i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16>
diff --git a/llvm/test/Analysis/CostModel/AArch64/rem.ll b/llvm/test/Analysis/CostModel/AArch64/rem.ll
index 2f1e8c8..06c05ae 100644
--- a/llvm/test/Analysis/CostModel/AArch64/rem.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/rem.ll
@@ -5,40 +5,55 @@ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
define i32 @srem() {
; CHECK-LABEL: 'srem'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %I128 = srem i128 undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I64 = srem i64 undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i64 = srem <2 x i64> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i64 = srem <4 x i64> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i64 = srem <8 x i64> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I32 = srem i32 undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i32 = srem <2 x i32> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i32 = srem <4 x i32> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i32 = srem <8 x i32> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i32 = srem <16 x i32> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I16 = srem i16 undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i16 = srem <2 x i16> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i16 = srem <4 x i16> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i16 = srem <8 x i16> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i16 = srem <16 x i16> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i16 = srem <32 x i16> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I8 = srem i8 undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i8 = srem <2 x i8> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i8 = srem <4 x i8> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i8 = srem <8 x i8> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i8 = srem <16 x i8> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i8 = srem <32 x i8> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 448 for instruction: %V64i8 = srem <64 x i8> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
+ %I128 = srem i128 undef, undef
+
%I64 = srem i64 undef, undef
%V2i64 = srem <2 x i64> undef, undef
%V4i64 = srem <4 x i64> undef, undef
%V8i64 = srem <8 x i64> undef, undef
%I32 = srem i32 undef, undef
+ %V2i32 = srem <2 x i32> undef, undef
%V4i32 = srem <4 x i32> undef, undef
%V8i32 = srem <8 x i32> undef, undef
%V16i32 = srem <16 x i32> undef, undef
%I16 = srem i16 undef, undef
+ %V2i16 = srem <2 x i16> undef, undef
+ %V4i16 = srem <4 x i16> undef, undef
%V8i16 = srem <8 x i16> undef, undef
%V16i16 = srem <16 x i16> undef, undef
%V32i16 = srem <32 x i16> undef, undef
%I8 = srem i8 undef, undef
+ %V2i8 = srem <2 x i8> undef, undef
+ %V4i8 = srem <4 x i8> undef, undef
+ %V8i8 = srem <8 x i8> undef, undef
%V16i8 = srem <16 x i8> undef, undef
%V32i8 = srem <32 x i8> undef, undef
%V64i8 = srem <64 x i8> undef, undef
@@ -48,40 +63,55 @@ define i32 @srem() {
define i32 @urem() {
; CHECK-LABEL: 'urem'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %I128 = urem i128 undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I64 = urem i64 undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i64 = urem <2 x i64> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i64 = urem <4 x i64> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i64 = urem <8 x i64> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I32 = urem i32 undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i32 = urem <2 x i32> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i32 = urem <4 x i32> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i32 = urem <8 x i32> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i32 = urem <16 x i32> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I16 = urem i16 undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i16 = urem <2 x i16> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i16 = urem <4 x i16> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i16 = urem <8 x i16> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i16 = urem <16 x i16> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i16 = urem <32 x i16> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I8 = urem i8 undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i8 = urem <2 x i8> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i8 = urem <4 x i8> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i8 = urem <8 x i8> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i8 = urem <16 x i8> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i8 = urem <32 x i8> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 448 for instruction: %V64i8 = urem <64 x i8> undef, undef
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
+ %I128 = urem i128 undef, undef
+
%I64 = urem i64 undef, undef
%V2i64 = urem <2 x i64> undef, undef
%V4i64 = urem <4 x i64> undef, undef
%V8i64 = urem <8 x i64> undef, undef
%I32 = urem i32 undef, undef
+ %V2i32 = urem <2 x i32> undef, undef
%V4i32 = urem <4 x i32> undef, undef
%V8i32 = urem <8 x i32> undef, undef
%V16i32 = urem <16 x i32> undef, undef
%I16 = urem i16 undef, undef
+ %V2i16 = urem <2 x i16> undef, undef
+ %V4i16 = urem <4 x i16> undef, undef
%V8i16 = urem <8 x i16> undef, undef
%V16i16 = urem <16 x i16> undef, undef
%V32i16 = urem <32 x i16> undef, undef
%I8 = urem i8 undef, undef
+ %V2i8 = urem <2 x i8> undef, undef
+ %V4i8 = urem <4 x i8> undef, undef
+ %V8i8 = urem <8 x i8> undef, undef
%V16i8 = urem <16 x i8> undef, undef
%V32i8 = urem <32 x i8> undef, undef
%V64i8 = urem <64 x i8> undef, undef
@@ -91,40 +121,55 @@ define i32 @urem() {
define i32 @srem_const() {
; CHECK-LABEL: 'srem_const'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %I128 = srem i128 undef, 7
; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %I64 = srem i64 undef, 7
; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i64 = srem <2 x i64> undef, <i64 6, i64 7>
; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i64 = srem <4 x i64> undef, <i64 4, i64 5, i64 6, i64 7>
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i64 = srem <8 x i64> undef, <i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I32 = srem i32 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i32 = srem <2 x i32> undef, <i32 4, i32 5>
; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i32 = srem <4 x i32> undef, <i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i32 = srem <8 x i32> undef, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i32 = srem <16 x i32> undef, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I16 = srem i16 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i16 = srem <2 x i16> undef, <i16 4, i16 5>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i16 = srem <4 x i16> undef, <i16 4, i16 5, i16 6, i16 7>
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i16 = srem <8 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i16 = srem <16 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i16 = srem <32 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I8 = srem i8 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i8 = srem <2 x i8> undef, <i8 4, i8 5>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i8 = srem <4 x i8> undef, <i8 4, i8 5, i8 6, i8 7>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i8 = srem <8 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i8 = srem <16 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i8 = srem <32 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 448 for instruction: %V64i8 = srem <64 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
+ %I128 = srem i128 undef, 7
+
%I64 = srem i64 undef, 7
%V2i64 = srem <2 x i64> undef, <i64 6, i64 7>
%V4i64 = srem <4 x i64> undef, <i64 4, i64 5, i64 6, i64 7>
%V8i64 = srem <8 x i64> undef, <i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11>
%I32 = srem i32 undef, 7
+ %V2i32 = srem <2 x i32> undef, <i32 4, i32 5>
%V4i32 = srem <4 x i32> undef, <i32 4, i32 5, i32 6, i32 7>
%V8i32 = srem <8 x i32> undef, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
%V16i32 = srem <16 x i32> undef, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
%I16 = srem i16 undef, 7
+ %V2i16 = srem <2 x i16> undef, <i16 4, i16 5>
+ %V4i16 = srem <4 x i16> undef, <i16 4, i16 5, i16 6, i16 7>
%V8i16 = srem <8 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>
%V16i16 = srem <16 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>
%V32i16 = srem <32 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>
%I8 = srem i8 undef, 7
+ %V2i8 = srem <2 x i8> undef, <i8 4, i8 5>
+ %V4i8 = srem <4 x i8> undef, <i8 4, i8 5, i8 6, i8 7>
+ %V8i8 = srem <8 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11>
%V16i8 = srem <16 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
%V32i8 = srem <32 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
%V64i8 = srem <64 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
@@ -134,40 +179,56 @@ define i32 @srem_const() {
define i32 @urem_const() {
; CHECK-LABEL: 'urem_const'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %I128 = urem i128 undef, 7
; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %I64 = urem i64 undef, 7
; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i64 = urem <2 x i64> undef, <i64 6, i64 7>
; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i64 = urem <4 x i64> undef, <i64 4, i64 5, i64 6, i64 7>
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i64 = urem <8 x i64> undef, <i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I32 = urem i32 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i32 = urem <2 x i32> undef, <i32 4, i32 5>
; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i32 = urem <4 x i32> undef, <i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i32 = urem <8 x i32> undef, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i32 = urem <16 x i32> undef, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I16 = urem i16 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i16 = urem <2 x i16> undef, <i16 4, i16 5>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i16 = urem <4 x i16> undef, <i16 4, i16 5, i16 6, i16 7>
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i16 = urem <8 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i16 = urem <16 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i16 = urem <32 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I8 = urem i8 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i8 = urem <2 x i8> undef, <i8 4, i8 5>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i8 = urem <4 x i8> undef, <i8 4, i8 5, i8 6, i8 7>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i8 = urem <8 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i8 = urem <16 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i8 = urem <32 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 448 for instruction: %V64i8 = urem <64 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
+ %I128 = urem i128 undef, 7
+
%I64 = urem i64 undef, 7
%V2i64 = urem <2 x i64> undef, <i64 6, i64 7>
%V4i64 = urem <4 x i64> undef, <i64 4, i64 5, i64 6, i64 7>
%V8i64 = urem <8 x i64> undef, <i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11>
%I32 = urem i32 undef, 7
+ %V2i32 = urem <2 x i32> undef, <i32 4, i32 5>
%V4i32 = urem <4 x i32> undef, <i32 4, i32 5, i32 6, i32 7>
%V8i32 = urem <8 x i32> undef, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
%V16i32 = urem <16 x i32> undef, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19>
%I16 = urem i16 undef, 7
+ %V2i16 = urem <2 x i16> undef, <i16 4, i16 5>
+ %V4i16 = urem <4 x i16> undef, <i16 4, i16 5, i16 6, i16 7>
%V8i16 = urem <8 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11>
%V16i16 = urem <16 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>
%V32i16 = urem <32 x i16> undef, <i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 17, i16 18, i16 19>
%I8 = urem i8 undef, 7
+ %V2i8 = urem <2 x i8> undef, <i8 4, i8 5>
+ %V4i8 = urem <4 x i8> undef, <i8 4, i8 5, i8 6, i8 7>
+ %V8i8 = urem <8 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11>
%V16i8 = urem <16 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
%V32i8 = urem <32 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
%V64i8 = urem <64 x i8> undef, <i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19>
@@ -177,40 +238,55 @@ define i32 @urem_const() {
define i32 @srem_uniformconst() {
; CHECK-LABEL: 'srem_uniformconst'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %I128 = srem i128 undef, 7
; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %I64 = srem i64 undef, 7
; CHECK-NEXT: Cost Model: Found an estimated cost of 26 for instruction: %V2i64 = srem <2 x i64> undef, splat (i64 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 52 for instruction: %V4i64 = srem <4 x i64> undef, splat (i64 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 104 for instruction: %V8i64 = srem <8 x i64> undef, splat (i64 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I32 = srem i32 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i32 = srem <2 x i32> undef, splat (i32 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i32 = srem <4 x i32> undef, splat (i32 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i32 = srem <8 x i32> undef, splat (i32 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i32 = srem <16 x i32> undef, splat (i32 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I16 = srem i16 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i16 = srem <2 x i16> undef, splat (i16 7)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i16 = srem <4 x i16> undef, splat (i16 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i16 = srem <8 x i16> undef, splat (i16 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i16 = srem <16 x i16> undef, splat (i16 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i16 = srem <32 x i16> undef, splat (i16 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I8 = srem i8 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i8 = srem <2 x i8> undef, splat (i8 7)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i8 = srem <4 x i8> undef, splat (i8 7)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i8 = srem <8 x i8> undef, splat (i8 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i8 = srem <16 x i8> undef, splat (i8 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i8 = srem <32 x i8> undef, splat (i8 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 448 for instruction: %V64i8 = srem <64 x i8> undef, splat (i8 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
+ %I128 = srem i128 undef, 7
+
%I64 = srem i64 undef, 7
%V2i64 = srem <2 x i64> undef, <i64 7, i64 7>
%V4i64 = srem <4 x i64> undef, <i64 7, i64 7, i64 7, i64 7>
%V8i64 = srem <8 x i64> undef, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7>
%I32 = srem i32 undef, 7
+ %V2i32 = srem <2 x i32> undef, <i32 7, i32 7>
%V4i32 = srem <4 x i32> undef, <i32 7, i32 7, i32 7, i32 7>
%V8i32 = srem <8 x i32> undef, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
%V16i32 = srem <16 x i32> undef, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
%I16 = srem i16 undef, 7
+ %V2i16 = srem <2 x i16> undef, <i16 7, i16 7>
+ %V4i16 = srem <4 x i16> undef, <i16 7, i16 7, i16 7, i16 7>
%V8i16 = srem <8 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
%V16i16 = srem <16 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
%V32i16 = srem <32 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
%I8 = srem i8 undef, 7
+ %V2i8 = srem <2 x i8> undef, <i8 7, i8 7>
+ %V4i8 = srem <4 x i8> undef, <i8 7, i8 7, i8 7, i8 7>
+ %V8i8 = srem <8 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
%V16i8 = srem <16 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
%V32i8 = srem <32 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
%V64i8 = srem <64 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
@@ -220,40 +296,55 @@ define i32 @srem_uniformconst() {
define i32 @urem_uniformconst() {
; CHECK-LABEL: 'urem_uniformconst'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %I128 = urem i128 undef, 7
; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %I64 = urem i64 undef, 7
; CHECK-NEXT: Cost Model: Found an estimated cost of 26 for instruction: %V2i64 = urem <2 x i64> undef, splat (i64 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 52 for instruction: %V4i64 = urem <4 x i64> undef, splat (i64 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 104 for instruction: %V8i64 = urem <8 x i64> undef, splat (i64 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I32 = urem i32 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i32 = urem <2 x i32> undef, splat (i32 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i32 = urem <4 x i32> undef, splat (i32 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i32 = urem <8 x i32> undef, splat (i32 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i32 = urem <16 x i32> undef, splat (i32 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I16 = urem i16 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i16 = urem <2 x i16> undef, splat (i16 7)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i16 = urem <4 x i16> undef, splat (i16 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i16 = urem <8 x i16> undef, splat (i16 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i16 = urem <16 x i16> undef, splat (i16 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i16 = urem <32 x i16> undef, splat (i16 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I8 = urem i8 undef, 7
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i8 = urem <2 x i8> undef, splat (i8 7)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i8 = urem <4 x i8> undef, splat (i8 7)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i8 = urem <8 x i8> undef, splat (i8 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i8 = urem <16 x i8> undef, splat (i8 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i8 = urem <32 x i8> undef, splat (i8 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 448 for instruction: %V64i8 = urem <64 x i8> undef, splat (i8 7)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
+ %I128 = urem i128 undef, 7
+
%I64 = urem i64 undef, 7
%V2i64 = urem <2 x i64> undef, <i64 7, i64 7>
%V4i64 = urem <4 x i64> undef, <i64 7, i64 7, i64 7, i64 7>
%V8i64 = urem <8 x i64> undef, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7>
%I32 = urem i32 undef, 7
+ %V2i32 = urem <2 x i32> undef, <i32 7, i32 7>
%V4i32 = urem <4 x i32> undef, <i32 7, i32 7, i32 7, i32 7>
%V8i32 = urem <8 x i32> undef, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
%V16i32 = urem <16 x i32> undef, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
%I16 = urem i16 undef, 7
+ %V2i16 = urem <2 x i16> undef, <i16 7, i16 7>
+ %V4i16 = urem <4 x i16> undef, <i16 7, i16 7, i16 7, i16 7>
%V8i16 = urem <8 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
%V16i16 = urem <16 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
%V32i16 = urem <32 x i16> undef, <i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7, i16 7>
%I8 = urem i8 undef, 7
+ %V2i8 = urem <2 x i8> undef, <i8 7, i8 7>
+ %V4i8 = urem <4 x i8> undef, <i8 7, i8 7, i8 7, i8 7>
+ %V8i8 = urem <8 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
%V16i8 = urem <16 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
%V32i8 = urem <32 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
%V64i8 = urem <64 x i8> undef, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
@@ -263,40 +354,55 @@ define i32 @urem_uniformconst() {
define i32 @srem_constpow2() {
; CHECK-LABEL: 'srem_constpow2'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %I128 = srem i128 undef, 16
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I64 = srem i64 undef, 16
; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i64 = srem <2 x i64> undef, <i64 8, i64 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i64 = srem <4 x i64> undef, <i64 2, i64 4, i64 8, i64 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i64 = srem <8 x i64> undef, <i64 2, i64 4, i64 8, i64 16, i64 32, i64 64, i64 128, i64 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I32 = srem i32 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i32 = srem <2 x i32> undef, <i32 2, i32 4>
; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i32 = srem <4 x i32> undef, <i32 2, i32 4, i32 8, i32 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i32 = srem <8 x i32> undef, <i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i32 = srem <16 x i32> undef, <i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I16 = srem i16 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i16 = srem <2 x i16> undef, <i16 2, i16 4>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i16 = srem <4 x i16> undef, <i16 2, i16 4, i16 8, i16 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i16 = srem <8 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i16 = srem <16 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i16 = srem <32 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I8 = srem i8 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i8 = srem <2 x i8> undef, <i8 2, i8 4>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i8 = srem <4 x i8> undef, <i8 2, i8 4, i8 8, i8 16>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i8 = srem <8 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i8 = srem <16 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i8 = srem <32 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 448 for instruction: %V64i8 = srem <64 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
+ %I128 = srem i128 undef, 16
+
%I64 = srem i64 undef, 16
%V2i64 = srem <2 x i64> undef, <i64 8, i64 16>
%V4i64 = srem <4 x i64> undef, <i64 2, i64 4, i64 8, i64 16>
%V8i64 = srem <8 x i64> undef, <i64 2, i64 4, i64 8, i64 16, i64 32, i64 64, i64 128, i64 256>
%I32 = srem i32 undef, 16
+ %V2i32 = srem <2 x i32> undef, <i32 2, i32 4>
%V4i32 = srem <4 x i32> undef, <i32 2, i32 4, i32 8, i32 16>
%V8i32 = srem <8 x i32> undef, <i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256>
%V16i32 = srem <16 x i32> undef, <i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256>
%I16 = srem i16 undef, 16
+ %V2i16 = srem <2 x i16> undef, <i16 2, i16 4>
+ %V4i16 = srem <4 x i16> undef, <i16 2, i16 4, i16 8, i16 16>
%V8i16 = srem <8 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
%V16i16 = srem <16 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
%V32i16 = srem <32 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
%I8 = srem i8 undef, 16
+ %V2i8 = srem <2 x i8> undef, <i8 2, i8 4>
+ %V4i8 = srem <4 x i8> undef, <i8 2, i8 4, i8 8, i8 16>
+ %V8i8 = srem <8 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
%V16i8 = srem <16 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
%V32i8 = srem <32 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
%V64i8 = srem <64 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
@@ -306,40 +412,55 @@ define i32 @srem_constpow2() {
define i32 @urem_constpow2() {
; CHECK-LABEL: 'urem_constpow2'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %I128 = urem i128 undef, 16
; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %I64 = urem i64 undef, 16
; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i64 = urem <2 x i64> undef, <i64 8, i64 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i64 = urem <4 x i64> undef, <i64 2, i64 4, i64 8, i64 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i64 = urem <8 x i64> undef, <i64 2, i64 4, i64 8, i64 16, i64 32, i64 64, i64 128, i64 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I32 = urem i32 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i32 = urem <2 x i32> undef, <i32 2, i32 4>
; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i32 = urem <4 x i32> undef, <i32 2, i32 4, i32 8, i32 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i32 = urem <8 x i32> undef, <i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i32 = urem <16 x i32> undef, <i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I16 = urem i16 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i16 = urem <2 x i16> undef, <i16 2, i16 4>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i16 = urem <4 x i16> undef, <i16 2, i16 4, i16 8, i16 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i16 = urem <8 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i16 = urem <16 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i16 = urem <32 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I8 = urem i8 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i8 = urem <2 x i8> undef, <i8 2, i8 4>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i8 = urem <4 x i8> undef, <i8 2, i8 4, i8 8, i8 16>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i8 = urem <8 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i8 = urem <16 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i8 = urem <32 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 448 for instruction: %V64i8 = urem <64 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
+ %I128 = urem i128 undef, 16
+
%I64 = urem i64 undef, 16
%V2i64 = urem <2 x i64> undef, <i64 8, i64 16>
%V4i64 = urem <4 x i64> undef, <i64 2, i64 4, i64 8, i64 16>
%V8i64 = urem <8 x i64> undef, <i64 2, i64 4, i64 8, i64 16, i64 32, i64 64, i64 128, i64 256>
%I32 = urem i32 undef, 16
+ %V2i32 = urem <2 x i32> undef, <i32 2, i32 4>
%V4i32 = urem <4 x i32> undef, <i32 2, i32 4, i32 8, i32 16>
%V8i32 = urem <8 x i32> undef, <i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256>
%V16i32 = urem <16 x i32> undef, <i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256, i32 2, i32 4, i32 8, i32 16, i32 32, i32 64, i32 128, i32 256>
%I16 = urem i16 undef, 16
+ %V2i16 = urem <2 x i16> undef, <i16 2, i16 4>
+ %V4i16 = urem <4 x i16> undef, <i16 2, i16 4, i16 8, i16 16>
%V8i16 = urem <8 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
%V16i16 = urem <16 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
%V32i16 = urem <32 x i16> undef, <i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256, i16 2, i16 4, i16 8, i16 16, i16 32, i16 64, i16 128, i16 256>
%I8 = urem i8 undef, 16
+ %V2i8 = urem <2 x i8> undef, <i8 2, i8 4>
+ %V4i8 = urem <4 x i8> undef, <i8 2, i8 4, i8 8, i8 16>
+ %V8i8 = urem <8 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
%V16i8 = urem <16 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
%V32i8 = urem <32 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
%V64i8 = urem <64 x i8> undef, <i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16, i8 2, i8 4, i8 8, i8 16>
@@ -349,40 +470,55 @@ define i32 @urem_constpow2() {
define i32 @srem_uniformconstpow2() {
; CHECK-LABEL: 'srem_uniformconstpow2'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %I128 = srem i128 undef, 16
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I64 = srem i64 undef, 16
; CHECK-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %V2i64 = srem <2 x i64> undef, splat (i64 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 44 for instruction: %V4i64 = srem <4 x i64> undef, splat (i64 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %V8i64 = srem <8 x i64> undef, splat (i64 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I32 = srem i32 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %V2i32 = srem <2 x i32> undef, splat (i32 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 44 for instruction: %V4i32 = srem <4 x i32> undef, splat (i32 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %V8i32 = srem <8 x i32> undef, splat (i32 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %V16i32 = srem <16 x i32> undef, splat (i32 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I16 = srem i16 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %V2i16 = srem <2 x i16> undef, splat (i16 16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 44 for instruction: %V4i16 = srem <4 x i16> undef, splat (i16 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %V8i16 = srem <8 x i16> undef, splat (i16 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %V16i16 = srem <16 x i16> undef, splat (i16 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 352 for instruction: %V32i16 = srem <32 x i16> undef, splat (i16 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %I8 = srem i8 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %V2i8 = srem <2 x i8> undef, splat (i8 16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 44 for instruction: %V4i8 = srem <4 x i8> undef, splat (i8 16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 88 for instruction: %V8i8 = srem <8 x i8> undef, splat (i8 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 176 for instruction: %V16i8 = srem <16 x i8> undef, splat (i8 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 352 for instruction: %V32i8 = srem <32 x i8> undef, splat (i8 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 704 for instruction: %V64i8 = srem <64 x i8> undef, splat (i8 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
+ %I128 = srem i128 undef, 16
+
%I64 = srem i64 undef, 16
%V2i64 = srem <2 x i64> undef, <i64 16, i64 16>
%V4i64 = srem <4 x i64> undef, <i64 16, i64 16, i64 16, i64 16>
%V8i64 = srem <8 x i64> undef, <i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16>
%I32 = srem i32 undef, 16
+ %V2i32 = srem <2 x i32> undef, <i32 16, i32 16>
%V4i32 = srem <4 x i32> undef, <i32 16, i32 16, i32 16, i32 16>
%V8i32 = srem <8 x i32> undef, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
%V16i32 = srem <16 x i32> undef, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
%I16 = srem i16 undef, 16
+ %V2i16 = srem <2 x i16> undef, <i16 16, i16 16>
+ %V4i16 = srem <4 x i16> undef, <i16 16, i16 16, i16 16, i16 16>
%V8i16 = srem <8 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
%V16i16 = srem <16 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
%V32i16 = srem <32 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
%I8 = srem i8 undef, 16
+ %V2i8 = srem <2 x i8> undef, <i8 16, i8 16>
+ %V4i8 = srem <4 x i8> undef, <i8 16, i8 16, i8 16, i8 16>
+ %V8i8 = srem <8 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16>
%V16i8 = srem <16 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16>
%V32i8 = srem <32 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16>
%V64i8 = srem <64 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16>
@@ -392,40 +528,55 @@ define i32 @srem_uniformconstpow2() {
define i32 @urem_uniformconstpow2() {
; CHECK-LABEL: 'urem_uniformconstpow2'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %I128 = urem i128 undef, 16
; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %I64 = urem i64 undef, 16
; CHECK-NEXT: Cost Model: Found an estimated cost of 26 for instruction: %V2i64 = urem <2 x i64> undef, splat (i64 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 52 for instruction: %V4i64 = urem <4 x i64> undef, splat (i64 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 104 for instruction: %V8i64 = urem <8 x i64> undef, splat (i64 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I32 = urem i32 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i32 = urem <2 x i32> undef, splat (i32 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i32 = urem <4 x i32> undef, splat (i32 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i32 = urem <8 x i32> undef, splat (i32 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i32 = urem <16 x i32> undef, splat (i32 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I16 = urem i16 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i16 = urem <2 x i16> undef, splat (i16 16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i16 = urem <4 x i16> undef, splat (i16 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i16 = urem <8 x i16> undef, splat (i16 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i16 = urem <16 x i16> undef, splat (i16 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i16 = urem <32 x i16> undef, splat (i16 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I8 = urem i8 undef, 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i8 = urem <2 x i8> undef, splat (i8 16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i8 = urem <4 x i8> undef, splat (i8 16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i8 = urem <8 x i8> undef, splat (i8 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i8 = urem <16 x i8> undef, splat (i8 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i8 = urem <32 x i8> undef, splat (i8 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 448 for instruction: %V64i8 = urem <64 x i8> undef, splat (i8 16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
+ %I128 = urem i128 undef, 16
+
%I64 = urem i64 undef, 16
%V2i64 = urem <2 x i64> undef, <i64 16, i64 16>
%V4i64 = urem <4 x i64> undef, <i64 16, i64 16, i64 16, i64 16>
%V8i64 = urem <8 x i64> undef, <i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16, i64 16>
%I32 = urem i32 undef, 16
+ %V2i32 = urem <2 x i32> undef, <i32 16, i32 16>
%V4i32 = urem <4 x i32> undef, <i32 16, i32 16, i32 16, i32 16>
%V8i32 = urem <8 x i32> undef, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
%V16i32 = urem <16 x i32> undef, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
%I16 = urem i16 undef, 16
+ %V2i16 = urem <2 x i16> undef, <i16 16, i16 16>
+ %V4i16 = urem <4 x i16> undef, <i16 16, i16 16, i16 16, i16 16>
%V8i16 = urem <8 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
%V16i16 = urem <16 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
%V32i16 = urem <32 x i16> undef, <i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16>
%I8 = urem i8 undef, 16
+ %V2i8 = urem <2 x i8> undef, <i8 16, i8 16>
+ %V4i8 = urem <4 x i8> undef, <i8 16, i8 16, i8 16, i8 16>
+ %V8i8 = urem <8 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16>
%V16i8 = urem <16 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16>
%V32i8 = urem <32 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16>
%V64i8 = urem <64 x i8> undef, <i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16>
@@ -435,40 +586,55 @@ define i32 @urem_uniformconstpow2() {
define i32 @srem_constnegpow2() {
; CHECK-LABEL: 'srem_constnegpow2'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %I128 = srem i128 undef, -16
; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %I64 = srem i64 undef, -16
; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i64 = srem <2 x i64> undef, <i64 -8, i64 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i64 = srem <4 x i64> undef, <i64 -2, i64 -4, i64 -8, i64 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i64 = srem <8 x i64> undef, <i64 -2, i64 -4, i64 -8, i64 -16, i64 -32, i64 -64, i64 -128, i64 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I32 = srem i32 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i32 = srem <2 x i32> undef, <i32 -2, i32 -4>
; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i32 = srem <4 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i32 = srem <8 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i32 = srem <16 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256, i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I16 = srem i16 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i16 = srem <2 x i16> undef, <i16 -2, i16 -4>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i16 = srem <4 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i16 = srem <8 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i16 = srem <16 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i16 = srem <32 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I8 = srem i8 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i8 = srem <2 x i8> undef, <i8 -2, i8 -4>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i8 = srem <4 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i8 = srem <8 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i8 = srem <16 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i8 = srem <32 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 448 for instruction: %V64i8 = srem <64 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
+ %I128 = srem i128 undef, -16
+
%I64 = srem i64 undef, -16
%V2i64 = srem <2 x i64> undef, <i64 -8, i64 -16>
%V4i64 = srem <4 x i64> undef, <i64 -2, i64 -4, i64 -8, i64 -16>
%V8i64 = srem <8 x i64> undef, <i64 -2, i64 -4, i64 -8, i64 -16, i64 -32, i64 -64, i64 -128, i64 -256>
%I32 = srem i32 undef, -16
+ %V2i32 = srem <2 x i32> undef, <i32 -2, i32 -4>
%V4i32 = srem <4 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16>
%V8i32 = srem <8 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256>
%V16i32 = srem <16 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256, i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256>
%I16 = srem i16 undef, -16
+ %V2i16 = srem <2 x i16> undef, <i16 -2, i16 -4>
+ %V4i16 = srem <4 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16>
%V8i16 = srem <8 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
%V16i16 = srem <16 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
%V32i16 = srem <32 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
%I8 = srem i8 undef, -16
+ %V2i8 = srem <2 x i8> undef, <i8 -2, i8 -4>
+ %V4i8 = srem <4 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16>
+ %V8i8 = srem <8 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
%V16i8 = srem <16 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
%V32i8 = srem <32 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
%V64i8 = srem <64 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
@@ -478,40 +644,55 @@ define i32 @srem_constnegpow2() {
define i32 @urem_constnegpow2() {
; CHECK-LABEL: 'urem_constnegpow2'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %I128 = urem i128 undef, -16
; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %I64 = urem i64 undef, -16
; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i64 = urem <2 x i64> undef, <i64 -8, i64 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i64 = urem <4 x i64> undef, <i64 -2, i64 -4, i64 -8, i64 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i64 = urem <8 x i64> undef, <i64 -2, i64 -4, i64 -8, i64 -16, i64 -32, i64 -64, i64 -128, i64 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I32 = urem i32 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i32 = urem <2 x i32> undef, <i32 -2, i32 -4>
; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i32 = urem <4 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i32 = urem <8 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i32 = urem <16 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256, i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I16 = urem i16 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i16 = urem <2 x i16> undef, <i16 -2, i16 -4>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i16 = urem <4 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i16 = urem <8 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i16 = urem <16 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i16 = urem <32 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I8 = urem i8 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i8 = urem <2 x i8> undef, <i8 -2, i8 -4>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i8 = urem <4 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i8 = urem <8 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i8 = urem <16 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i8 = urem <32 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 448 for instruction: %V64i8 = urem <64 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
+ %I128 = urem i128 undef, -16
+
%I64 = urem i64 undef, -16
%V2i64 = urem <2 x i64> undef, <i64 -8, i64 -16>
%V4i64 = urem <4 x i64> undef, <i64 -2, i64 -4, i64 -8, i64 -16>
%V8i64 = urem <8 x i64> undef, <i64 -2, i64 -4, i64 -8, i64 -16, i64 -32, i64 -64, i64 -128, i64 -256>
%I32 = urem i32 undef, -16
+ %V2i32 = urem <2 x i32> undef, <i32 -2, i32 -4>
%V4i32 = urem <4 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16>
%V8i32 = urem <8 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256>
%V16i32 = urem <16 x i32> undef, <i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256, i32 -2, i32 -4, i32 -8, i32 -16, i32 -32, i32 -64, i32 -128, i32 -256>
%I16 = urem i16 undef, -16
+ %V2i16 = urem <2 x i16> undef, <i16 -2, i16 -4>
+ %V4i16 = urem <4 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16>
%V8i16 = urem <8 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
%V16i16 = urem <16 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
%V32i16 = urem <32 x i16> undef, <i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256, i16 -2, i16 -4, i16 -8, i16 -16, i16 -32, i16 -64, i16 -128, i16 -256>
%I8 = urem i8 undef, -16
+ %V2i8 = urem <2 x i8> undef, <i8 -2, i8 -4>
+ %V4i8 = urem <4 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16>
+ %V8i8 = urem <8 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
%V16i8 = urem <16 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
%V32i8 = urem <32 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
%V64i8 = urem <64 x i8> undef, <i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16, i8 -2, i8 -4, i8 -8, i8 -16>
@@ -521,40 +702,55 @@ define i32 @urem_constnegpow2() {
define i32 @srem_uniformconstnegpow2() {
; CHECK-LABEL: 'srem_uniformconstnegpow2'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %I128 = srem i128 undef, -16
; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %I64 = srem i64 undef, -16
; CHECK-NEXT: Cost Model: Found an estimated cost of 26 for instruction: %V2i64 = srem <2 x i64> undef, splat (i64 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 52 for instruction: %V4i64 = srem <4 x i64> undef, splat (i64 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 104 for instruction: %V8i64 = srem <8 x i64> undef, splat (i64 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I32 = srem i32 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i32 = srem <2 x i32> undef, splat (i32 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i32 = srem <4 x i32> undef, splat (i32 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i32 = srem <8 x i32> undef, splat (i32 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i32 = srem <16 x i32> undef, splat (i32 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I16 = srem i16 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i16 = srem <2 x i16> undef, splat (i16 -16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i16 = srem <4 x i16> undef, splat (i16 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i16 = srem <8 x i16> undef, splat (i16 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i16 = srem <16 x i16> undef, splat (i16 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i16 = srem <32 x i16> undef, splat (i16 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I8 = srem i8 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i8 = srem <2 x i8> undef, splat (i8 -16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i8 = srem <4 x i8> undef, splat (i8 -16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i8 = srem <8 x i8> undef, splat (i8 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i8 = srem <16 x i8> undef, splat (i8 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i8 = srem <32 x i8> undef, splat (i8 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 448 for instruction: %V64i8 = srem <64 x i8> undef, splat (i8 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
+ %I128 = srem i128 undef, -16
+
%I64 = srem i64 undef, -16
%V2i64 = srem <2 x i64> undef, <i64 -16, i64 -16>
%V4i64 = srem <4 x i64> undef, <i64 -16, i64 -16, i64 -16, i64 -16>
%V8i64 = srem <8 x i64> undef, <i64 -16, i64 -16, i64 -16, i64 -16, i64 -16, i64 -16, i64 -16, i64 -16>
%I32 = srem i32 undef, -16
+ %V2i32 = srem <2 x i32> undef, <i32 -16, i32 -16>
%V4i32 = srem <4 x i32> undef, <i32 -16, i32 -16, i32 -16, i32 -16>
%V8i32 = srem <8 x i32> undef, <i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16>
%V16i32 = srem <16 x i32> undef, <i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16>
%I16 = srem i16 undef, -16
+ %V2i16 = srem <2 x i16> undef, <i16 -16, i16 -16>
+ %V4i16 = srem <4 x i16> undef, <i16 -16, i16 -16, i16 -16, i16 -16>
%V8i16 = srem <8 x i16> undef, <i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16>
%V16i16 = srem <16 x i16> undef, <i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16>
%V32i16 = srem <32 x i16> undef, <i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16>
%I8 = srem i8 undef, -16
+ %V2i8 = srem <2 x i8> undef, <i8 -16, i8 -16>
+ %V4i8 = srem <4 x i8> undef, <i8 -16, i8 -16, i8 -16, i8 -16>
+ %V8i8 = srem <8 x i8> undef, <i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16>
%V16i8 = srem <16 x i8> undef, <i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16>
%V32i8 = srem <32 x i8> undef, <i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16>
%V64i8 = srem <64 x i8> undef, <i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16>
@@ -564,40 +760,55 @@ define i32 @srem_uniformconstnegpow2() {
define i32 @urem_uniformconstnegpow2() {
; CHECK-LABEL: 'urem_uniformconstnegpow2'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %I128 = urem i128 undef, -16
; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %I64 = urem i64 undef, -16
; CHECK-NEXT: Cost Model: Found an estimated cost of 26 for instruction: %V2i64 = urem <2 x i64> undef, splat (i64 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 52 for instruction: %V4i64 = urem <4 x i64> undef, splat (i64 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 104 for instruction: %V8i64 = urem <8 x i64> undef, splat (i64 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I32 = urem i32 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i32 = urem <2 x i32> undef, splat (i32 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i32 = urem <4 x i32> undef, splat (i32 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i32 = urem <8 x i32> undef, splat (i32 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i32 = urem <16 x i32> undef, splat (i32 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I16 = urem i16 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i16 = urem <2 x i16> undef, splat (i16 -16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i16 = urem <4 x i16> undef, splat (i16 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i16 = urem <8 x i16> undef, splat (i16 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i16 = urem <16 x i16> undef, splat (i16 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i16 = urem <32 x i16> undef, splat (i16 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %I8 = urem i8 undef, -16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %V2i8 = urem <2 x i8> undef, splat (i8 -16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 28 for instruction: %V4i8 = urem <4 x i8> undef, splat (i8 -16)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 56 for instruction: %V8i8 = urem <8 x i8> undef, splat (i8 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 112 for instruction: %V16i8 = urem <16 x i8> undef, splat (i8 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 224 for instruction: %V32i8 = urem <32 x i8> undef, splat (i8 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 448 for instruction: %V64i8 = urem <64 x i8> undef, splat (i8 -16)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
+ %I128 = urem i128 undef, -16
+
%I64 = urem i64 undef, -16
%V2i64 = urem <2 x i64> undef, <i64 -16, i64 -16>
%V4i64 = urem <4 x i64> undef, <i64 -16, i64 -16, i64 -16, i64 -16>
%V8i64 = urem <8 x i64> undef, <i64 -16, i64 -16, i64 -16, i64 -16, i64 -16, i64 -16, i64 -16, i64 -16>
%I32 = urem i32 undef, -16
+ %V2i32 = urem <2 x i32> undef, <i32 -16, i32 -16>
%V4i32 = urem <4 x i32> undef, <i32 -16, i32 -16, i32 -16, i32 -16>
%V8i32 = urem <8 x i32> undef, <i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16>
%V16i32 = urem <16 x i32> undef, <i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16, i32 -16>
%I16 = urem i16 undef, -16
+ %V2i16 = urem <2 x i16> undef, <i16 -16, i16 -16>
+ %V4i16 = urem <4 x i16> undef, <i16 -16, i16 -16, i16 -16, i16 -16>
%V8i16 = urem <8 x i16> undef, <i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16>
%V16i16 = urem <16 x i16> undef, <i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16>
%V32i16 = urem <32 x i16> undef, <i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16>
%I8 = urem i8 undef, -16
+ %V2i8 = urem <2 x i8> undef, <i8 -16, i8 -16>
+ %V4i8 = urem <4 x i8> undef, <i8 -16, i8 -16, i8 -16, i8 -16>
+ %V8i8 = urem <8 x i8> undef, <i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16>
%V16i8 = urem <16 x i8> undef, <i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16>
%V32i8 = urem <32 x i8> undef, <i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16>
%V64i8 = urem <64 x i8> undef, <i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16, i8 -16>
diff --git a/llvm/test/Analysis/CostModel/AArch64/shuffle-extract.ll b/llvm/test/Analysis/CostModel/AArch64/shuffle-extract.ll
index 5035619..b81b6a9 100644
--- a/llvm/test/Analysis/CostModel/AArch64/shuffle-extract.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/shuffle-extract.ll
@@ -15,7 +15,7 @@ define void @extract_half() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i8_hi = shufflevector <8 x i8> poison, <8 x i8> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v16i8_lo = shufflevector <16 x i8> poison, <16 x i8> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i8_mi = shufflevector <16 x i8> poison, <16 x i8> poison, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v16i8_hi = shufflevector <16 x i8> poison, <16 x i8> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i8_hi = shufflevector <16 x i8> poison, <16 x i8> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i16_lo = shufflevector <2 x i16> poison, <2 x i16> poison, <1 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2i16_hi = shufflevector <2 x i16> poison, <2 x i16> poison, <1 x i32> <i32 1>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i16_lo = shufflevector <4 x i16> poison, <4 x i16> poison, <2 x i32> <i32 0, i32 1>
@@ -23,7 +23,7 @@ define void @extract_half() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i16_hi = shufflevector <4 x i16> poison, <4 x i16> poison, <2 x i32> <i32 2, i32 3>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i16_lo = shufflevector <8 x i16> poison, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i16_mi = shufflevector <8 x i16> poison, <8 x i16> poison, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i16_hi = shufflevector <8 x i16> poison, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i16_hi = shufflevector <8 x i16> poison, <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v16i16_lo = shufflevector <16 x i16> poison, <16 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i16_mi = shufflevector <16 x i16> poison, <16 x i16> poison, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i16_hi = shufflevector <16 x i16> poison, <16 x i16> poison, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -31,7 +31,7 @@ define void @extract_half() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2i32_hi = shufflevector <2 x i32> poison, <2 x i32> poison, <1 x i32> <i32 1>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_lo = shufflevector <4 x i32> poison, <4 x i32> poison, <2 x i32> <i32 0, i32 1>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i32_mi = shufflevector <4 x i32> poison, <4 x i32> poison, <2 x i32> <i32 1, i32 2>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i32_hi = shufflevector <4 x i32> poison, <4 x i32> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i32_hi = shufflevector <4 x i32> poison, <4 x i32> poison, <2 x i32> <i32 2, i32 3>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i32_lo = shufflevector <8 x i32> poison, <8 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8i32_mi = shufflevector <8 x i32> poison, <8 x i32> poison, <4 x i32> <i32 2, i32 3, i32 4, i32 5>
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8i32_hi = shufflevector <8 x i32> poison, <8 x i32> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
@@ -114,19 +114,19 @@ define void @extract_qtr() {
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i16_mi = shufflevector <8 x i16> poison, <8 x i16> poison, <2 x i32> <i32 2, i32 3>
; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i16_hi = shufflevector <8 x i16> poison, <8 x i16> poison, <2 x i32> <i32 4, i32 5>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v16i16_lo = shufflevector <16 x i16> poison, <16 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i16_mi = shufflevector <16 x i16> poison, <16 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16i16_mi = shufflevector <16 x i16> poison, <16 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i16_hi = shufflevector <16 x i16> poison, <16 x i16> poison, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_lo = shufflevector <4 x i32> poison, <4 x i32> poison, <1 x i32> zeroinitializer
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i32_mi = shufflevector <4 x i32> poison, <4 x i32> poison, <1 x i32> <i32 1>
; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i32_hi = shufflevector <4 x i32> poison, <4 x i32> poison, <1 x i32> <i32 2>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i32_lo = shufflevector <8 x i32> poison, <8 x i32> poison, <2 x i32> <i32 0, i32 1>
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8i32_mi = shufflevector <8 x i32> poison, <8 x i32> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8i32_mi = shufflevector <8 x i32> poison, <8 x i32> poison, <2 x i32> <i32 2, i32 3>
; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v8i32_hi = shufflevector <8 x i32> poison, <8 x i32> poison, <2 x i32> <i32 4, i32 5>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v16i32_lo = shufflevector <16 x i32> poison, <16 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v16i32_mi = shufflevector <16 x i32> poison, <16 x i32> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %v16i32_hi = shufflevector <16 x i32> poison, <16 x i32> poison, <4 x i32> <i32 8, i32 9, i32 10, i32 11>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i64_lo = shufflevector <4 x i64> poison, <4 x i64> poison, <1 x i32> zeroinitializer
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i64_mi = shufflevector <4 x i64> poison, <4 x i64> poison, <1 x i32> <i32 1>
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4i64_mi = shufflevector <4 x i64> poison, <4 x i64> poison, <1 x i32> <i32 1>
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4i64_hi = shufflevector <4 x i64> poison, <4 x i64> poison, <1 x i32> <i32 2>
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i64_lo = shufflevector <8 x i64> poison, <8 x i64> poison, <2 x i32> <i32 0, i32 1>
; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v8i64_mi = shufflevector <8 x i64> poison, <8 x i64> poison, <2 x i32> <i32 2, i32 3>
diff --git a/llvm/test/Analysis/CostModel/X86/shuffle-two-src-codesize.ll b/llvm/test/Analysis/CostModel/X86/shuffle-two-src-codesize.ll
index 027af62..b672df5 100644
--- a/llvm/test/Analysis/CostModel/X86/shuffle-two-src-codesize.ll
+++ b/llvm/test/Analysis/CostModel/X86/shuffle-two-src-codesize.ll
@@ -124,7 +124,7 @@ define void @test_vXf32(<2 x float> %src64, <4 x float> %src128, <8 x float> %sr
; SSE42-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> %src64_1, <2 x i32> <i32 3, i32 0>
; SSE42-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V128 = shufflevector <4 x float> %src128, <4 x float> %src128_1, <4 x i32> <i32 3, i32 6, i32 1, i32 5>
; SSE42-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V256 = shufflevector <8 x float> %src256, <8 x float> %src256_1, <8 x i32> <i32 7, i32 6, i32 8, i32 4, i32 3, i32 2, i32 12, i32 0>
-; SSE42-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V512 = shufflevector <16 x float> %src512, <16 x float> %src512_1, <16 x i32> <i32 15, i32 17, i32 13, i32 20, i32 11, i32 10, i32 8, i32 8, i32 7, i32 22, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; SSE42-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V512 = shufflevector <16 x float> %src512, <16 x float> %src512_1, <16 x i32> <i32 15, i32 17, i32 13, i32 20, i32 11, i32 10, i32 8, i32 8, i32 7, i32 22, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; SSE42-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V1024 = shufflevector <32 x float> %src1024, <32 x float> %src1024_1, <32 x i32> <i32 31, i32 33, i32 20, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 48, i32 13, i32 12, i32 11, i32 11, i32 9, i32 45, i32 7, i32 11, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; SSE42-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
diff --git a/llvm/test/Analysis/CostModel/X86/shuffle-two-src-latency.ll b/llvm/test/Analysis/CostModel/X86/shuffle-two-src-latency.ll
index f9f045f..fc8c0cd 100644
--- a/llvm/test/Analysis/CostModel/X86/shuffle-two-src-latency.ll
+++ b/llvm/test/Analysis/CostModel/X86/shuffle-two-src-latency.ll
@@ -124,7 +124,7 @@ define void @test_vXf32(<2 x float> %src64, <4 x float> %src128, <8 x float> %sr
; SSE42-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> %src64_1, <2 x i32> <i32 3, i32 0>
; SSE42-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V128 = shufflevector <4 x float> %src128, <4 x float> %src128_1, <4 x i32> <i32 3, i32 6, i32 1, i32 5>
; SSE42-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V256 = shufflevector <8 x float> %src256, <8 x float> %src256_1, <8 x i32> <i32 7, i32 6, i32 8, i32 4, i32 3, i32 2, i32 12, i32 0>
-; SSE42-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V512 = shufflevector <16 x float> %src512, <16 x float> %src512_1, <16 x i32> <i32 15, i32 17, i32 13, i32 20, i32 11, i32 10, i32 8, i32 8, i32 7, i32 22, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; SSE42-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V512 = shufflevector <16 x float> %src512, <16 x float> %src512_1, <16 x i32> <i32 15, i32 17, i32 13, i32 20, i32 11, i32 10, i32 8, i32 8, i32 7, i32 22, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; SSE42-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V1024 = shufflevector <32 x float> %src1024, <32 x float> %src1024_1, <32 x i32> <i32 31, i32 33, i32 20, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 48, i32 13, i32 12, i32 11, i32 11, i32 9, i32 45, i32 7, i32 11, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; SSE42-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
diff --git a/llvm/test/Analysis/CostModel/X86/shuffle-two-src-sizelatency.ll b/llvm/test/Analysis/CostModel/X86/shuffle-two-src-sizelatency.ll
index 76690af..b48b620 100644
--- a/llvm/test/Analysis/CostModel/X86/shuffle-two-src-sizelatency.ll
+++ b/llvm/test/Analysis/CostModel/X86/shuffle-two-src-sizelatency.ll
@@ -124,7 +124,7 @@ define void @test_vXf32(<2 x float> %src64, <4 x float> %src128, <8 x float> %sr
; SSE42-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> %src64_1, <2 x i32> <i32 3, i32 0>
; SSE42-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V128 = shufflevector <4 x float> %src128, <4 x float> %src128_1, <4 x i32> <i32 3, i32 6, i32 1, i32 5>
; SSE42-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V256 = shufflevector <8 x float> %src256, <8 x float> %src256_1, <8 x i32> <i32 7, i32 6, i32 8, i32 4, i32 3, i32 2, i32 12, i32 0>
-; SSE42-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V512 = shufflevector <16 x float> %src512, <16 x float> %src512_1, <16 x i32> <i32 15, i32 17, i32 13, i32 20, i32 11, i32 10, i32 8, i32 8, i32 7, i32 22, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; SSE42-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V512 = shufflevector <16 x float> %src512, <16 x float> %src512_1, <16 x i32> <i32 15, i32 17, i32 13, i32 20, i32 11, i32 10, i32 8, i32 8, i32 7, i32 22, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; SSE42-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V1024 = shufflevector <32 x float> %src1024, <32 x float> %src1024_1, <32 x i32> <i32 31, i32 33, i32 20, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 48, i32 13, i32 12, i32 11, i32 11, i32 9, i32 45, i32 7, i32 11, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; SSE42-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
diff --git a/llvm/test/Analysis/CostModel/X86/shuffle-two-src.ll b/llvm/test/Analysis/CostModel/X86/shuffle-two-src.ll
index 034ec0a..efa0f2e 100644
--- a/llvm/test/Analysis/CostModel/X86/shuffle-two-src.ll
+++ b/llvm/test/Analysis/CostModel/X86/shuffle-two-src.ll
@@ -124,7 +124,7 @@ define void @test_vXf32(<2 x float> %src64, <4 x float> %src128, <8 x float> %sr
; SSE42-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V64 = shufflevector <2 x float> %src64, <2 x float> %src64_1, <2 x i32> <i32 3, i32 0>
; SSE42-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V128 = shufflevector <4 x float> %src128, <4 x float> %src128_1, <4 x i32> <i32 3, i32 6, i32 1, i32 5>
; SSE42-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V256 = shufflevector <8 x float> %src256, <8 x float> %src256_1, <8 x i32> <i32 7, i32 6, i32 8, i32 4, i32 3, i32 2, i32 12, i32 0>
-; SSE42-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V512 = shufflevector <16 x float> %src512, <16 x float> %src512_1, <16 x i32> <i32 15, i32 17, i32 13, i32 20, i32 11, i32 10, i32 8, i32 8, i32 7, i32 22, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; SSE42-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %V512 = shufflevector <16 x float> %src512, <16 x float> %src512_1, <16 x i32> <i32 15, i32 17, i32 13, i32 20, i32 11, i32 10, i32 8, i32 8, i32 7, i32 22, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; SSE42-NEXT: Cost Model: Found an estimated cost of 13 for instruction: %V1024 = shufflevector <32 x float> %src1024, <32 x float> %src1024_1, <32 x i32> <i32 31, i32 33, i32 20, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 48, i32 13, i32 12, i32 11, i32 11, i32 9, i32 45, i32 7, i32 11, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
; SSE42-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
diff --git a/llvm/test/Analysis/CostModel/X86/vector-insert-value.ll b/llvm/test/Analysis/CostModel/X86/vector-insert-value.ll
index 2524976..ee82e10 100644
--- a/llvm/test/Analysis/CostModel/X86/vector-insert-value.ll
+++ b/llvm/test/Analysis/CostModel/X86/vector-insert-value.ll
@@ -76,58 +76,58 @@ define i32 @insert_double(i32 %arg, double %val, <2 x double> %src128, <4 x doub
define i32 @insert_float(i32 %arg, float %val, <2 x float> %src64, <4 x float> %src128, <8 x float> %src256, <16 x float> %src512) {
; SSE2-LABEL: 'insert_float'
; SSE2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2f32_a = insertelement <2 x float> %src64, float %val, i32 %arg
-; SSE2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32_0 = insertelement <2 x float> %src64, float %val, i32 0
+; SSE2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f32_0 = insertelement <2 x float> %src64, float %val, i32 0
; SSE2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32_1 = insertelement <2 x float> %src64, float %val, i32 1
; SSE2-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4f32_a = insertelement <4 x float> %src128, float %val, i32 %arg
-; SSE2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32_0 = insertelement <4 x float> %src128, float %val, i32 0
+; SSE2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4f32_0 = insertelement <4 x float> %src128, float %val, i32 0
; SSE2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32_3 = insertelement <4 x float> %src128, float %val, i32 3
; SSE2-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v8f32_a = insertelement <8 x float> %src256, float %val, i32 %arg
-; SSE2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8f32_0 = insertelement <8 x float> %src256, float %val, i32 0
+; SSE2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8f32_0 = insertelement <8 x float> %src256, float %val, i32 0
; SSE2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8f32_3 = insertelement <8 x float> %src256, float %val, i32 3
-; SSE2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8f32_4 = insertelement <8 x float> %src256, float %val, i32 4
+; SSE2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8f32_4 = insertelement <8 x float> %src256, float %val, i32 4
; SSE2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8f32_7 = insertelement <8 x float> %src256, float %val, i32 7
; SSE2-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v16f32_a = insertelement <16 x float> %src512, float %val, i32 %arg
-; SSE2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16f32_0 = insertelement <16 x float> %src512, float %val, i32 0
+; SSE2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16f32_0 = insertelement <16 x float> %src512, float %val, i32 0
; SSE2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16f32_3 = insertelement <16 x float> %src512, float %val, i32 3
-; SSE2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16f32_8 = insertelement <16 x float> %src512, float %val, i32 8
+; SSE2-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16f32_8 = insertelement <16 x float> %src512, float %val, i32 8
; SSE2-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16f32_15 = insertelement <16 x float> %src512, float %val, i32 15
; SSE2-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; SSE3-LABEL: 'insert_float'
; SSE3-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2f32_a = insertelement <2 x float> %src64, float %val, i32 %arg
-; SSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32_0 = insertelement <2 x float> %src64, float %val, i32 0
+; SSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f32_0 = insertelement <2 x float> %src64, float %val, i32 0
; SSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32_1 = insertelement <2 x float> %src64, float %val, i32 1
; SSE3-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4f32_a = insertelement <4 x float> %src128, float %val, i32 %arg
-; SSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32_0 = insertelement <4 x float> %src128, float %val, i32 0
+; SSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4f32_0 = insertelement <4 x float> %src128, float %val, i32 0
; SSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32_3 = insertelement <4 x float> %src128, float %val, i32 3
; SSE3-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v8f32_a = insertelement <8 x float> %src256, float %val, i32 %arg
-; SSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8f32_0 = insertelement <8 x float> %src256, float %val, i32 0
+; SSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8f32_0 = insertelement <8 x float> %src256, float %val, i32 0
; SSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8f32_3 = insertelement <8 x float> %src256, float %val, i32 3
-; SSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8f32_4 = insertelement <8 x float> %src256, float %val, i32 4
+; SSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8f32_4 = insertelement <8 x float> %src256, float %val, i32 4
; SSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8f32_7 = insertelement <8 x float> %src256, float %val, i32 7
; SSE3-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v16f32_a = insertelement <16 x float> %src512, float %val, i32 %arg
-; SSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16f32_0 = insertelement <16 x float> %src512, float %val, i32 0
+; SSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16f32_0 = insertelement <16 x float> %src512, float %val, i32 0
; SSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16f32_3 = insertelement <16 x float> %src512, float %val, i32 3
-; SSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16f32_8 = insertelement <16 x float> %src512, float %val, i32 8
+; SSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16f32_8 = insertelement <16 x float> %src512, float %val, i32 8
; SSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16f32_15 = insertelement <16 x float> %src512, float %val, i32 15
; SSE3-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
; SSSE3-LABEL: 'insert_float'
; SSSE3-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v2f32_a = insertelement <2 x float> %src64, float %val, i32 %arg
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32_0 = insertelement <2 x float> %src64, float %val, i32 0
+; SSSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v2f32_0 = insertelement <2 x float> %src64, float %val, i32 0
; SSSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2f32_1 = insertelement <2 x float> %src64, float %val, i32 1
; SSSE3-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4f32_a = insertelement <4 x float> %src128, float %val, i32 %arg
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32_0 = insertelement <4 x float> %src128, float %val, i32 0
+; SSSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v4f32_0 = insertelement <4 x float> %src128, float %val, i32 0
; SSSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v4f32_3 = insertelement <4 x float> %src128, float %val, i32 3
; SSSE3-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %v8f32_a = insertelement <8 x float> %src256, float %val, i32 %arg
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8f32_0 = insertelement <8 x float> %src256, float %val, i32 0
+; SSSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8f32_0 = insertelement <8 x float> %src256, float %val, i32 0
; SSSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8f32_3 = insertelement <8 x float> %src256, float %val, i32 3
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8f32_4 = insertelement <8 x float> %src256, float %val, i32 4
+; SSSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v8f32_4 = insertelement <8 x float> %src256, float %val, i32 4
; SSSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v8f32_7 = insertelement <8 x float> %src256, float %val, i32 7
; SSSE3-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %v16f32_a = insertelement <16 x float> %src512, float %val, i32 %arg
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16f32_0 = insertelement <16 x float> %src512, float %val, i32 0
+; SSSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16f32_0 = insertelement <16 x float> %src512, float %val, i32 0
; SSSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16f32_3 = insertelement <16 x float> %src512, float %val, i32 3
-; SSSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16f32_8 = insertelement <16 x float> %src512, float %val, i32 8
+; SSSE3-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %v16f32_8 = insertelement <16 x float> %src512, float %val, i32 8
; SSSE3-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v16f32_15 = insertelement <16 x float> %src512, float %val, i32 15
; SSSE3-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret i32 undef
;
diff --git a/llvm/test/Analysis/Lint/abi-attrs.ll b/llvm/test/Analysis/Lint/abi-attrs.ll
new file mode 100644
index 0000000..5a3ece6
--- /dev/null
+++ b/llvm/test/Analysis/Lint/abi-attrs.ll
@@ -0,0 +1,106 @@
+; RUN: opt < %s -passes=lint -disable-output 2>&1 | FileCheck %s
+
+declare void @fn_nothing_i8(i8 %x)
+declare void @fn_zeroext(i8 zeroext %x)
+declare void @fn_signext(i8 signext %x)
+declare void @fn_inreg(i8 inreg %x)
+
+declare void @fn_nothing_ptr(ptr %x)
+declare void @fn_byval(ptr byval(i8) %x)
+declare void @fn_byref(ptr byref(i8) %x)
+declare void @fn_inalloca(ptr inalloca(i8) %x)
+declare void @fn_preallocated(ptr preallocated(i8) %x)
+declare void @fn_sret(ptr sret(i8) %x)
+
+define void @caller_zeroext(i8 %x) {
+; CHECK: Undefined behavior: ABI attribute zeroext not present on both function and call-site
+; CHECK: call void @fn_zeroext(i8 %x)
+ call void @fn_zeroext(i8 %x)
+
+; CHECK: Undefined behavior: ABI attribute zeroext not present on both function and call-site
+; CHECK: call void @fn_nothing_i8(i8 zeroext %x)
+ call void @fn_nothing_i8(i8 zeroext %x)
+ ret void
+}
+
+define void @caller_signext(i8 %x) {
+; CHECK: Undefined behavior: ABI attribute signext not present on both function and call-site
+; CHECK: call void @fn_signext(i8 %x)
+ call void @fn_signext(i8 %x)
+
+; CHECK: Undefined behavior: ABI attribute signext not present on both function and call-site
+; CHECK: call void @fn_nothing_i8(i8 signext %x)
+ call void @fn_nothing_i8(i8 signext %x)
+ ret void
+}
+
+define void @caller_inreg(i8 %x) {
+; CHECK: Undefined behavior: ABI attribute inreg not present on both function and call-site
+; CHECK: call void @fn_inreg(i8 %x)
+ call void @fn_inreg(i8 %x)
+
+; CHECK: Undefined behavior: ABI attribute inreg not present on both function and call-site
+; CHECK: call void @fn_nothing_i8(i8 inreg %x)
+ call void @fn_nothing_i8(i8 inreg %x)
+ ret void
+}
+
+define void @caller_byval(ptr %x) {
+; CHECK: Undefined behavior: ABI attribute byval not present on both function and call-site
+; CHECK: call void @fn_byval(ptr %x)
+ call void @fn_byval(ptr %x)
+
+; CHECK: Undefined behavior: ABI attribute byval not present on both function and call-site
+; CHECK: call void @fn_nothing_ptr(ptr byval(i8) %x)
+ call void @fn_nothing_ptr(ptr byval(i8) %x)
+
+; CHECK: Undefined behavior: ABI attribute byval does not have same argument for function and call-site
+; CHECK: call void @fn_byval(ptr byval(i16) %x)
+ call void @fn_byval(ptr byval(i16) %x)
+ ret void
+}
+
+define void @caller_byref(ptr %x) {
+; CHECK: Undefined behavior: ABI attribute byref not present on both function and call-site
+; CHECK: call void @fn_byref(ptr %x)
+ call void @fn_byref(ptr %x)
+
+; CHECK: Undefined behavior: ABI attribute byref not present on both function and call-site
+; CHECK: call void @fn_nothing_ptr(ptr byref(i8) %x)
+ call void @fn_nothing_ptr(ptr byref(i8) %x)
+
+; CHECK: Undefined behavior: ABI attribute byref does not have same argument for function and call-site
+; CHECK: call void @fn_byref(ptr byref(i16) %x)
+ call void @fn_byref(ptr byref(i16) %x)
+ ret void
+}
+
+define void @caller_inalloca(ptr %x) {
+; CHECK: Undefined behavior: ABI attribute inalloca not present on both function and call-site
+; CHECK: call void @fn_inalloca(ptr %x)
+ call void @fn_inalloca(ptr %x)
+
+; CHECK: Undefined behavior: ABI attribute inalloca not present on both function and call-site
+; CHECK: call void @fn_nothing_ptr(ptr inalloca(i8) %x)
+ call void @fn_nothing_ptr(ptr inalloca(i8) %x)
+
+; CHECK: Undefined behavior: ABI attribute inalloca does not have same argument for function and call-site
+; CHECK: call void @fn_inalloca(ptr inalloca(i16) %x)
+ call void @fn_inalloca(ptr inalloca(i16) %x)
+ ret void
+}
+
+define void @caller_sret(ptr %x) {
+; CHECK: Undefined behavior: ABI attribute sret not present on both function and call-site
+; CHECK: call void @fn_sret(ptr %x)
+ call void @fn_sret(ptr %x)
+
+; CHECK: Undefined behavior: ABI attribute sret not present on both function and call-site
+; CHECK: call void @fn_nothing_ptr(ptr sret(i8) %x)
+ call void @fn_nothing_ptr(ptr sret(i8) %x)
+
+; CHECK: Undefined behavior: ABI attribute sret does not have same argument for function and call-site
+; CHECK: call void @fn_sret(ptr sret(i16) %x)
+ call void @fn_sret(ptr sret(i16) %x)
+ ret void
+}
diff --git a/llvm/test/Analysis/UniformityAnalysis/NVPTX/daorder.ll b/llvm/test/Analysis/UniformityAnalysis/NVPTX/daorder.ll
index 89d8c5a..14f33d7 100644
--- a/llvm/test/Analysis/UniformityAnalysis/NVPTX/daorder.ll
+++ b/llvm/test/Analysis/UniformityAnalysis/NVPTX/daorder.ll
@@ -3,7 +3,7 @@
target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
target triple = "nvptx64-nvidia-cuda"
-define i32 @daorder(i32 %n) {
+define ptx_kernel i32 @daorder(i32 %n) {
; CHECK-LABEL: for function 'daorder'
entry:
%tid = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
@@ -43,6 +43,3 @@ declare i32 @llvm.nvvm.read.ptx.sreg.tid.x()
declare i32 @llvm.nvvm.read.ptx.sreg.tid.y()
declare i32 @llvm.nvvm.read.ptx.sreg.tid.z()
declare i32 @llvm.nvvm.read.ptx.sreg.laneid()
-
-!nvvm.annotations = !{!0}
-!0 = !{ptr @daorder, !"kernel", i32 1}
diff --git a/llvm/test/Analysis/UniformityAnalysis/NVPTX/diverge.ll b/llvm/test/Analysis/UniformityAnalysis/NVPTX/diverge.ll
index 0ac1b5f5..cf8ffad 100644
--- a/llvm/test/Analysis/UniformityAnalysis/NVPTX/diverge.ll
+++ b/llvm/test/Analysis/UniformityAnalysis/NVPTX/diverge.ll
@@ -4,7 +4,7 @@ target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
target triple = "nvptx64-nvidia-cuda"
; return (n < 0 ? a + threadIdx.x : b + threadIdx.x)
-define i32 @no_diverge(i32 %n, i32 %a, i32 %b) {
+define ptx_kernel i32 @no_diverge(i32 %n, i32 %a, i32 %b) {
; CHECK-LABEL: for function 'no_diverge'
entry:
%tid = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
@@ -27,7 +27,7 @@ merge:
; if (threadIdx.x < 5) // divergent: data dependent
; c = b;
; return c; // c is divergent: sync dependent
-define i32 @sync(i32 %a, i32 %b) {
+define ptx_kernel i32 @sync(i32 %a, i32 %b) {
; CHECK-LABEL: for function 'sync'
bb1:
%tid = call i32 @llvm.nvvm.read.ptx.sreg.tid.y()
@@ -49,7 +49,7 @@ bb3:
; }
; // c here is divergent because it is sync dependent on threadIdx.x >= 5
; return c;
-define i32 @mixed(i32 %n, i32 %a, i32 %b) {
+define ptx_kernel i32 @mixed(i32 %n, i32 %a, i32 %b) {
; CHECK-LABEL: for function 'mixed'
bb1:
%tid = call i32 @llvm.nvvm.read.ptx.sreg.tid.z()
@@ -101,7 +101,7 @@ merge:
; return i == 10 ? 0 : 1; // i here is divergent
;
; The i defined in the loop is used outside.
-define i32 @loop() {
+define ptx_kernel i32 @loop() {
; CHECK-LABEL: for function 'loop'
entry:
%laneid = call i32 @llvm.nvvm.read.ptx.sreg.laneid()
@@ -149,7 +149,7 @@ else:
}
; Verifies sync-dependence is computed correctly in the absence of loops.
-define i32 @sync_no_loop(i32 %arg) {
+define ptx_kernel i32 @sync_no_loop(i32 %arg) {
; CHECK-LABEL: for function 'sync_no_loop'
entry:
%0 = add i32 %arg, 1
@@ -174,9 +174,3 @@ declare i32 @llvm.nvvm.read.ptx.sreg.tid.y()
declare i32 @llvm.nvvm.read.ptx.sreg.tid.z()
declare i32 @llvm.nvvm.read.ptx.sreg.laneid()
-!nvvm.annotations = !{!0, !1, !2, !3, !4}
-!0 = !{ptr @no_diverge, !"kernel", i32 1}
-!1 = !{ptr @sync, !"kernel", i32 1}
-!2 = !{ptr @mixed, !"kernel", i32 1}
-!3 = !{ptr @loop, !"kernel", i32 1}
-!4 = !{ptr @sync_no_loop, !"kernel", i32 1}
diff --git a/llvm/test/Analysis/UniformityAnalysis/NVPTX/hidden_diverge.ll b/llvm/test/Analysis/UniformityAnalysis/NVPTX/hidden_diverge.ll
index e319211..65512bf 100644
--- a/llvm/test/Analysis/UniformityAnalysis/NVPTX/hidden_diverge.ll
+++ b/llvm/test/Analysis/UniformityAnalysis/NVPTX/hidden_diverge.ll
@@ -3,7 +3,7 @@
target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
target triple = "nvptx64-nvidia-cuda"
-define i32 @hidden_diverge(i32 %n, i32 %a, i32 %b) {
+define ptx_kernel i32 @hidden_diverge(i32 %n, i32 %a, i32 %b) {
; CHECK-LABEL: for function 'hidden_diverge'
entry:
%tid = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
@@ -27,6 +27,3 @@ merge:
}
declare i32 @llvm.nvvm.read.ptx.sreg.tid.x()
-
-!nvvm.annotations = !{!0}
-!0 = !{ptr @hidden_diverge, !"kernel", i32 1}
diff --git a/llvm/test/Analysis/UniformityAnalysis/NVPTX/irreducible.ll b/llvm/test/Analysis/UniformityAnalysis/NVPTX/irreducible.ll
index cd729a9..e1ecc69 100644
--- a/llvm/test/Analysis/UniformityAnalysis/NVPTX/irreducible.ll
+++ b/llvm/test/Analysis/UniformityAnalysis/NVPTX/irreducible.ll
@@ -23,7 +23,7 @@ target triple = "nvptx64-nvidia-cuda"
; V
; if (i3 == 5) // divergent
; because sync dependent on (tid / i3).
-define i32 @unstructured_loop(i1 %entry_cond) {
+define ptx_kernel i32 @unstructured_loop(i1 %entry_cond) {
; CHECK-LABEL: for function 'unstructured_loop'
entry:
%tid = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
@@ -59,5 +59,3 @@ declare i32 @llvm.nvvm.read.ptx.sreg.tid.y()
declare i32 @llvm.nvvm.read.ptx.sreg.tid.z()
declare i32 @llvm.nvvm.read.ptx.sreg.laneid()
-!nvvm.annotations = !{!0}
-!0 = !{ptr @unstructured_loop, !"kernel", i32 1}
diff --git a/llvm/test/CodeGen/AArch64/arm64-fast-isel-conversion-fallback.ll b/llvm/test/CodeGen/AArch64/arm64-fast-isel-conversion-fallback.ll
index 1aa28f5..9a1203f 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fast-isel-conversion-fallback.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fast-isel-conversion-fallback.ll
@@ -156,11 +156,10 @@ define i32 @fptosi_bf(bfloat %a) nounwind ssp {
; CHECK-LABEL: fptosi_bf:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: fmov s1, s0
-; CHECK-NEXT: // implicit-def: $s0
+; CHECK-NEXT: // implicit-def: $d0
; CHECK-NEXT: fmov s0, s1
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-NEXT: fcvtzs w0, s0
; CHECK-NEXT: ret
entry:
@@ -173,11 +172,10 @@ define i32 @fptoui_sbf(bfloat %a) nounwind ssp {
; CHECK-LABEL: fptoui_sbf:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: fmov s1, s0
-; CHECK-NEXT: // implicit-def: $s0
+; CHECK-NEXT: // implicit-def: $d0
; CHECK-NEXT: fmov s0, s1
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-NEXT: fcvtzu w0, s0
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/atomicrmw-fadd.ll b/llvm/test/CodeGen/AArch64/atomicrmw-fadd.ll
index ed9c1b0..fb40dfc 100644
--- a/llvm/test/CodeGen/AArch64/atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AArch64/atomicrmw-fadd.ll
@@ -182,17 +182,14 @@ define half @test_atomicrmw_fadd_f16_seq_cst_align4(ptr %ptr, half %value) #0 {
define bfloat @test_atomicrmw_fadd_bf16_seq_cst_align2(ptr %ptr, bfloat %value) #0 {
; NOLSE-LABEL: test_atomicrmw_fadd_bf16_seq_cst_align2:
; NOLSE: // %bb.0:
-; NOLSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; NOLSE-NEXT: fmov w9, s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; NOLSE-NEXT: shll v1.4s, v0.4h, #16
; NOLSE-NEXT: mov w8, #32767 // =0x7fff
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s1, w9
; NOLSE-NEXT: .LBB2_1: // %atomicrmw.start
; NOLSE-NEXT: // =>This Inner Loop Header: Depth=1
; NOLSE-NEXT: ldaxrh w9, [x0]
; NOLSE-NEXT: fmov s0, w9
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s2, w9
+; NOLSE-NEXT: shll v2.4s, v0.4h, #16
; NOLSE-NEXT: fadd s2, s2, s1
; NOLSE-NEXT: fmov w9, s2
; NOLSE-NEXT: ubfx w10, w9, #16, #1
@@ -202,36 +199,34 @@ define bfloat @test_atomicrmw_fadd_bf16_seq_cst_align2(ptr %ptr, bfloat %value)
; NOLSE-NEXT: stlxrh w10, w9, [x0]
; NOLSE-NEXT: cbnz w10, .LBB2_1
; NOLSE-NEXT: // %bb.2: // %atomicrmw.end
-; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_atomicrmw_fadd_bf16_seq_cst_align2:
; LSE: // %bb.0:
-; LSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; LSE-NEXT: shll v1.4s, v0.4h, #16
; LSE-NEXT: mov w8, #32767 // =0x7fff
; LSE-NEXT: ldr h0, [x0]
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s1, w9
; LSE-NEXT: .LBB2_1: // %atomicrmw.start
; LSE-NEXT: // =>This Inner Loop Header: Depth=1
-; LSE-NEXT: fmov w9, s0
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: shll v2.4s, v0.4h, #16
; LSE-NEXT: fadd s2, s2, s1
; LSE-NEXT: fmov w9, s2
; LSE-NEXT: ubfx w10, w9, #16, #1
; LSE-NEXT: add w9, w9, w8
; LSE-NEXT: add w9, w10, w9
-; LSE-NEXT: fmov w10, s0
; LSE-NEXT: lsr w9, w9, #16
-; LSE-NEXT: mov w11, w10
-; LSE-NEXT: casalh w11, w9, [x0]
+; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: fmov w10, s2
+; LSE-NEXT: mov w11, w9
+; LSE-NEXT: casalh w11, w10, [x0]
; LSE-NEXT: fmov s0, w11
-; LSE-NEXT: cmp w11, w10, uxth
+; LSE-NEXT: cmp w11, w9, uxth
; LSE-NEXT: b.ne .LBB2_1
; LSE-NEXT: // %bb.2: // %atomicrmw.end
-; LSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; LSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; LSE-NEXT: ret
;
; SOFTFP-NOLSE-LABEL: test_atomicrmw_fadd_bf16_seq_cst_align2:
@@ -281,17 +276,14 @@ define bfloat @test_atomicrmw_fadd_bf16_seq_cst_align2(ptr %ptr, bfloat %value)
define bfloat @test_atomicrmw_fadd_bf16_seq_cst_align4(ptr %ptr, bfloat %value) #0 {
; NOLSE-LABEL: test_atomicrmw_fadd_bf16_seq_cst_align4:
; NOLSE: // %bb.0:
-; NOLSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; NOLSE-NEXT: fmov w9, s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; NOLSE-NEXT: shll v1.4s, v0.4h, #16
; NOLSE-NEXT: mov w8, #32767 // =0x7fff
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s1, w9
; NOLSE-NEXT: .LBB3_1: // %atomicrmw.start
; NOLSE-NEXT: // =>This Inner Loop Header: Depth=1
; NOLSE-NEXT: ldaxrh w9, [x0]
; NOLSE-NEXT: fmov s0, w9
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s2, w9
+; NOLSE-NEXT: shll v2.4s, v0.4h, #16
; NOLSE-NEXT: fadd s2, s2, s1
; NOLSE-NEXT: fmov w9, s2
; NOLSE-NEXT: ubfx w10, w9, #16, #1
@@ -301,36 +293,34 @@ define bfloat @test_atomicrmw_fadd_bf16_seq_cst_align4(ptr %ptr, bfloat %value)
; NOLSE-NEXT: stlxrh w10, w9, [x0]
; NOLSE-NEXT: cbnz w10, .LBB3_1
; NOLSE-NEXT: // %bb.2: // %atomicrmw.end
-; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_atomicrmw_fadd_bf16_seq_cst_align4:
; LSE: // %bb.0:
-; LSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; LSE-NEXT: shll v1.4s, v0.4h, #16
; LSE-NEXT: mov w8, #32767 // =0x7fff
; LSE-NEXT: ldr h0, [x0]
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s1, w9
; LSE-NEXT: .LBB3_1: // %atomicrmw.start
; LSE-NEXT: // =>This Inner Loop Header: Depth=1
-; LSE-NEXT: fmov w9, s0
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: shll v2.4s, v0.4h, #16
; LSE-NEXT: fadd s2, s2, s1
; LSE-NEXT: fmov w9, s2
; LSE-NEXT: ubfx w10, w9, #16, #1
; LSE-NEXT: add w9, w9, w8
; LSE-NEXT: add w9, w10, w9
-; LSE-NEXT: fmov w10, s0
; LSE-NEXT: lsr w9, w9, #16
-; LSE-NEXT: mov w11, w10
-; LSE-NEXT: casalh w11, w9, [x0]
+; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: fmov w10, s2
+; LSE-NEXT: mov w11, w9
+; LSE-NEXT: casalh w11, w10, [x0]
; LSE-NEXT: fmov s0, w11
-; LSE-NEXT: cmp w11, w10, uxth
+; LSE-NEXT: cmp w11, w9, uxth
; LSE-NEXT: b.ne .LBB3_1
; LSE-NEXT: // %bb.2: // %atomicrmw.end
-; LSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; LSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; LSE-NEXT: ret
;
; SOFTFP-NOLSE-LABEL: test_atomicrmw_fadd_bf16_seq_cst_align4:
diff --git a/llvm/test/CodeGen/AArch64/atomicrmw-fmax.ll b/llvm/test/CodeGen/AArch64/atomicrmw-fmax.ll
index 888b795..818dcf3 100644
--- a/llvm/test/CodeGen/AArch64/atomicrmw-fmax.ll
+++ b/llvm/test/CodeGen/AArch64/atomicrmw-fmax.ll
@@ -184,17 +184,14 @@ define half @test_atomicrmw_fmax_f16_seq_cst_align4(ptr %ptr, half %value) #0 {
define bfloat @test_atomicrmw_fmax_bf16_seq_cst_align2(ptr %ptr, bfloat %value) #0 {
; NOLSE-LABEL: test_atomicrmw_fmax_bf16_seq_cst_align2:
; NOLSE: // %bb.0:
-; NOLSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; NOLSE-NEXT: fmov w9, s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; NOLSE-NEXT: shll v1.4s, v0.4h, #16
; NOLSE-NEXT: mov w8, #32767 // =0x7fff
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s1, w9
; NOLSE-NEXT: .LBB2_1: // %atomicrmw.start
; NOLSE-NEXT: // =>This Inner Loop Header: Depth=1
; NOLSE-NEXT: ldaxrh w9, [x0]
; NOLSE-NEXT: fmov s0, w9
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s2, w9
+; NOLSE-NEXT: shll v2.4s, v0.4h, #16
; NOLSE-NEXT: fmaxnm s2, s2, s1
; NOLSE-NEXT: fmov w9, s2
; NOLSE-NEXT: ubfx w10, w9, #16, #1
@@ -204,36 +201,34 @@ define bfloat @test_atomicrmw_fmax_bf16_seq_cst_align2(ptr %ptr, bfloat %value)
; NOLSE-NEXT: stlxrh w10, w9, [x0]
; NOLSE-NEXT: cbnz w10, .LBB2_1
; NOLSE-NEXT: // %bb.2: // %atomicrmw.end
-; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_atomicrmw_fmax_bf16_seq_cst_align2:
; LSE: // %bb.0:
-; LSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; LSE-NEXT: shll v1.4s, v0.4h, #16
; LSE-NEXT: mov w8, #32767 // =0x7fff
; LSE-NEXT: ldr h0, [x0]
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s1, w9
; LSE-NEXT: .LBB2_1: // %atomicrmw.start
; LSE-NEXT: // =>This Inner Loop Header: Depth=1
-; LSE-NEXT: fmov w9, s0
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: shll v2.4s, v0.4h, #16
; LSE-NEXT: fmaxnm s2, s2, s1
; LSE-NEXT: fmov w9, s2
; LSE-NEXT: ubfx w10, w9, #16, #1
; LSE-NEXT: add w9, w9, w8
; LSE-NEXT: add w9, w10, w9
-; LSE-NEXT: fmov w10, s0
; LSE-NEXT: lsr w9, w9, #16
-; LSE-NEXT: mov w11, w10
-; LSE-NEXT: casalh w11, w9, [x0]
+; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: fmov w10, s2
+; LSE-NEXT: mov w11, w9
+; LSE-NEXT: casalh w11, w10, [x0]
; LSE-NEXT: fmov s0, w11
-; LSE-NEXT: cmp w11, w10, uxth
+; LSE-NEXT: cmp w11, w9, uxth
; LSE-NEXT: b.ne .LBB2_1
; LSE-NEXT: // %bb.2: // %atomicrmw.end
-; LSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; LSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; LSE-NEXT: ret
;
; SOFTFP-NOLSE-LABEL: test_atomicrmw_fmax_bf16_seq_cst_align2:
@@ -283,17 +278,14 @@ define bfloat @test_atomicrmw_fmax_bf16_seq_cst_align2(ptr %ptr, bfloat %value)
define bfloat @test_atomicrmw_fmax_bf16_seq_cst_align4(ptr %ptr, bfloat %value) #0 {
; NOLSE-LABEL: test_atomicrmw_fmax_bf16_seq_cst_align4:
; NOLSE: // %bb.0:
-; NOLSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; NOLSE-NEXT: fmov w9, s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; NOLSE-NEXT: shll v1.4s, v0.4h, #16
; NOLSE-NEXT: mov w8, #32767 // =0x7fff
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s1, w9
; NOLSE-NEXT: .LBB3_1: // %atomicrmw.start
; NOLSE-NEXT: // =>This Inner Loop Header: Depth=1
; NOLSE-NEXT: ldaxrh w9, [x0]
; NOLSE-NEXT: fmov s0, w9
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s2, w9
+; NOLSE-NEXT: shll v2.4s, v0.4h, #16
; NOLSE-NEXT: fmaxnm s2, s2, s1
; NOLSE-NEXT: fmov w9, s2
; NOLSE-NEXT: ubfx w10, w9, #16, #1
@@ -303,36 +295,34 @@ define bfloat @test_atomicrmw_fmax_bf16_seq_cst_align4(ptr %ptr, bfloat %value)
; NOLSE-NEXT: stlxrh w10, w9, [x0]
; NOLSE-NEXT: cbnz w10, .LBB3_1
; NOLSE-NEXT: // %bb.2: // %atomicrmw.end
-; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_atomicrmw_fmax_bf16_seq_cst_align4:
; LSE: // %bb.0:
-; LSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; LSE-NEXT: shll v1.4s, v0.4h, #16
; LSE-NEXT: mov w8, #32767 // =0x7fff
; LSE-NEXT: ldr h0, [x0]
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s1, w9
; LSE-NEXT: .LBB3_1: // %atomicrmw.start
; LSE-NEXT: // =>This Inner Loop Header: Depth=1
-; LSE-NEXT: fmov w9, s0
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: shll v2.4s, v0.4h, #16
; LSE-NEXT: fmaxnm s2, s2, s1
; LSE-NEXT: fmov w9, s2
; LSE-NEXT: ubfx w10, w9, #16, #1
; LSE-NEXT: add w9, w9, w8
; LSE-NEXT: add w9, w10, w9
-; LSE-NEXT: fmov w10, s0
; LSE-NEXT: lsr w9, w9, #16
-; LSE-NEXT: mov w11, w10
-; LSE-NEXT: casalh w11, w9, [x0]
+; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: fmov w10, s2
+; LSE-NEXT: mov w11, w9
+; LSE-NEXT: casalh w11, w10, [x0]
; LSE-NEXT: fmov s0, w11
-; LSE-NEXT: cmp w11, w10, uxth
+; LSE-NEXT: cmp w11, w9, uxth
; LSE-NEXT: b.ne .LBB3_1
; LSE-NEXT: // %bb.2: // %atomicrmw.end
-; LSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; LSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; LSE-NEXT: ret
;
; SOFTFP-NOLSE-LABEL: test_atomicrmw_fmax_bf16_seq_cst_align4:
@@ -653,31 +643,23 @@ define <2 x bfloat> @test_atomicrmw_fmax_v2bf16_seq_cst_align4(ptr %ptr, <2 x bf
; NOLSE-LABEL: test_atomicrmw_fmax_v2bf16_seq_cst_align4:
; NOLSE: // %bb.0:
; NOLSE-NEXT: // kill: def $d0 killed $d0 def $q0
-; NOLSE-NEXT: mov h1, v0.h[1]
-; NOLSE-NEXT: fmov w10, s0
+; NOLSE-NEXT: dup v1.4h, v0.h[1]
; NOLSE-NEXT: mov w8, #32767 // =0x7fff
-; NOLSE-NEXT: lsl w10, w10, #16
-; NOLSE-NEXT: fmov w9, s1
-; NOLSE-NEXT: fmov s1, w10
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s0, w9
+; NOLSE-NEXT: shll v0.4s, v0.4h, #16
+; NOLSE-NEXT: shll v1.4s, v1.4h, #16
; NOLSE-NEXT: .LBB7_1: // %atomicrmw.start
; NOLSE-NEXT: // =>This Inner Loop Header: Depth=1
; NOLSE-NEXT: ldaxr w9, [x0]
; NOLSE-NEXT: fmov s2, w9
-; NOLSE-NEXT: mov h3, v2.h[1]
-; NOLSE-NEXT: fmov w11, s2
-; NOLSE-NEXT: lsl w11, w11, #16
-; NOLSE-NEXT: fmov w10, s3
-; NOLSE-NEXT: fmov s3, w11
-; NOLSE-NEXT: lsl w10, w10, #16
-; NOLSE-NEXT: fmaxnm s3, s3, s1
-; NOLSE-NEXT: fmov s2, w10
+; NOLSE-NEXT: dup v3.4h, v2.h[1]
+; NOLSE-NEXT: shll v2.4s, v2.4h, #16
; NOLSE-NEXT: fmaxnm s2, s2, s0
-; NOLSE-NEXT: fmov w11, s3
+; NOLSE-NEXT: shll v3.4s, v3.4h, #16
+; NOLSE-NEXT: fmaxnm s3, s3, s1
+; NOLSE-NEXT: fmov w11, s2
; NOLSE-NEXT: ubfx w13, w11, #16, #1
; NOLSE-NEXT: add w11, w11, w8
-; NOLSE-NEXT: fmov w10, s2
+; NOLSE-NEXT: fmov w10, s3
; NOLSE-NEXT: add w11, w13, w11
; NOLSE-NEXT: lsr w11, w11, #16
; NOLSE-NEXT: ubfx w12, w10, #16, #1
@@ -697,25 +679,17 @@ define <2 x bfloat> @test_atomicrmw_fmax_v2bf16_seq_cst_align4(ptr %ptr, <2 x bf
; LSE-LABEL: test_atomicrmw_fmax_v2bf16_seq_cst_align4:
; LSE: // %bb.0:
; LSE-NEXT: // kill: def $d0 killed $d0 def $q0
-; LSE-NEXT: mov h1, v0.h[1]
-; LSE-NEXT: fmov w10, s0
+; LSE-NEXT: dup v1.4h, v0.h[1]
+; LSE-NEXT: shll v2.4s, v0.4h, #16
; LSE-NEXT: mov w8, #32767 // =0x7fff
; LSE-NEXT: ldr s0, [x0]
-; LSE-NEXT: lsl w10, w10, #16
-; LSE-NEXT: fmov w9, s1
-; LSE-NEXT: fmov s2, w10
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s1, w9
+; LSE-NEXT: shll v1.4s, v1.4h, #16
; LSE-NEXT: .LBB7_1: // %atomicrmw.start
; LSE-NEXT: // =>This Inner Loop Header: Depth=1
-; LSE-NEXT: mov h3, v0.h[1]
-; LSE-NEXT: fmov w10, s0
-; LSE-NEXT: lsl w10, w10, #16
-; LSE-NEXT: fmov w9, s3
-; LSE-NEXT: fmov s4, w10
-; LSE-NEXT: lsl w9, w9, #16
+; LSE-NEXT: dup v3.4h, v0.h[1]
+; LSE-NEXT: shll v4.4s, v0.4h, #16
; LSE-NEXT: fmaxnm s4, s4, s2
-; LSE-NEXT: fmov s3, w9
+; LSE-NEXT: shll v3.4s, v3.4h, #16
; LSE-NEXT: fmaxnm s3, s3, s1
; LSE-NEXT: fmov w10, s4
; LSE-NEXT: ubfx w12, w10, #16, #1
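
Across these hunks the change is uniform: the bf16 operand is widened to f32 with a single shll vN.4s, vM.4h, #16 in the vector registers instead of bouncing through a GPR (fmov, lsl #16, fmov back), while the narrowing path keeps the usual round-to-nearest-even bias of 0x7fff plus the lowest kept bit. A minimal C sketch of what that widen/round pair computes, with helper names chosen for illustration and NaN handling omitted just as in the checked sequence:

#include <stdint.h>
#include <string.h>

/* Widen bf16 to f32: the payload moves into the high half of the f32
   bit pattern (the shll / lsl #16 step in the checks). */
static float bf16_to_f32(uint16_t bits)
{
    uint32_t w = (uint32_t)bits << 16;
    float f;
    memcpy(&f, &w, sizeof f);
    return f;
}

/* Narrow f32 to bf16 with round-to-nearest-even: bias by 0x7fff plus the
   lowest kept bit, then take the top 16 bits (the mov w8, #32767 / ubfx /
   add / add / lsr #16 sequence in the checks). */
static uint16_t f32_to_bf16(float f)
{
    uint32_t w;
    memcpy(&w, &f, sizeof w);
    uint32_t lsb = (w >> 16) & 1;   /* ubfx wN, wM, #16, #1 */
    w += 0x7fffu + lsb;             /* add; add */
    return (uint16_t)(w >> 16);     /* lsr #16 */
}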
diff --git a/llvm/test/CodeGen/AArch64/atomicrmw-fmin.ll b/llvm/test/CodeGen/AArch64/atomicrmw-fmin.ll
index a3665c6..b969241e 100644
--- a/llvm/test/CodeGen/AArch64/atomicrmw-fmin.ll
+++ b/llvm/test/CodeGen/AArch64/atomicrmw-fmin.ll
@@ -184,17 +184,14 @@ define half @test_atomicrmw_fmin_f16_seq_cst_align4(ptr %ptr, half %value) #0 {
define bfloat @test_atomicrmw_fmin_bf16_seq_cst_align2(ptr %ptr, bfloat %value) #0 {
; NOLSE-LABEL: test_atomicrmw_fmin_bf16_seq_cst_align2:
; NOLSE: // %bb.0:
-; NOLSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; NOLSE-NEXT: fmov w9, s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; NOLSE-NEXT: shll v1.4s, v0.4h, #16
; NOLSE-NEXT: mov w8, #32767 // =0x7fff
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s1, w9
; NOLSE-NEXT: .LBB2_1: // %atomicrmw.start
; NOLSE-NEXT: // =>This Inner Loop Header: Depth=1
; NOLSE-NEXT: ldaxrh w9, [x0]
; NOLSE-NEXT: fmov s0, w9
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s2, w9
+; NOLSE-NEXT: shll v2.4s, v0.4h, #16
; NOLSE-NEXT: fminnm s2, s2, s1
; NOLSE-NEXT: fmov w9, s2
; NOLSE-NEXT: ubfx w10, w9, #16, #1
@@ -204,36 +201,34 @@ define bfloat @test_atomicrmw_fmin_bf16_seq_cst_align2(ptr %ptr, bfloat %value)
; NOLSE-NEXT: stlxrh w10, w9, [x0]
; NOLSE-NEXT: cbnz w10, .LBB2_1
; NOLSE-NEXT: // %bb.2: // %atomicrmw.end
-; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_atomicrmw_fmin_bf16_seq_cst_align2:
; LSE: // %bb.0:
-; LSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; LSE-NEXT: shll v1.4s, v0.4h, #16
; LSE-NEXT: mov w8, #32767 // =0x7fff
; LSE-NEXT: ldr h0, [x0]
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s1, w9
; LSE-NEXT: .LBB2_1: // %atomicrmw.start
; LSE-NEXT: // =>This Inner Loop Header: Depth=1
-; LSE-NEXT: fmov w9, s0
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: shll v2.4s, v0.4h, #16
; LSE-NEXT: fminnm s2, s2, s1
; LSE-NEXT: fmov w9, s2
; LSE-NEXT: ubfx w10, w9, #16, #1
; LSE-NEXT: add w9, w9, w8
; LSE-NEXT: add w9, w10, w9
-; LSE-NEXT: fmov w10, s0
; LSE-NEXT: lsr w9, w9, #16
-; LSE-NEXT: mov w11, w10
-; LSE-NEXT: casalh w11, w9, [x0]
+; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: fmov w10, s2
+; LSE-NEXT: mov w11, w9
+; LSE-NEXT: casalh w11, w10, [x0]
; LSE-NEXT: fmov s0, w11
-; LSE-NEXT: cmp w11, w10, uxth
+; LSE-NEXT: cmp w11, w9, uxth
; LSE-NEXT: b.ne .LBB2_1
; LSE-NEXT: // %bb.2: // %atomicrmw.end
-; LSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; LSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; LSE-NEXT: ret
;
; SOFTFP-NOLSE-LABEL: test_atomicrmw_fmin_bf16_seq_cst_align2:
@@ -283,17 +278,14 @@ define bfloat @test_atomicrmw_fmin_bf16_seq_cst_align2(ptr %ptr, bfloat %value)
define bfloat @test_atomicrmw_fmin_bf16_seq_cst_align4(ptr %ptr, bfloat %value) #0 {
; NOLSE-LABEL: test_atomicrmw_fmin_bf16_seq_cst_align4:
; NOLSE: // %bb.0:
-; NOLSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; NOLSE-NEXT: fmov w9, s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; NOLSE-NEXT: shll v1.4s, v0.4h, #16
; NOLSE-NEXT: mov w8, #32767 // =0x7fff
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s1, w9
; NOLSE-NEXT: .LBB3_1: // %atomicrmw.start
; NOLSE-NEXT: // =>This Inner Loop Header: Depth=1
; NOLSE-NEXT: ldaxrh w9, [x0]
; NOLSE-NEXT: fmov s0, w9
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s2, w9
+; NOLSE-NEXT: shll v2.4s, v0.4h, #16
; NOLSE-NEXT: fminnm s2, s2, s1
; NOLSE-NEXT: fmov w9, s2
; NOLSE-NEXT: ubfx w10, w9, #16, #1
@@ -303,36 +295,34 @@ define bfloat @test_atomicrmw_fmin_bf16_seq_cst_align4(ptr %ptr, bfloat %value)
; NOLSE-NEXT: stlxrh w10, w9, [x0]
; NOLSE-NEXT: cbnz w10, .LBB3_1
; NOLSE-NEXT: // %bb.2: // %atomicrmw.end
-; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_atomicrmw_fmin_bf16_seq_cst_align4:
; LSE: // %bb.0:
-; LSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; LSE-NEXT: shll v1.4s, v0.4h, #16
; LSE-NEXT: mov w8, #32767 // =0x7fff
; LSE-NEXT: ldr h0, [x0]
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s1, w9
; LSE-NEXT: .LBB3_1: // %atomicrmw.start
; LSE-NEXT: // =>This Inner Loop Header: Depth=1
-; LSE-NEXT: fmov w9, s0
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: shll v2.4s, v0.4h, #16
; LSE-NEXT: fminnm s2, s2, s1
; LSE-NEXT: fmov w9, s2
; LSE-NEXT: ubfx w10, w9, #16, #1
; LSE-NEXT: add w9, w9, w8
; LSE-NEXT: add w9, w10, w9
-; LSE-NEXT: fmov w10, s0
; LSE-NEXT: lsr w9, w9, #16
-; LSE-NEXT: mov w11, w10
-; LSE-NEXT: casalh w11, w9, [x0]
+; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: fmov w10, s2
+; LSE-NEXT: mov w11, w9
+; LSE-NEXT: casalh w11, w10, [x0]
; LSE-NEXT: fmov s0, w11
-; LSE-NEXT: cmp w11, w10, uxth
+; LSE-NEXT: cmp w11, w9, uxth
; LSE-NEXT: b.ne .LBB3_1
; LSE-NEXT: // %bb.2: // %atomicrmw.end
-; LSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; LSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; LSE-NEXT: ret
;
; SOFTFP-NOLSE-LABEL: test_atomicrmw_fmin_bf16_seq_cst_align4:
@@ -653,31 +643,23 @@ define <2 x bfloat> @test_atomicrmw_fmin_v2bf16_seq_cst_align4(ptr %ptr, <2 x bf
; NOLSE-LABEL: test_atomicrmw_fmin_v2bf16_seq_cst_align4:
; NOLSE: // %bb.0:
; NOLSE-NEXT: // kill: def $d0 killed $d0 def $q0
-; NOLSE-NEXT: mov h1, v0.h[1]
-; NOLSE-NEXT: fmov w10, s0
+; NOLSE-NEXT: dup v1.4h, v0.h[1]
; NOLSE-NEXT: mov w8, #32767 // =0x7fff
-; NOLSE-NEXT: lsl w10, w10, #16
-; NOLSE-NEXT: fmov w9, s1
-; NOLSE-NEXT: fmov s1, w10
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s0, w9
+; NOLSE-NEXT: shll v0.4s, v0.4h, #16
+; NOLSE-NEXT: shll v1.4s, v1.4h, #16
; NOLSE-NEXT: .LBB7_1: // %atomicrmw.start
; NOLSE-NEXT: // =>This Inner Loop Header: Depth=1
; NOLSE-NEXT: ldaxr w9, [x0]
; NOLSE-NEXT: fmov s2, w9
-; NOLSE-NEXT: mov h3, v2.h[1]
-; NOLSE-NEXT: fmov w11, s2
-; NOLSE-NEXT: lsl w11, w11, #16
-; NOLSE-NEXT: fmov w10, s3
-; NOLSE-NEXT: fmov s3, w11
-; NOLSE-NEXT: lsl w10, w10, #16
-; NOLSE-NEXT: fminnm s3, s3, s1
-; NOLSE-NEXT: fmov s2, w10
+; NOLSE-NEXT: dup v3.4h, v2.h[1]
+; NOLSE-NEXT: shll v2.4s, v2.4h, #16
; NOLSE-NEXT: fminnm s2, s2, s0
-; NOLSE-NEXT: fmov w11, s3
+; NOLSE-NEXT: shll v3.4s, v3.4h, #16
+; NOLSE-NEXT: fminnm s3, s3, s1
+; NOLSE-NEXT: fmov w11, s2
; NOLSE-NEXT: ubfx w13, w11, #16, #1
; NOLSE-NEXT: add w11, w11, w8
-; NOLSE-NEXT: fmov w10, s2
+; NOLSE-NEXT: fmov w10, s3
; NOLSE-NEXT: add w11, w13, w11
; NOLSE-NEXT: lsr w11, w11, #16
; NOLSE-NEXT: ubfx w12, w10, #16, #1
@@ -697,25 +679,17 @@ define <2 x bfloat> @test_atomicrmw_fmin_v2bf16_seq_cst_align4(ptr %ptr, <2 x bf
; LSE-LABEL: test_atomicrmw_fmin_v2bf16_seq_cst_align4:
; LSE: // %bb.0:
; LSE-NEXT: // kill: def $d0 killed $d0 def $q0
-; LSE-NEXT: mov h1, v0.h[1]
-; LSE-NEXT: fmov w10, s0
+; LSE-NEXT: dup v1.4h, v0.h[1]
+; LSE-NEXT: shll v2.4s, v0.4h, #16
; LSE-NEXT: mov w8, #32767 // =0x7fff
; LSE-NEXT: ldr s0, [x0]
-; LSE-NEXT: lsl w10, w10, #16
-; LSE-NEXT: fmov w9, s1
-; LSE-NEXT: fmov s2, w10
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s1, w9
+; LSE-NEXT: shll v1.4s, v1.4h, #16
; LSE-NEXT: .LBB7_1: // %atomicrmw.start
; LSE-NEXT: // =>This Inner Loop Header: Depth=1
-; LSE-NEXT: mov h3, v0.h[1]
-; LSE-NEXT: fmov w10, s0
-; LSE-NEXT: lsl w10, w10, #16
-; LSE-NEXT: fmov w9, s3
-; LSE-NEXT: fmov s4, w10
-; LSE-NEXT: lsl w9, w9, #16
+; LSE-NEXT: dup v3.4h, v0.h[1]
+; LSE-NEXT: shll v4.4s, v0.4h, #16
; LSE-NEXT: fminnm s4, s4, s2
-; LSE-NEXT: fmov s3, w9
+; LSE-NEXT: shll v3.4s, v3.4h, #16
; LSE-NEXT: fminnm s3, s3, s1
; LSE-NEXT: fmov w10, s4
; LSE-NEXT: ubfx w12, w10, #16, #1
diff --git a/llvm/test/CodeGen/AArch64/atomicrmw-fsub.ll b/llvm/test/CodeGen/AArch64/atomicrmw-fsub.ll
index 7725ce0..e603337 100644
--- a/llvm/test/CodeGen/AArch64/atomicrmw-fsub.ll
+++ b/llvm/test/CodeGen/AArch64/atomicrmw-fsub.ll
@@ -182,17 +182,14 @@ define half @test_atomicrmw_fsub_f16_seq_cst_align4(ptr %ptr, half %value) #0 {
define bfloat @test_atomicrmw_fsub_bf16_seq_cst_align2(ptr %ptr, bfloat %value) #0 {
; NOLSE-LABEL: test_atomicrmw_fsub_bf16_seq_cst_align2:
; NOLSE: // %bb.0:
-; NOLSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; NOLSE-NEXT: fmov w9, s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; NOLSE-NEXT: shll v1.4s, v0.4h, #16
; NOLSE-NEXT: mov w8, #32767 // =0x7fff
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s1, w9
; NOLSE-NEXT: .LBB2_1: // %atomicrmw.start
; NOLSE-NEXT: // =>This Inner Loop Header: Depth=1
; NOLSE-NEXT: ldaxrh w9, [x0]
; NOLSE-NEXT: fmov s0, w9
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s2, w9
+; NOLSE-NEXT: shll v2.4s, v0.4h, #16
; NOLSE-NEXT: fsub s2, s2, s1
; NOLSE-NEXT: fmov w9, s2
; NOLSE-NEXT: ubfx w10, w9, #16, #1
@@ -202,36 +199,34 @@ define bfloat @test_atomicrmw_fsub_bf16_seq_cst_align2(ptr %ptr, bfloat %value)
; NOLSE-NEXT: stlxrh w10, w9, [x0]
; NOLSE-NEXT: cbnz w10, .LBB2_1
; NOLSE-NEXT: // %bb.2: // %atomicrmw.end
-; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_atomicrmw_fsub_bf16_seq_cst_align2:
; LSE: // %bb.0:
-; LSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; LSE-NEXT: shll v1.4s, v0.4h, #16
; LSE-NEXT: mov w8, #32767 // =0x7fff
; LSE-NEXT: ldr h0, [x0]
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s1, w9
; LSE-NEXT: .LBB2_1: // %atomicrmw.start
; LSE-NEXT: // =>This Inner Loop Header: Depth=1
-; LSE-NEXT: fmov w9, s0
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: shll v2.4s, v0.4h, #16
; LSE-NEXT: fsub s2, s2, s1
; LSE-NEXT: fmov w9, s2
; LSE-NEXT: ubfx w10, w9, #16, #1
; LSE-NEXT: add w9, w9, w8
; LSE-NEXT: add w9, w10, w9
-; LSE-NEXT: fmov w10, s0
; LSE-NEXT: lsr w9, w9, #16
-; LSE-NEXT: mov w11, w10
-; LSE-NEXT: casalh w11, w9, [x0]
+; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: fmov w10, s2
+; LSE-NEXT: mov w11, w9
+; LSE-NEXT: casalh w11, w10, [x0]
; LSE-NEXT: fmov s0, w11
-; LSE-NEXT: cmp w11, w10, uxth
+; LSE-NEXT: cmp w11, w9, uxth
; LSE-NEXT: b.ne .LBB2_1
; LSE-NEXT: // %bb.2: // %atomicrmw.end
-; LSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; LSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; LSE-NEXT: ret
;
; SOFTFP-NOLSE-LABEL: test_atomicrmw_fsub_bf16_seq_cst_align2:
@@ -281,17 +276,14 @@ define bfloat @test_atomicrmw_fsub_bf16_seq_cst_align2(ptr %ptr, bfloat %value)
define bfloat @test_atomicrmw_fsub_bf16_seq_cst_align4(ptr %ptr, bfloat %value) #0 {
; NOLSE-LABEL: test_atomicrmw_fsub_bf16_seq_cst_align4:
; NOLSE: // %bb.0:
-; NOLSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; NOLSE-NEXT: fmov w9, s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; NOLSE-NEXT: shll v1.4s, v0.4h, #16
; NOLSE-NEXT: mov w8, #32767 // =0x7fff
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s1, w9
; NOLSE-NEXT: .LBB3_1: // %atomicrmw.start
; NOLSE-NEXT: // =>This Inner Loop Header: Depth=1
; NOLSE-NEXT: ldaxrh w9, [x0]
; NOLSE-NEXT: fmov s0, w9
-; NOLSE-NEXT: lsl w9, w9, #16
-; NOLSE-NEXT: fmov s2, w9
+; NOLSE-NEXT: shll v2.4s, v0.4h, #16
; NOLSE-NEXT: fsub s2, s2, s1
; NOLSE-NEXT: fmov w9, s2
; NOLSE-NEXT: ubfx w10, w9, #16, #1
@@ -301,36 +293,34 @@ define bfloat @test_atomicrmw_fsub_bf16_seq_cst_align4(ptr %ptr, bfloat %value)
; NOLSE-NEXT: stlxrh w10, w9, [x0]
; NOLSE-NEXT: cbnz w10, .LBB3_1
; NOLSE-NEXT: // %bb.2: // %atomicrmw.end
-; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; NOLSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; NOLSE-NEXT: ret
;
; LSE-LABEL: test_atomicrmw_fsub_bf16_seq_cst_align4:
; LSE: // %bb.0:
-; LSE-NEXT: // kill: def $h0 killed $h0 def $s0
-; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: // kill: def $h0 killed $h0 def $d0
+; LSE-NEXT: shll v1.4s, v0.4h, #16
; LSE-NEXT: mov w8, #32767 // =0x7fff
; LSE-NEXT: ldr h0, [x0]
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s1, w9
; LSE-NEXT: .LBB3_1: // %atomicrmw.start
; LSE-NEXT: // =>This Inner Loop Header: Depth=1
-; LSE-NEXT: fmov w9, s0
-; LSE-NEXT: lsl w9, w9, #16
-; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: shll v2.4s, v0.4h, #16
; LSE-NEXT: fsub s2, s2, s1
; LSE-NEXT: fmov w9, s2
; LSE-NEXT: ubfx w10, w9, #16, #1
; LSE-NEXT: add w9, w9, w8
; LSE-NEXT: add w9, w10, w9
-; LSE-NEXT: fmov w10, s0
; LSE-NEXT: lsr w9, w9, #16
-; LSE-NEXT: mov w11, w10
-; LSE-NEXT: casalh w11, w9, [x0]
+; LSE-NEXT: fmov s2, w9
+; LSE-NEXT: fmov w9, s0
+; LSE-NEXT: fmov w10, s2
+; LSE-NEXT: mov w11, w9
+; LSE-NEXT: casalh w11, w10, [x0]
; LSE-NEXT: fmov s0, w11
-; LSE-NEXT: cmp w11, w10, uxth
+; LSE-NEXT: cmp w11, w9, uxth
; LSE-NEXT: b.ne .LBB3_1
; LSE-NEXT: // %bb.2: // %atomicrmw.end
-; LSE-NEXT: // kill: def $h0 killed $h0 killed $s0
+; LSE-NEXT: // kill: def $h0 killed $h0 killed $d0
; LSE-NEXT: ret
;
; SOFTFP-NOLSE-LABEL: test_atomicrmw_fsub_bf16_seq_cst_align4:
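
The atomicrmw tests above all share one retry shape: without LSE the current value is reloaded with ldaxrh/ldaxr and published with stlxrh/stlxr plus cbnz, and with LSE the retry is driven by casalh followed by a cmp ..., uxth on the returned halfword. A rough C11 sketch of the same loop for the fmax case, reusing the bf16 helpers sketched earlier (function and helper names are illustrative, not the compiler's runtime):

#include <math.h>
#include <stdatomic.h>
#include <stdint.h>

/* Assumed from the earlier sketch. */
float bf16_to_f32(uint16_t bits);
uint16_t f32_to_bf16(float f);

/* One possible emulation of `atomicrmw fmax ptr %p, bfloat %v seq_cst`:
   widen, take the maximum in f32 (fmaxf matches fmaxnm's quiet-NaN
   behaviour), round back to bf16, and retry with a 16-bit
   compare-and-swap until the value we computed from is still the value
   in memory. Returns the old value, as atomicrmw does. */
uint16_t atomic_fmax_bf16(_Atomic uint16_t *p, uint16_t val)
{
    float rhs = bf16_to_f32(val);   /* hoisted out of the loop, as in the checks */
    uint16_t old = atomic_load_explicit(p, memory_order_relaxed);
    for (;;) {
        uint16_t desired = f32_to_bf16(fmaxf(bf16_to_f32(old), rhs));
        if (atomic_compare_exchange_weak_explicit(p, &old, desired,
                                                  memory_order_seq_cst,
                                                  memory_order_seq_cst))
            return old;             /* success: old still holds the prior contents */
        /* failure: old was refreshed from memory; loop and recompute */
    }
}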
diff --git a/llvm/test/CodeGen/AArch64/bf16-instructions.ll b/llvm/test/CodeGen/AArch64/bf16-instructions.ll
index 3399761..bc06453 100644
--- a/llvm/test/CodeGen/AArch64/bf16-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/bf16-instructions.ll
@@ -5,16 +5,12 @@
define bfloat @test_fadd(bfloat %a, bfloat %b) #0 {
; CHECK-CVT-LABEL: test_fadd:
; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w9, s1
-; CHECK-CVT-NEXT: fmov w10, s0
+; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: lsl w10, w10, #16
-; CHECK-CVT-NEXT: fmov s0, w9
-; CHECK-CVT-NEXT: fmov s1, w10
-; CHECK-CVT-NEXT: fadd s0, s1, s0
+; CHECK-CVT-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: fadd s0, s0, s1
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: ubfx w10, w9, #16, #1
; CHECK-CVT-NEXT: add w8, w9, w8
@@ -26,15 +22,11 @@ define bfloat @test_fadd(bfloat %a, bfloat %b) #0 {
;
; CHECK-BF16-LABEL: test_fadd:
; CHECK-BF16: // %bb.0:
-; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s1
-; CHECK-BF16-NEXT: fmov w9, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: lsl w9, w9, #16
-; CHECK-BF16-NEXT: fmov s0, w8
-; CHECK-BF16-NEXT: fmov s1, w9
-; CHECK-BF16-NEXT: fadd s0, s1, s0
+; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: fadd s0, s0, s1
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
%r = fadd bfloat %a, %b
@@ -44,16 +36,12 @@ define bfloat @test_fadd(bfloat %a, bfloat %b) #0 {
define bfloat @test_fsub(bfloat %a, bfloat %b) #0 {
; CHECK-CVT-LABEL: test_fsub:
; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w9, s1
-; CHECK-CVT-NEXT: fmov w10, s0
+; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: lsl w10, w10, #16
-; CHECK-CVT-NEXT: fmov s0, w9
-; CHECK-CVT-NEXT: fmov s1, w10
-; CHECK-CVT-NEXT: fsub s0, s1, s0
+; CHECK-CVT-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: fsub s0, s0, s1
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: ubfx w10, w9, #16, #1
; CHECK-CVT-NEXT: add w8, w9, w8
@@ -65,15 +53,11 @@ define bfloat @test_fsub(bfloat %a, bfloat %b) #0 {
;
; CHECK-BF16-LABEL: test_fsub:
; CHECK-BF16: // %bb.0:
-; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s1
-; CHECK-BF16-NEXT: fmov w9, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: lsl w9, w9, #16
-; CHECK-BF16-NEXT: fmov s0, w8
-; CHECK-BF16-NEXT: fmov s1, w9
-; CHECK-BF16-NEXT: fsub s0, s1, s0
+; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: fsub s0, s0, s1
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
%r = fsub bfloat %a, %b
@@ -83,16 +67,12 @@ define bfloat @test_fsub(bfloat %a, bfloat %b) #0 {
define bfloat @test_fmul(bfloat %a, bfloat %b) #0 {
; CHECK-CVT-LABEL: test_fmul:
; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w9, s1
-; CHECK-CVT-NEXT: fmov w10, s0
+; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: lsl w10, w10, #16
-; CHECK-CVT-NEXT: fmov s0, w9
-; CHECK-CVT-NEXT: fmov s1, w10
-; CHECK-CVT-NEXT: fmul s0, s1, s0
+; CHECK-CVT-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: fmul s0, s0, s1
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: ubfx w10, w9, #16, #1
; CHECK-CVT-NEXT: add w8, w9, w8
@@ -104,15 +84,11 @@ define bfloat @test_fmul(bfloat %a, bfloat %b) #0 {
;
; CHECK-BF16-LABEL: test_fmul:
; CHECK-BF16: // %bb.0:
-; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s1
-; CHECK-BF16-NEXT: fmov w9, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: lsl w9, w9, #16
-; CHECK-BF16-NEXT: fmov s0, w8
-; CHECK-BF16-NEXT: fmov s1, w9
-; CHECK-BF16-NEXT: fmul s0, s1, s0
+; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: fmul s0, s0, s1
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
%r = fmul bfloat %a, %b
@@ -122,27 +98,21 @@ define bfloat @test_fmul(bfloat %a, bfloat %b) #0 {
define bfloat @test_fmadd(bfloat %a, bfloat %b, bfloat %c) #0 {
; CHECK-CVT-LABEL: test_fmadd:
; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s1
-; CHECK-CVT-NEXT: fmov w9, s0
+; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-CVT-NEXT: mov w10, #32767 // =0x7fff
-; CHECK-CVT-NEXT: // kill: def $h2 killed $h2 def $s2
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: fmov s0, w8
-; CHECK-CVT-NEXT: fmov s1, w9
-; CHECK-CVT-NEXT: fmul s0, s1, s0
+; CHECK-CVT-NEXT: // kill: def $h2 killed $h2 def $d2
+; CHECK-CVT-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: fmul s0, s0, s1
+; CHECK-CVT-NEXT: shll v1.4s, v2.4h, #16
; CHECK-CVT-NEXT: fmov w8, s0
; CHECK-CVT-NEXT: ubfx w9, w8, #16, #1
; CHECK-CVT-NEXT: add w8, w8, w10
; CHECK-CVT-NEXT: add w8, w9, w8
-; CHECK-CVT-NEXT: fmov w9, s2
; CHECK-CVT-NEXT: lsr w8, w8, #16
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: lsl w9, w9, #16
; CHECK-CVT-NEXT: fmov s0, w8
-; CHECK-CVT-NEXT: fmov s1, w9
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
; CHECK-CVT-NEXT: fadd s0, s0, s1
; CHECK-CVT-NEXT: fmov w8, s0
; CHECK-CVT-NEXT: ubfx w9, w8, #16, #1
@@ -155,23 +125,15 @@ define bfloat @test_fmadd(bfloat %a, bfloat %b, bfloat %c) #0 {
;
; CHECK-BF16-LABEL: test_fmadd:
; CHECK-BF16: // %bb.0:
-; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s1
-; CHECK-BF16-NEXT: fmov w9, s0
-; CHECK-BF16-NEXT: // kill: def $h2 killed $h2 def $s2
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: lsl w9, w9, #16
-; CHECK-BF16-NEXT: fmov s0, w8
-; CHECK-BF16-NEXT: fmov s1, w9
-; CHECK-BF16-NEXT: fmov w9, s2
-; CHECK-BF16-NEXT: fmul s0, s1, s0
-; CHECK-BF16-NEXT: lsl w9, w9, #16
-; CHECK-BF16-NEXT: fmov s1, w9
+; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: // kill: def $h2 killed $h2 def $d2
+; CHECK-BF16-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: fmul s0, s0, s1
+; CHECK-BF16-NEXT: shll v1.4s, v2.4h, #16
; CHECK-BF16-NEXT: bfcvt h0, s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
; CHECK-BF16-NEXT: fadd s0, s0, s1
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
@@ -183,16 +145,12 @@ define bfloat @test_fmadd(bfloat %a, bfloat %b, bfloat %c) #0 {
define bfloat @test_fdiv(bfloat %a, bfloat %b) #0 {
; CHECK-CVT-LABEL: test_fdiv:
; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w9, s1
-; CHECK-CVT-NEXT: fmov w10, s0
+; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: lsl w10, w10, #16
-; CHECK-CVT-NEXT: fmov s0, w9
-; CHECK-CVT-NEXT: fmov s1, w10
-; CHECK-CVT-NEXT: fdiv s0, s1, s0
+; CHECK-CVT-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: fdiv s0, s0, s1
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: ubfx w10, w9, #16, #1
; CHECK-CVT-NEXT: add w8, w9, w8
@@ -204,15 +162,11 @@ define bfloat @test_fdiv(bfloat %a, bfloat %b) #0 {
;
; CHECK-BF16-LABEL: test_fdiv:
; CHECK-BF16: // %bb.0:
-; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s1
-; CHECK-BF16-NEXT: fmov w9, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: lsl w9, w9, #16
-; CHECK-BF16-NEXT: fmov s0, w8
-; CHECK-BF16-NEXT: fmov s1, w9
-; CHECK-BF16-NEXT: fdiv s0, s1, s0
+; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: fdiv s0, s0, s1
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
%r = fdiv bfloat %a, %b
@@ -223,14 +177,12 @@ define bfloat @test_frem(bfloat %a, bfloat %b) #0 {
; CHECK-CVT-LABEL: test_frem:
; CHECK-CVT: // %bb.0:
; CHECK-CVT-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s0
-; CHECK-CVT-NEXT: fmov w9, s1
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: fmov s0, w8
-; CHECK-CVT-NEXT: fmov s1, w9
+; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-CVT-NEXT: // kill: def $s0 killed $s0 killed $q0
+; CHECK-CVT-NEXT: // kill: def $s1 killed $s1 killed $q1
; CHECK-CVT-NEXT: bl fmodf
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
@@ -246,14 +198,12 @@ define bfloat @test_frem(bfloat %a, bfloat %b) #0 {
; CHECK-BF16-LABEL: test_frem:
; CHECK-BF16: // %bb.0:
; CHECK-BF16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: fmov w9, s1
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: lsl w9, w9, #16
-; CHECK-BF16-NEXT: fmov s0, w8
-; CHECK-BF16-NEXT: fmov s1, w9
+; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-BF16-NEXT: // kill: def $s0 killed $s0 killed $q0
+; CHECK-BF16-NEXT: // kill: def $s1 killed $s1 killed $q1
; CHECK-BF16-NEXT: bl fmodf
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
@@ -334,17 +284,13 @@ define bfloat @test_select(bfloat %a, bfloat %b, i1 zeroext %c) #0 {
define bfloat @test_select_cc(bfloat %a, bfloat %b, bfloat %c, bfloat %d) #0 {
; CHECK-LABEL: test_select_cc:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h3 killed $h3 def $s3
-; CHECK-NEXT: // kill: def $h2 killed $h2 def $s2
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: fmov w9, s2
+; CHECK-NEXT: // kill: def $h3 killed $h3 def $d3
+; CHECK-NEXT: // kill: def $h2 killed $h2 def $d2
; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
; CHECK-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: fmov s3, w9
-; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v3.4s, v3.4h, #16
+; CHECK-NEXT: shll v2.4s, v2.4h, #16
+; CHECK-NEXT: fcmp s2, s3
; CHECK-NEXT: fcsel s0, s0, s1, ne
; CHECK-NEXT: // kill: def $h0 killed $h0 killed $s0
; CHECK-NEXT: ret
@@ -356,15 +302,11 @@ define bfloat @test_select_cc(bfloat %a, bfloat %b, bfloat %c, bfloat %d) #0 {
define float @test_select_cc_f32_f16(float %a, float %b, bfloat %c, bfloat %d) #0 {
; CHECK-LABEL: test_select_cc_f32_f16:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h3 killed $h3 def $s3
-; CHECK-NEXT: // kill: def $h2 killed $h2 def $s2
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: fmov w9, s2
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: fmov s3, w9
-; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: // kill: def $h3 killed $h3 def $d3
+; CHECK-NEXT: // kill: def $h2 killed $h2 def $d2
+; CHECK-NEXT: shll v3.4s, v3.4h, #16
+; CHECK-NEXT: shll v2.4s, v2.4h, #16
+; CHECK-NEXT: fcmp s2, s3
; CHECK-NEXT: fcsel s0, s0, s1, ne
; CHECK-NEXT: ret
%cc = fcmp une bfloat %c, %d
@@ -389,15 +331,11 @@ define bfloat @test_select_cc_f16_f32(bfloat %a, bfloat %b, float %c, float %d)
define i1 @test_fcmp_une(bfloat %a, bfloat %b) #0 {
; CHECK-LABEL: test_fcmp_une:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
-; CHECK-NEXT: fcmp s1, s0
+; CHECK-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: cset w0, ne
; CHECK-NEXT: ret
%r = fcmp une bfloat %a, %b
@@ -407,15 +345,11 @@ define i1 @test_fcmp_une(bfloat %a, bfloat %b) #0 {
define i1 @test_fcmp_ueq(bfloat %a, bfloat %b) #0 {
; CHECK-LABEL: test_fcmp_ueq:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
-; CHECK-NEXT: fcmp s1, s0
+; CHECK-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: cset w8, eq
; CHECK-NEXT: csinc w0, w8, wzr, vc
; CHECK-NEXT: ret
@@ -426,15 +360,11 @@ define i1 @test_fcmp_ueq(bfloat %a, bfloat %b) #0 {
define i1 @test_fcmp_ugt(bfloat %a, bfloat %b) #0 {
; CHECK-LABEL: test_fcmp_ugt:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
-; CHECK-NEXT: fcmp s1, s0
+; CHECK-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: cset w0, hi
; CHECK-NEXT: ret
%r = fcmp ugt bfloat %a, %b
@@ -444,15 +374,11 @@ define i1 @test_fcmp_ugt(bfloat %a, bfloat %b) #0 {
define i1 @test_fcmp_uge(bfloat %a, bfloat %b) #0 {
; CHECK-LABEL: test_fcmp_uge:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
-; CHECK-NEXT: fcmp s1, s0
+; CHECK-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: cset w0, pl
; CHECK-NEXT: ret
%r = fcmp uge bfloat %a, %b
@@ -462,15 +388,11 @@ define i1 @test_fcmp_uge(bfloat %a, bfloat %b) #0 {
define i1 @test_fcmp_ult(bfloat %a, bfloat %b) #0 {
; CHECK-LABEL: test_fcmp_ult:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
-; CHECK-NEXT: fcmp s1, s0
+; CHECK-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: cset w0, lt
; CHECK-NEXT: ret
%r = fcmp ult bfloat %a, %b
@@ -480,15 +402,11 @@ define i1 @test_fcmp_ult(bfloat %a, bfloat %b) #0 {
define i1 @test_fcmp_ule(bfloat %a, bfloat %b) #0 {
; CHECK-LABEL: test_fcmp_ule:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
-; CHECK-NEXT: fcmp s1, s0
+; CHECK-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: cset w0, le
; CHECK-NEXT: ret
%r = fcmp ule bfloat %a, %b
@@ -498,15 +416,11 @@ define i1 @test_fcmp_ule(bfloat %a, bfloat %b) #0 {
define i1 @test_fcmp_uno(bfloat %a, bfloat %b) #0 {
; CHECK-LABEL: test_fcmp_uno:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
-; CHECK-NEXT: fcmp s1, s0
+; CHECK-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: cset w0, vs
; CHECK-NEXT: ret
%r = fcmp uno bfloat %a, %b
@@ -516,15 +430,11 @@ define i1 @test_fcmp_uno(bfloat %a, bfloat %b) #0 {
define i1 @test_fcmp_one(bfloat %a, bfloat %b) #0 {
; CHECK-LABEL: test_fcmp_one:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
-; CHECK-NEXT: fcmp s1, s0
+; CHECK-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: cset w8, mi
; CHECK-NEXT: csinc w0, w8, wzr, le
; CHECK-NEXT: ret
@@ -535,15 +445,11 @@ define i1 @test_fcmp_one(bfloat %a, bfloat %b) #0 {
define i1 @test_fcmp_oeq(bfloat %a, bfloat %b) #0 {
; CHECK-LABEL: test_fcmp_oeq:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
-; CHECK-NEXT: fcmp s1, s0
+; CHECK-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: cset w0, eq
; CHECK-NEXT: ret
%r = fcmp oeq bfloat %a, %b
@@ -553,15 +459,11 @@ define i1 @test_fcmp_oeq(bfloat %a, bfloat %b) #0 {
define i1 @test_fcmp_ogt(bfloat %a, bfloat %b) #0 {
; CHECK-LABEL: test_fcmp_ogt:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
-; CHECK-NEXT: fcmp s1, s0
+; CHECK-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: cset w0, gt
; CHECK-NEXT: ret
%r = fcmp ogt bfloat %a, %b
@@ -571,15 +473,11 @@ define i1 @test_fcmp_ogt(bfloat %a, bfloat %b) #0 {
define i1 @test_fcmp_oge(bfloat %a, bfloat %b) #0 {
; CHECK-LABEL: test_fcmp_oge:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
-; CHECK-NEXT: fcmp s1, s0
+; CHECK-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: cset w0, ge
; CHECK-NEXT: ret
%r = fcmp oge bfloat %a, %b
@@ -589,15 +487,11 @@ define i1 @test_fcmp_oge(bfloat %a, bfloat %b) #0 {
define i1 @test_fcmp_olt(bfloat %a, bfloat %b) #0 {
; CHECK-LABEL: test_fcmp_olt:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
-; CHECK-NEXT: fcmp s1, s0
+; CHECK-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: cset w0, mi
; CHECK-NEXT: ret
%r = fcmp olt bfloat %a, %b
@@ -607,15 +501,11 @@ define i1 @test_fcmp_olt(bfloat %a, bfloat %b) #0 {
define i1 @test_fcmp_ole(bfloat %a, bfloat %b) #0 {
; CHECK-LABEL: test_fcmp_ole:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
-; CHECK-NEXT: fcmp s1, s0
+; CHECK-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: cset w0, ls
; CHECK-NEXT: ret
%r = fcmp ole bfloat %a, %b
@@ -625,15 +515,11 @@ define i1 @test_fcmp_ole(bfloat %a, bfloat %b) #0 {
define i1 @test_fcmp_ord(bfloat %a, bfloat %b) #0 {
; CHECK-LABEL: test_fcmp_ord:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
-; CHECK-NEXT: fcmp s1, s0
+; CHECK-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: cset w0, vc
; CHECK-NEXT: ret
%r = fcmp ord bfloat %a, %b
@@ -643,13 +529,11 @@ define i1 @test_fcmp_ord(bfloat %a, bfloat %b) #0 {
define void @test_fccmp(bfloat %in, ptr %out) {
; CHECK-LABEL: test_fccmp:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s0
; CHECK-NEXT: movi v1.2s, #69, lsl #24
-; CHECK-NEXT: movi v3.2s, #72, lsl #24
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov s2, w8
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v2.4s, v0.4h, #16
; CHECK-NEXT: adrp x8, .LCPI29_0
+; CHECK-NEXT: movi v3.2s, #72, lsl #24
; CHECK-NEXT: fcmp s2, s1
; CHECK-NEXT: ldr h1, [x8, :lo12:.LCPI29_0]
; CHECK-NEXT: fccmp s2, s3, #4, mi
@@ -667,15 +551,11 @@ define void @test_fccmp(bfloat %in, ptr %out) {
define void @test_br_cc(bfloat %a, bfloat %b, ptr %p1, ptr %p2) #0 {
; CHECK-LABEL: test_br_cc:
; CHECK: // %bb.0: // %common.ret
-; CHECK-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
-; CHECK-NEXT: fcmp s1, s0
+; CHECK-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: csel x8, x0, x1, pl
; CHECK-NEXT: str wzr, [x8]
; CHECK-NEXT: ret
@@ -725,10 +605,8 @@ declare i1 @test_dummy(ptr %p1) #0
define i32 @test_fptosi_i32(bfloat %a) #0 {
; CHECK-LABEL: test_fptosi_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: fcvtzs w0, s0
; CHECK-NEXT: ret
%r = fptosi bfloat %a to i32
@@ -738,10 +616,8 @@ define i32 @test_fptosi_i32(bfloat %a) #0 {
define i64 @test_fptosi_i64(bfloat %a) #0 {
; CHECK-LABEL: test_fptosi_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: fcvtzs x0, s0
; CHECK-NEXT: ret
%r = fptosi bfloat %a to i64
@@ -751,10 +627,8 @@ define i64 @test_fptosi_i64(bfloat %a) #0 {
define i32 @test_fptoui_i32(bfloat %a) #0 {
; CHECK-LABEL: test_fptoui_i32:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: fcvtzu w0, s0
; CHECK-NEXT: ret
%r = fptoui bfloat %a to i32
@@ -764,10 +638,8 @@ define i32 @test_fptoui_i32(bfloat %a) #0 {
define i64 @test_fptoui_i64(bfloat %a) #0 {
; CHECK-LABEL: test_fptoui_i64:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: fcvtzu x0, s0
; CHECK-NEXT: ret
%r = fptoui bfloat %a to i64
@@ -927,7 +799,8 @@ define bfloat @test_uitofp_i32_fadd(i32 %a, bfloat %b) #0 {
; CHECK-CVT: // %bb.0:
; CHECK-CVT-NEXT: ucvtf d1, w0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
; CHECK-CVT-NEXT: fcvtxn s1, d1
; CHECK-CVT-NEXT: fmov w9, s1
; CHECK-CVT-NEXT: ubfx w10, w9, #16, #1
@@ -935,12 +808,7 @@ define bfloat @test_uitofp_i32_fadd(i32 %a, bfloat %b) #0 {
; CHECK-CVT-NEXT: add w9, w10, w9
; CHECK-CVT-NEXT: lsr w9, w9, #16
; CHECK-CVT-NEXT: fmov s1, w9
-; CHECK-CVT-NEXT: fmov w9, s0
-; CHECK-CVT-NEXT: fmov w10, s1
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: fmov s0, w9
-; CHECK-CVT-NEXT: lsl w10, w10, #16
-; CHECK-CVT-NEXT: fmov s1, w10
+; CHECK-CVT-NEXT: shll v1.4s, v1.4h, #16
; CHECK-CVT-NEXT: fadd s0, s0, s1
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: ubfx w10, w9, #16, #1
@@ -954,15 +822,11 @@ define bfloat @test_uitofp_i32_fadd(i32 %a, bfloat %b) #0 {
; CHECK-BF16-LABEL: test_uitofp_i32_fadd:
; CHECK-BF16: // %bb.0:
; CHECK-BF16-NEXT: ucvtf d1, w0
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
; CHECK-BF16-NEXT: fcvtxn s1, d1
-; CHECK-BF16-NEXT: fmov s0, w8
; CHECK-BF16-NEXT: bfcvt h1, s1
-; CHECK-BF16-NEXT: fmov w9, s1
-; CHECK-BF16-NEXT: lsl w9, w9, #16
-; CHECK-BF16-NEXT: fmov s1, w9
+; CHECK-BF16-NEXT: shll v1.4s, v1.4h, #16
; CHECK-BF16-NEXT: fadd s0, s0, s1
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
@@ -976,7 +840,8 @@ define bfloat @test_sitofp_i32_fadd(i32 %a, bfloat %b) #0 {
; CHECK-CVT: // %bb.0:
; CHECK-CVT-NEXT: scvtf d1, w0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
; CHECK-CVT-NEXT: fcvtxn s1, d1
; CHECK-CVT-NEXT: fmov w9, s1
; CHECK-CVT-NEXT: ubfx w10, w9, #16, #1
@@ -984,12 +849,7 @@ define bfloat @test_sitofp_i32_fadd(i32 %a, bfloat %b) #0 {
; CHECK-CVT-NEXT: add w9, w10, w9
; CHECK-CVT-NEXT: lsr w9, w9, #16
; CHECK-CVT-NEXT: fmov s1, w9
-; CHECK-CVT-NEXT: fmov w9, s0
-; CHECK-CVT-NEXT: fmov w10, s1
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: fmov s0, w9
-; CHECK-CVT-NEXT: lsl w10, w10, #16
-; CHECK-CVT-NEXT: fmov s1, w10
+; CHECK-CVT-NEXT: shll v1.4s, v1.4h, #16
; CHECK-CVT-NEXT: fadd s0, s0, s1
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: ubfx w10, w9, #16, #1
@@ -1003,15 +863,11 @@ define bfloat @test_sitofp_i32_fadd(i32 %a, bfloat %b) #0 {
; CHECK-BF16-LABEL: test_sitofp_i32_fadd:
; CHECK-BF16: // %bb.0:
; CHECK-BF16-NEXT: scvtf d1, w0
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
; CHECK-BF16-NEXT: fcvtxn s1, d1
-; CHECK-BF16-NEXT: fmov s0, w8
; CHECK-BF16-NEXT: bfcvt h1, s1
-; CHECK-BF16-NEXT: fmov w9, s1
-; CHECK-BF16-NEXT: lsl w9, w9, #16
-; CHECK-BF16-NEXT: fmov s1, w9
+; CHECK-BF16-NEXT: shll v1.4s, v1.4h, #16
; CHECK-BF16-NEXT: fadd s0, s0, s1
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
@@ -1070,10 +926,9 @@ define bfloat @test_fptrunc_double(double %a) #0 {
define float @test_fpext_float(bfloat %a) #0 {
; CHECK-LABEL: test_fpext_float:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-NEXT: ret
%r = fpext bfloat %a to float
ret float %r
@@ -1082,10 +937,8 @@ define float @test_fpext_float(bfloat %a) #0 {
define double @test_fpext_double(bfloat %a) #0 {
; CHECK-LABEL: test_fpext_double:
; CHECK: // %bb.0:
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: fcvt d0, s0
; CHECK-NEXT: ret
%r = fpext bfloat %a to double
@@ -1148,11 +1001,9 @@ declare bfloat @llvm.fmuladd.f16(bfloat %a, bfloat %b, bfloat %c) #0
define bfloat @test_sqrt(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_sqrt:
; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w9, s0
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: fmov s0, w9
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
; CHECK-CVT-NEXT: fsqrt s0, s0
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: ubfx w10, w9, #16, #1
@@ -1165,10 +1016,8 @@ define bfloat @test_sqrt(bfloat %a) #0 {
;
; CHECK-BF16-LABEL: test_sqrt:
; CHECK-BF16: // %bb.0:
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
; CHECK-BF16-NEXT: fsqrt s0, s0
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
@@ -1180,10 +1029,9 @@ define bfloat @test_powi(bfloat %a, i32 %b) #0 {
; CHECK-CVT-LABEL: test_powi:
; CHECK-CVT: // %bb.0:
; CHECK-CVT-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s0
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: fmov s0, w8
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-CVT-NEXT: bl __powisf2
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
@@ -1199,10 +1047,9 @@ define bfloat @test_powi(bfloat %a, i32 %b) #0 {
; CHECK-BF16-LABEL: test_powi:
; CHECK-BF16: // %bb.0:
; CHECK-BF16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-BF16-NEXT: bl __powisf2
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
@@ -1216,10 +1063,9 @@ define bfloat @test_sin(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_sin:
; CHECK-CVT: // %bb.0:
; CHECK-CVT-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s0
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: fmov s0, w8
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-CVT-NEXT: bl sinf
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
@@ -1235,10 +1081,9 @@ define bfloat @test_sin(bfloat %a) #0 {
; CHECK-BF16-LABEL: test_sin:
; CHECK-BF16: // %bb.0:
; CHECK-BF16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-BF16-NEXT: bl sinf
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
@@ -1251,10 +1096,9 @@ define bfloat @test_cos(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_cos:
; CHECK-CVT: // %bb.0:
; CHECK-CVT-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s0
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: fmov s0, w8
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-CVT-NEXT: bl cosf
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
@@ -1270,10 +1114,9 @@ define bfloat @test_cos(bfloat %a) #0 {
; CHECK-BF16-LABEL: test_cos:
; CHECK-BF16: // %bb.0:
; CHECK-BF16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-BF16-NEXT: bl cosf
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
@@ -1286,10 +1129,9 @@ define bfloat @test_tan(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_tan:
; CHECK-CVT: // %bb.0:
; CHECK-CVT-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s0
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: fmov s0, w8
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-CVT-NEXT: bl tanf
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
@@ -1305,10 +1147,9 @@ define bfloat @test_tan(bfloat %a) #0 {
; CHECK-BF16-LABEL: test_tan:
; CHECK-BF16: // %bb.0:
; CHECK-BF16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-BF16-NEXT: bl tanf
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
@@ -1321,10 +1162,9 @@ define bfloat @test_acos(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_acos:
; CHECK-CVT: // %bb.0:
; CHECK-CVT-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s0
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: fmov s0, w8
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-CVT-NEXT: bl acosf
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
@@ -1340,10 +1180,9 @@ define bfloat @test_acos(bfloat %a) #0 {
; CHECK-BF16-LABEL: test_acos:
; CHECK-BF16: // %bb.0:
; CHECK-BF16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-BF16-NEXT: bl acosf
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
@@ -1356,10 +1195,9 @@ define bfloat @test_asin(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_asin:
; CHECK-CVT: // %bb.0:
; CHECK-CVT-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s0
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: fmov s0, w8
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-CVT-NEXT: bl asinf
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
@@ -1375,10 +1213,9 @@ define bfloat @test_asin(bfloat %a) #0 {
; CHECK-BF16-LABEL: test_asin:
; CHECK-BF16: // %bb.0:
; CHECK-BF16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-BF16-NEXT: bl asinf
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
@@ -1391,10 +1228,9 @@ define bfloat @test_atan(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_atan:
; CHECK-CVT: // %bb.0:
; CHECK-CVT-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s0
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: fmov s0, w8
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-CVT-NEXT: bl atanf
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
@@ -1410,10 +1246,9 @@ define bfloat @test_atan(bfloat %a) #0 {
; CHECK-BF16-LABEL: test_atan:
; CHECK-BF16: // %bb.0:
; CHECK-BF16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-BF16-NEXT: bl atanf
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
@@ -1426,14 +1261,12 @@ define bfloat @test_atan2(bfloat %a, bfloat %b) #0 {
; CHECK-CVT-LABEL: test_atan2:
; CHECK-CVT: // %bb.0:
; CHECK-CVT-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s0
-; CHECK-CVT-NEXT: fmov w9, s1
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: fmov s0, w8
-; CHECK-CVT-NEXT: fmov s1, w9
+; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-CVT-NEXT: // kill: def $s0 killed $s0 killed $q0
+; CHECK-CVT-NEXT: // kill: def $s1 killed $s1 killed $q1
; CHECK-CVT-NEXT: bl atan2f
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
@@ -1449,14 +1282,12 @@ define bfloat @test_atan2(bfloat %a, bfloat %b) #0 {
; CHECK-BF16-LABEL: test_atan2:
; CHECK-BF16: // %bb.0:
; CHECK-BF16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: fmov w9, s1
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: lsl w9, w9, #16
-; CHECK-BF16-NEXT: fmov s0, w8
-; CHECK-BF16-NEXT: fmov s1, w9
+; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-BF16-NEXT: // kill: def $s0 killed $s0 killed $q0
+; CHECK-BF16-NEXT: // kill: def $s1 killed $s1 killed $q1
; CHECK-BF16-NEXT: bl atan2f
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
@@ -1469,10 +1300,9 @@ define bfloat @test_cosh(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_cosh:
; CHECK-CVT: // %bb.0:
; CHECK-CVT-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s0
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: fmov s0, w8
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-CVT-NEXT: bl coshf
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
@@ -1488,10 +1318,9 @@ define bfloat @test_cosh(bfloat %a) #0 {
; CHECK-BF16-LABEL: test_cosh:
; CHECK-BF16: // %bb.0:
; CHECK-BF16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-BF16-NEXT: bl coshf
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
@@ -1504,10 +1333,9 @@ define bfloat @test_sinh(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_sinh:
; CHECK-CVT: // %bb.0:
; CHECK-CVT-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s0
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: fmov s0, w8
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-CVT-NEXT: bl sinhf
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
@@ -1523,10 +1351,9 @@ define bfloat @test_sinh(bfloat %a) #0 {
; CHECK-BF16-LABEL: test_sinh:
; CHECK-BF16: // %bb.0:
; CHECK-BF16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-BF16-NEXT: bl sinhf
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
@@ -1539,10 +1366,9 @@ define bfloat @test_tanh(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_tanh:
; CHECK-CVT: // %bb.0:
; CHECK-CVT-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s0
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: fmov s0, w8
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-CVT-NEXT: bl tanhf
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
@@ -1558,10 +1384,9 @@ define bfloat @test_tanh(bfloat %a) #0 {
; CHECK-BF16-LABEL: test_tanh:
; CHECK-BF16: // %bb.0:
; CHECK-BF16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-BF16-NEXT: bl tanhf
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
@@ -1574,14 +1399,12 @@ define bfloat @test_pow(bfloat %a, bfloat %b) #0 {
; CHECK-CVT-LABEL: test_pow:
; CHECK-CVT: // %bb.0:
; CHECK-CVT-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s0
-; CHECK-CVT-NEXT: fmov w9, s1
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: fmov s0, w8
-; CHECK-CVT-NEXT: fmov s1, w9
+; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-CVT-NEXT: // kill: def $s0 killed $s0 killed $q0
+; CHECK-CVT-NEXT: // kill: def $s1 killed $s1 killed $q1
; CHECK-CVT-NEXT: bl powf
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
@@ -1597,14 +1420,12 @@ define bfloat @test_pow(bfloat %a, bfloat %b) #0 {
; CHECK-BF16-LABEL: test_pow:
; CHECK-BF16: // %bb.0:
; CHECK-BF16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: fmov w9, s1
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: lsl w9, w9, #16
-; CHECK-BF16-NEXT: fmov s0, w8
-; CHECK-BF16-NEXT: fmov s1, w9
+; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-BF16-NEXT: // kill: def $s0 killed $s0 killed $q0
+; CHECK-BF16-NEXT: // kill: def $s1 killed $s1 killed $q1
; CHECK-BF16-NEXT: bl powf
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
@@ -1617,10 +1438,9 @@ define bfloat @test_exp(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_exp:
; CHECK-CVT: // %bb.0:
; CHECK-CVT-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s0
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: fmov s0, w8
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-CVT-NEXT: bl expf
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
@@ -1636,10 +1456,9 @@ define bfloat @test_exp(bfloat %a) #0 {
; CHECK-BF16-LABEL: test_exp:
; CHECK-BF16: // %bb.0:
; CHECK-BF16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-BF16-NEXT: bl expf
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
@@ -1652,10 +1471,9 @@ define bfloat @test_exp2(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_exp2:
; CHECK-CVT: // %bb.0:
; CHECK-CVT-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s0
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: fmov s0, w8
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-CVT-NEXT: bl exp2f
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
@@ -1671,10 +1489,9 @@ define bfloat @test_exp2(bfloat %a) #0 {
; CHECK-BF16-LABEL: test_exp2:
; CHECK-BF16: // %bb.0:
; CHECK-BF16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-BF16-NEXT: bl exp2f
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
@@ -1687,10 +1504,9 @@ define bfloat @test_log(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_log:
; CHECK-CVT: // %bb.0:
; CHECK-CVT-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s0
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: fmov s0, w8
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-CVT-NEXT: bl logf
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
@@ -1706,10 +1522,9 @@ define bfloat @test_log(bfloat %a) #0 {
; CHECK-BF16-LABEL: test_log:
; CHECK-BF16: // %bb.0:
; CHECK-BF16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-BF16-NEXT: bl logf
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
@@ -1722,10 +1537,9 @@ define bfloat @test_log10(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_log10:
; CHECK-CVT: // %bb.0:
; CHECK-CVT-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s0
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: fmov s0, w8
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-CVT-NEXT: bl log10f
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
@@ -1741,10 +1555,9 @@ define bfloat @test_log10(bfloat %a) #0 {
; CHECK-BF16-LABEL: test_log10:
; CHECK-BF16: // %bb.0:
; CHECK-BF16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-BF16-NEXT: bl log10f
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
@@ -1757,10 +1570,9 @@ define bfloat @test_log2(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_log2:
; CHECK-CVT: // %bb.0:
; CHECK-CVT-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s0
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: fmov s0, w8
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-CVT-NEXT: bl log2f
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
@@ -1776,10 +1588,9 @@ define bfloat @test_log2(bfloat %a) #0 {
; CHECK-BF16-LABEL: test_log2:
; CHECK-BF16: // %bb.0:
; CHECK-BF16-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-BF16-NEXT: bl log2f
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
@@ -1791,20 +1602,14 @@ define bfloat @test_log2(bfloat %a) #0 {
define bfloat @test_fma(bfloat %a, bfloat %b, bfloat %c) #0 {
; CHECK-CVT-LABEL: test_fma:
; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $h2 killed $h2 def $s2
-; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s2
-; CHECK-CVT-NEXT: fmov w9, s1
-; CHECK-CVT-NEXT: fmov w10, s0
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: lsl w10, w10, #16
-; CHECK-CVT-NEXT: fmov s0, w8
-; CHECK-CVT-NEXT: fmov s1, w9
-; CHECK-CVT-NEXT: fmov s2, w10
+; CHECK-CVT-NEXT: // kill: def $h2 killed $h2 def $d2
+; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-CVT-NEXT: mov w10, #32767 // =0x7fff
-; CHECK-CVT-NEXT: fmadd s0, s2, s1, s0
+; CHECK-CVT-NEXT: shll v2.4s, v2.4h, #16
+; CHECK-CVT-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: fmadd s0, s0, s1, s2
; CHECK-CVT-NEXT: fmov w8, s0
; CHECK-CVT-NEXT: ubfx w9, w8, #16, #1
; CHECK-CVT-NEXT: add w8, w8, w10
@@ -1816,19 +1621,13 @@ define bfloat @test_fma(bfloat %a, bfloat %b, bfloat %c) #0 {
;
; CHECK-BF16-LABEL: test_fma:
; CHECK-BF16: // %bb.0:
-; CHECK-BF16-NEXT: // kill: def $h2 killed $h2 def $s2
-; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s2
-; CHECK-BF16-NEXT: fmov w9, s1
-; CHECK-BF16-NEXT: fmov w10, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: lsl w9, w9, #16
-; CHECK-BF16-NEXT: lsl w10, w10, #16
-; CHECK-BF16-NEXT: fmov s0, w8
-; CHECK-BF16-NEXT: fmov s1, w9
-; CHECK-BF16-NEXT: fmov s2, w10
-; CHECK-BF16-NEXT: fmadd s0, s2, s1, s0
+; CHECK-BF16-NEXT: // kill: def $h2 killed $h2 def $d2
+; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v2.4s, v2.4h, #16
+; CHECK-BF16-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: fmadd s0, s0, s1, s2
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
%r = call bfloat @llvm.fma.f16(bfloat %a, bfloat %b, bfloat %c)
@@ -1851,16 +1650,12 @@ define bfloat @test_fabs(bfloat %a) #0 {
define bfloat @test_minnum(bfloat %a, bfloat %b) #0 {
; CHECK-CVT-LABEL: test_minnum:
; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w9, s1
-; CHECK-CVT-NEXT: fmov w10, s0
+; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: lsl w10, w10, #16
-; CHECK-CVT-NEXT: fmov s0, w9
-; CHECK-CVT-NEXT: fmov s1, w10
-; CHECK-CVT-NEXT: fminnm s0, s1, s0
+; CHECK-CVT-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: fminnm s0, s0, s1
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: ubfx w10, w9, #16, #1
; CHECK-CVT-NEXT: add w8, w9, w8
@@ -1872,15 +1667,11 @@ define bfloat @test_minnum(bfloat %a, bfloat %b) #0 {
;
; CHECK-BF16-LABEL: test_minnum:
; CHECK-BF16: // %bb.0:
-; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s1
-; CHECK-BF16-NEXT: fmov w9, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: lsl w9, w9, #16
-; CHECK-BF16-NEXT: fmov s0, w8
-; CHECK-BF16-NEXT: fmov s1, w9
-; CHECK-BF16-NEXT: fminnm s0, s1, s0
+; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: fminnm s0, s0, s1
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
%r = call bfloat @llvm.minnum.f16(bfloat %a, bfloat %b)
@@ -1890,16 +1681,12 @@ define bfloat @test_minnum(bfloat %a, bfloat %b) #0 {
define bfloat @test_maxnum(bfloat %a, bfloat %b) #0 {
; CHECK-CVT-LABEL: test_maxnum:
; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w9, s1
-; CHECK-CVT-NEXT: fmov w10, s0
+; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: lsl w10, w10, #16
-; CHECK-CVT-NEXT: fmov s0, w9
-; CHECK-CVT-NEXT: fmov s1, w10
-; CHECK-CVT-NEXT: fmaxnm s0, s1, s0
+; CHECK-CVT-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: fmaxnm s0, s0, s1
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: ubfx w10, w9, #16, #1
; CHECK-CVT-NEXT: add w8, w9, w8
@@ -1911,15 +1698,11 @@ define bfloat @test_maxnum(bfloat %a, bfloat %b) #0 {
;
; CHECK-BF16-LABEL: test_maxnum:
; CHECK-BF16: // %bb.0:
-; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s1
-; CHECK-BF16-NEXT: fmov w9, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: lsl w9, w9, #16
-; CHECK-BF16-NEXT: fmov s0, w8
-; CHECK-BF16-NEXT: fmov s1, w9
-; CHECK-BF16-NEXT: fmaxnm s0, s1, s0
+; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: fmaxnm s0, s0, s1
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
%r = call bfloat @llvm.maxnum.f16(bfloat %a, bfloat %b)
@@ -1929,16 +1712,12 @@ define bfloat @test_maxnum(bfloat %a, bfloat %b) #0 {
define bfloat @test_copysign(bfloat %a, bfloat %b) #0 {
; CHECK-CVT-LABEL: test_copysign:
; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s1
-; CHECK-CVT-NEXT: fmov w9, s0
+; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-CVT-NEXT: mvni v2.4s, #128, lsl #24
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: fmov s0, w8
-; CHECK-CVT-NEXT: fmov s1, w9
-; CHECK-CVT-NEXT: bit v0.16b, v1.16b, v2.16b
+; CHECK-CVT-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: bif v0.16b, v1.16b, v2.16b
; CHECK-CVT-NEXT: fmov w8, s0
; CHECK-CVT-NEXT: lsr w8, w8, #16
; CHECK-CVT-NEXT: fmov s0, w8
@@ -1947,16 +1726,12 @@ define bfloat @test_copysign(bfloat %a, bfloat %b) #0 {
;
; CHECK-BF16-LABEL: test_copysign:
; CHECK-BF16: // %bb.0:
-; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s1
-; CHECK-BF16-NEXT: fmov w9, s0
+; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-BF16-NEXT: mvni v2.4s, #128, lsl #24
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: lsl w9, w9, #16
-; CHECK-BF16-NEXT: fmov s0, w8
-; CHECK-BF16-NEXT: fmov s1, w9
-; CHECK-BF16-NEXT: bit v0.16b, v1.16b, v2.16b
+; CHECK-BF16-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: bif v0.16b, v1.16b, v2.16b
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
%r = call bfloat @llvm.copysign.f16(bfloat %a, bfloat %b)
@@ -1966,12 +1741,10 @@ define bfloat @test_copysign(bfloat %a, bfloat %b) #0 {
define bfloat @test_copysign_f32(bfloat %a, float %b) #0 {
; CHECK-CVT-LABEL: test_copysign_f32:
; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s0
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-CVT-NEXT: mvni v2.4s, #128, lsl #24
; CHECK-CVT-NEXT: // kill: def $s1 killed $s1 def $q1
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: fmov s0, w8
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
; CHECK-CVT-NEXT: bif v0.16b, v1.16b, v2.16b
; CHECK-CVT-NEXT: fmov w8, s0
; CHECK-CVT-NEXT: lsr w8, w8, #16
@@ -1981,12 +1754,10 @@ define bfloat @test_copysign_f32(bfloat %a, float %b) #0 {
;
; CHECK-BF16-LABEL: test_copysign_f32:
; CHECK-BF16: // %bb.0:
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-BF16-NEXT: mvni v2.4s, #128, lsl #24
; CHECK-BF16-NEXT: // kill: def $s1 killed $s1 def $q1
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
; CHECK-BF16-NEXT: bif v0.16b, v1.16b, v2.16b
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
@@ -1998,12 +1769,10 @@ define bfloat @test_copysign_f32(bfloat %a, float %b) #0 {
define bfloat @test_copysign_f64(bfloat %a, double %b) #0 {
; CHECK-CVT-LABEL: test_copysign_f64:
; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s0
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-CVT-NEXT: fcvt s1, d1
; CHECK-CVT-NEXT: mvni v2.4s, #128, lsl #24
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: fmov s0, w8
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
; CHECK-CVT-NEXT: bif v0.16b, v1.16b, v2.16b
; CHECK-CVT-NEXT: fmov w8, s0
; CHECK-CVT-NEXT: lsr w8, w8, #16
@@ -2013,12 +1782,10 @@ define bfloat @test_copysign_f64(bfloat %a, double %b) #0 {
;
; CHECK-BF16-LABEL: test_copysign_f64:
; CHECK-BF16: // %bb.0:
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-BF16-NEXT: fcvt s1, d1
; CHECK-BF16-NEXT: mvni v2.4s, #128, lsl #24
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
; CHECK-BF16-NEXT: bif v0.16b, v1.16b, v2.16b
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
@@ -2032,34 +1799,33 @@ define bfloat @test_copysign_f64(bfloat %a, double %b) #0 {
define float @test_copysign_extended(bfloat %a, bfloat %b) #0 {
; CHECK-CVT-LABEL: test_copysign_extended:
; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s1
-; CHECK-CVT-NEXT: fmov w9, s0
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-CVT-NEXT: movi v2.4s, #16
+; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-CVT-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-CVT-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-CVT-NEXT: ushl v0.4s, v0.4s, v2.4s
; CHECK-CVT-NEXT: mvni v2.4s, #128, lsl #24
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: fmov s0, w8
-; CHECK-CVT-NEXT: fmov s1, w9
-; CHECK-CVT-NEXT: bit v0.16b, v1.16b, v2.16b
+; CHECK-CVT-NEXT: bif v0.16b, v1.16b, v2.16b
; CHECK-CVT-NEXT: fmov w8, s0
; CHECK-CVT-NEXT: lsr w8, w8, #16
-; CHECK-CVT-NEXT: lsl w8, w8, #16
; CHECK-CVT-NEXT: fmov s0, w8
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-CVT-NEXT: ret
;
; CHECK-BF16-LABEL: test_copysign_extended:
; CHECK-BF16: // %bb.0:
-; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s1
-; CHECK-BF16-NEXT: fmov w9, s0
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: movi v2.4s, #16
+; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-BF16-NEXT: ushll v0.4s, v0.4h, #0
+; CHECK-BF16-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-BF16-NEXT: ushl v0.4s, v0.4s, v2.4s
; CHECK-BF16-NEXT: mvni v2.4s, #128, lsl #24
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: lsl w9, w9, #16
-; CHECK-BF16-NEXT: fmov s0, w8
-; CHECK-BF16-NEXT: fmov s1, w9
-; CHECK-BF16-NEXT: bit v0.16b, v1.16b, v2.16b
+; CHECK-BF16-NEXT: bif v0.16b, v1.16b, v2.16b
+; CHECK-BF16-NEXT: bfcvt h0, s0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
; CHECK-BF16-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-BF16-NEXT: ret
%r = call bfloat @llvm.copysign.f16(bfloat %a, bfloat %b)
@@ -2070,11 +1836,9 @@ define float @test_copysign_extended(bfloat %a, bfloat %b) #0 {
define bfloat @test_floor(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_floor:
; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w9, s0
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: fmov s0, w9
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
; CHECK-CVT-NEXT: frintm s0, s0
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: ubfx w10, w9, #16, #1
@@ -2087,10 +1851,8 @@ define bfloat @test_floor(bfloat %a) #0 {
;
; CHECK-BF16-LABEL: test_floor:
; CHECK-BF16: // %bb.0:
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
; CHECK-BF16-NEXT: frintm s0, s0
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
@@ -2101,11 +1863,9 @@ define bfloat @test_floor(bfloat %a) #0 {
define bfloat @test_ceil(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_ceil:
; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w9, s0
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: fmov s0, w9
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
; CHECK-CVT-NEXT: frintp s0, s0
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: ubfx w10, w9, #16, #1
@@ -2118,10 +1878,8 @@ define bfloat @test_ceil(bfloat %a) #0 {
;
; CHECK-BF16-LABEL: test_ceil:
; CHECK-BF16: // %bb.0:
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
; CHECK-BF16-NEXT: frintp s0, s0
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
@@ -2132,11 +1890,9 @@ define bfloat @test_ceil(bfloat %a) #0 {
define bfloat @test_trunc(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_trunc:
; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w9, s0
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: fmov s0, w9
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
; CHECK-CVT-NEXT: frintz s0, s0
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: ubfx w10, w9, #16, #1
@@ -2149,10 +1905,8 @@ define bfloat @test_trunc(bfloat %a) #0 {
;
; CHECK-BF16-LABEL: test_trunc:
; CHECK-BF16: // %bb.0:
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
; CHECK-BF16-NEXT: frintz s0, s0
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
@@ -2163,11 +1917,9 @@ define bfloat @test_trunc(bfloat %a) #0 {
define bfloat @test_rint(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_rint:
; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w9, s0
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: fmov s0, w9
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
; CHECK-CVT-NEXT: frintx s0, s0
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: ubfx w10, w9, #16, #1
@@ -2180,10 +1932,8 @@ define bfloat @test_rint(bfloat %a) #0 {
;
; CHECK-BF16-LABEL: test_rint:
; CHECK-BF16: // %bb.0:
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
; CHECK-BF16-NEXT: frintx s0, s0
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
@@ -2194,11 +1944,9 @@ define bfloat @test_rint(bfloat %a) #0 {
define bfloat @test_nearbyint(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_nearbyint:
; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w9, s0
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: fmov s0, w9
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
; CHECK-CVT-NEXT: frinti s0, s0
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: ubfx w10, w9, #16, #1
@@ -2211,10 +1959,8 @@ define bfloat @test_nearbyint(bfloat %a) #0 {
;
; CHECK-BF16-LABEL: test_nearbyint:
; CHECK-BF16: // %bb.0:
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
; CHECK-BF16-NEXT: frinti s0, s0
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
@@ -2225,11 +1971,9 @@ define bfloat @test_nearbyint(bfloat %a) #0 {
define bfloat @test_round(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_round:
; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w9, s0
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: fmov s0, w9
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
; CHECK-CVT-NEXT: frinta s0, s0
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: ubfx w10, w9, #16, #1
@@ -2242,10 +1986,8 @@ define bfloat @test_round(bfloat %a) #0 {
;
; CHECK-BF16-LABEL: test_round:
; CHECK-BF16: // %bb.0:
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
; CHECK-BF16-NEXT: frinta s0, s0
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
@@ -2256,11 +1998,9 @@ define bfloat @test_round(bfloat %a) #0 {
define bfloat @test_roundeven(bfloat %a) #0 {
; CHECK-CVT-LABEL: test_roundeven:
; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w9, s0
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-CVT-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: fmov s0, w9
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
; CHECK-CVT-NEXT: frintn s0, s0
; CHECK-CVT-NEXT: fmov w9, s0
; CHECK-CVT-NEXT: ubfx w10, w9, #16, #1
@@ -2273,10 +2013,8 @@ define bfloat @test_roundeven(bfloat %a) #0 {
;
; CHECK-BF16-LABEL: test_roundeven:
; CHECK-BF16: // %bb.0:
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
; CHECK-BF16-NEXT: frintn s0, s0
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
@@ -2287,27 +2025,21 @@ define bfloat @test_roundeven(bfloat %a) #0 {
define bfloat @test_fmuladd(bfloat %a, bfloat %b, bfloat %c) #0 {
; CHECK-CVT-LABEL: test_fmuladd:
; CHECK-CVT: // %bb.0:
-; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-CVT-NEXT: fmov w8, s1
-; CHECK-CVT-NEXT: fmov w9, s0
+; CHECK-CVT-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-CVT-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-CVT-NEXT: mov w10, #32767 // =0x7fff
-; CHECK-CVT-NEXT: // kill: def $h2 killed $h2 def $s2
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: lsl w9, w9, #16
-; CHECK-CVT-NEXT: fmov s0, w8
-; CHECK-CVT-NEXT: fmov s1, w9
-; CHECK-CVT-NEXT: fmul s0, s1, s0
+; CHECK-CVT-NEXT: // kill: def $h2 killed $h2 def $d2
+; CHECK-CVT-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-CVT-NEXT: fmul s0, s0, s1
+; CHECK-CVT-NEXT: shll v1.4s, v2.4h, #16
; CHECK-CVT-NEXT: fmov w8, s0
; CHECK-CVT-NEXT: ubfx w9, w8, #16, #1
; CHECK-CVT-NEXT: add w8, w8, w10
; CHECK-CVT-NEXT: add w8, w9, w8
-; CHECK-CVT-NEXT: fmov w9, s2
; CHECK-CVT-NEXT: lsr w8, w8, #16
-; CHECK-CVT-NEXT: lsl w8, w8, #16
-; CHECK-CVT-NEXT: lsl w9, w9, #16
; CHECK-CVT-NEXT: fmov s0, w8
-; CHECK-CVT-NEXT: fmov s1, w9
+; CHECK-CVT-NEXT: shll v0.4s, v0.4h, #16
; CHECK-CVT-NEXT: fadd s0, s0, s1
; CHECK-CVT-NEXT: fmov w8, s0
; CHECK-CVT-NEXT: ubfx w9, w8, #16, #1
@@ -2320,23 +2052,15 @@ define bfloat @test_fmuladd(bfloat %a, bfloat %b, bfloat %c) #0 {
;
; CHECK-BF16-LABEL: test_fmuladd:
; CHECK-BF16: // %bb.0:
-; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $s1
-; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-BF16-NEXT: fmov w8, s1
-; CHECK-BF16-NEXT: fmov w9, s0
-; CHECK-BF16-NEXT: // kill: def $h2 killed $h2 def $s2
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: lsl w9, w9, #16
-; CHECK-BF16-NEXT: fmov s0, w8
-; CHECK-BF16-NEXT: fmov s1, w9
-; CHECK-BF16-NEXT: fmov w9, s2
-; CHECK-BF16-NEXT: fmul s0, s1, s0
-; CHECK-BF16-NEXT: lsl w9, w9, #16
-; CHECK-BF16-NEXT: fmov s1, w9
+; CHECK-BF16-NEXT: // kill: def $h1 killed $h1 def $d1
+; CHECK-BF16-NEXT: // kill: def $h0 killed $h0 def $d0
+; CHECK-BF16-NEXT: // kill: def $h2 killed $h2 def $d2
+; CHECK-BF16-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
+; CHECK-BF16-NEXT: fmul s0, s0, s1
+; CHECK-BF16-NEXT: shll v1.4s, v2.4h, #16
; CHECK-BF16-NEXT: bfcvt h0, s0
-; CHECK-BF16-NEXT: fmov w8, s0
-; CHECK-BF16-NEXT: lsl w8, w8, #16
-; CHECK-BF16-NEXT: fmov s0, w8
+; CHECK-BF16-NEXT: shll v0.4s, v0.4h, #16
; CHECK-BF16-NEXT: fadd s0, s0, s1
; CHECK-BF16-NEXT: bfcvt h0, s0
; CHECK-BF16-NEXT: ret
diff --git a/llvm/test/CodeGen/AArch64/bf16-v8-instructions.ll b/llvm/test/CodeGen/AArch64/bf16-v8-instructions.ll
index c03e2e5..a609e33 100644
--- a/llvm/test/CodeGen/AArch64/bf16-v8-instructions.ll
+++ b/llvm/test/CodeGen/AArch64/bf16-v8-instructions.ll
@@ -272,9 +272,8 @@ define <8 x bfloat> @d_to_h(<8 x double> %a) {
define <8 x float> @h_to_s(<8 x bfloat> %a) {
; CHECK-LABEL: h_to_s:
; CHECK: // %bb.0:
-; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: shll2 v1.4s, v0.8h, #16
; CHECK-NEXT: shll v0.4s, v0.4h, #16
-; CHECK-NEXT: shll v1.4s, v1.4h, #16
; CHECK-NEXT: ret
%1 = fpext <8 x bfloat> %a to <8 x float>
ret <8 x float> %1
@@ -283,13 +282,12 @@ define <8 x float> @h_to_s(<8 x bfloat> %a) {
define <8 x double> @h_to_d(<8 x bfloat> %a) {
; CHECK-LABEL: h_to_d:
; CHECK: // %bb.0:
-; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT: shll v2.4s, v0.4h, #16
-; CHECK-NEXT: fcvtl v0.2d, v2.2s
-; CHECK-NEXT: shll v4.4s, v1.4h, #16
-; CHECK-NEXT: fcvtl2 v1.2d, v2.4s
-; CHECK-NEXT: fcvtl2 v3.2d, v4.4s
-; CHECK-NEXT: fcvtl v2.2d, v4.2s
+; CHECK-NEXT: shll v1.4s, v0.4h, #16
+; CHECK-NEXT: shll2 v2.4s, v0.8h, #16
+; CHECK-NEXT: fcvtl v0.2d, v1.2s
+; CHECK-NEXT: fcvtl2 v3.2d, v2.4s
+; CHECK-NEXT: fcvtl2 v1.2d, v1.4s
+; CHECK-NEXT: fcvtl v2.2d, v2.2s
; CHECK-NEXT: ret
%1 = fpext <8 x bfloat> %a to <8 x double>
ret <8 x double> %1
@@ -788,11 +786,10 @@ define void @test_insert_at_zero(bfloat %a, ptr %b) #0 {
define <8 x i8> @fptosi_i8(<8 x bfloat> %a) #0 {
; CHECK-LABEL: fptosi_i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: shll2 v1.4s, v0.8h, #16
; CHECK-NEXT: shll v0.4s, v0.4h, #16
-; CHECK-NEXT: fcvtzs v0.4s, v0.4s
-; CHECK-NEXT: shll v1.4s, v1.4h, #16
; CHECK-NEXT: fcvtzs v1.4s, v1.4s
+; CHECK-NEXT: fcvtzs v0.4s, v0.4s
; CHECK-NEXT: uzp1 v0.8h, v0.8h, v1.8h
; CHECK-NEXT: xtn v0.8b, v0.8h
; CHECK-NEXT: ret
@@ -803,11 +800,10 @@ define <8 x i8> @fptosi_i8(<8 x bfloat> %a) #0 {
define <8 x i16> @fptosi_i16(<8 x bfloat> %a) #0 {
; CHECK-LABEL: fptosi_i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: shll2 v1.4s, v0.8h, #16
; CHECK-NEXT: shll v0.4s, v0.4h, #16
-; CHECK-NEXT: fcvtzs v0.4s, v0.4s
-; CHECK-NEXT: shll v1.4s, v1.4h, #16
; CHECK-NEXT: fcvtzs v1.4s, v1.4s
+; CHECK-NEXT: fcvtzs v0.4s, v0.4s
; CHECK-NEXT: uzp1 v0.8h, v0.8h, v1.8h
; CHECK-NEXT: ret
%1 = fptosi<8 x bfloat> %a to <8 x i16>
@@ -817,11 +813,10 @@ define <8 x i16> @fptosi_i16(<8 x bfloat> %a) #0 {
define <8 x i8> @fptoui_i8(<8 x bfloat> %a) #0 {
; CHECK-LABEL: fptoui_i8:
; CHECK: // %bb.0:
-; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: shll2 v1.4s, v0.8h, #16
; CHECK-NEXT: shll v0.4s, v0.4h, #16
-; CHECK-NEXT: fcvtzu v0.4s, v0.4s
-; CHECK-NEXT: shll v1.4s, v1.4h, #16
; CHECK-NEXT: fcvtzu v1.4s, v1.4s
+; CHECK-NEXT: fcvtzu v0.4s, v0.4s
; CHECK-NEXT: uzp1 v0.8h, v0.8h, v1.8h
; CHECK-NEXT: xtn v0.8b, v0.8h
; CHECK-NEXT: ret
@@ -832,11 +827,10 @@ define <8 x i8> @fptoui_i8(<8 x bfloat> %a) #0 {
define <8 x i16> @fptoui_i16(<8 x bfloat> %a) #0 {
; CHECK-LABEL: fptoui_i16:
; CHECK: // %bb.0:
-; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: shll2 v1.4s, v0.8h, #16
; CHECK-NEXT: shll v0.4s, v0.4h, #16
-; CHECK-NEXT: fcvtzu v0.4s, v0.4s
-; CHECK-NEXT: shll v1.4s, v1.4h, #16
; CHECK-NEXT: fcvtzu v1.4s, v1.4s
+; CHECK-NEXT: fcvtzu v0.4s, v0.4s
; CHECK-NEXT: uzp1 v0.8h, v0.8h, v1.8h
; CHECK-NEXT: ret
%1 = fptoui<8 x bfloat> %a to <8 x i16>
@@ -846,90 +840,58 @@ define <8 x i16> @fptoui_i16(<8 x bfloat> %a) #0 {
define <8 x i1> @test_fcmp_une(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
; CHECK-LABEL: test_fcmp_une:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov h2, v1.h[1]
-; CHECK-NEXT: mov h3, v0.h[1]
-; CHECK-NEXT: fmov w10, s1
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: mov h2, v1.h[2]
-; CHECK-NEXT: mov h3, v0.h[2]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: mov h3, v1.h[4]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s5, s4
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: mov h4, v1.h[3]
-; CHECK-NEXT: lsl w10, w10, #16
-; CHECK-NEXT: fmov s6, w8
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: csetm w9, ne
-; CHECK-NEXT: fmov s16, w10
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: mov h5, v0.h[3]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: mov h6, v0.h[4]
-; CHECK-NEXT: mov h4, v1.h[5]
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: dup v2.4h, v1.h[1]
+; CHECK-NEXT: dup v3.4h, v0.h[1]
+; CHECK-NEXT: dup v4.4h, v1.h[2]
+; CHECK-NEXT: dup v5.4h, v0.h[2]
+; CHECK-NEXT: dup v6.4h, v0.h[3]
+; CHECK-NEXT: shll v2.4s, v2.4h, #16
+; CHECK-NEXT: shll v3.4s, v3.4h, #16
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v2.4s, v1.4h, #16
+; CHECK-NEXT: shll v3.4s, v0.4h, #16
; CHECK-NEXT: csetm w8, ne
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov h5, v0.h[5]
-; CHECK-NEXT: fcmp s16, s7
-; CHECK-NEXT: mov v2.h[1], w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: fmov s3, w9
-; CHECK-NEXT: fmov w9, s6
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v3.4s, v4.4h, #16
+; CHECK-NEXT: shll v4.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.4h, v1.h[3]
+; CHECK-NEXT: csetm w9, ne
+; CHECK-NEXT: fmov s2, w9
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[4]
+; CHECK-NEXT: dup v6.8h, v0.h[4]
+; CHECK-NEXT: mov v2.h[1], w8
; CHECK-NEXT: csetm w8, ne
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[5]
+; CHECK-NEXT: dup v6.8h, v0.h[5]
; CHECK-NEXT: mov v2.h[2], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fcmp s7, s3
-; CHECK-NEXT: mov h3, v1.h[6]
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: mov h1, v1.h[7]
-; CHECK-NEXT: fmov s6, w9
-; CHECK-NEXT: fmov w9, s5
; CHECK-NEXT: csetm w8, ne
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[6]
+; CHECK-NEXT: dup v6.8h, v0.h[6]
+; CHECK-NEXT: dup v1.8h, v1.h[7]
+; CHECK-NEXT: dup v0.8h, v0.h[7]
; CHECK-NEXT: mov v2.h[3], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fcmp s6, s4
-; CHECK-NEXT: mov h4, v0.h[6]
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s5, w8
-; CHECK-NEXT: mov h0, v0.h[7]
-; CHECK-NEXT: fmov s6, w9
; CHECK-NEXT: csetm w8, ne
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: mov v2.h[4], w8
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: fmov w9, s4
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: csetm w10, ne
-; CHECK-NEXT: fmov s3, w8
-; CHECK-NEXT: fmov s4, w9
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: mov v2.h[5], w10
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s4, s3
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
; CHECK-NEXT: csetm w8, ne
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: mov v2.h[5], w8
+; CHECK-NEXT: csetm w8, ne
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: mov v2.h[6], w8
-; CHECK-NEXT: fcmp s1, s0
; CHECK-NEXT: csetm w8, ne
; CHECK-NEXT: mov v2.h[7], w8
; CHECK-NEXT: xtn v0.8b, v2.8h
@@ -941,96 +903,64 @@ define <8 x i1> @test_fcmp_une(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
define <8 x i1> @test_fcmp_ueq(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
; CHECK-LABEL: test_fcmp_ueq:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov h2, v1.h[1]
-; CHECK-NEXT: mov h3, v0.h[1]
-; CHECK-NEXT: fmov w10, s1
-; CHECK-NEXT: fmov w11, s0
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: mov h2, v1.h[2]
-; CHECK-NEXT: mov h3, v0.h[2]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: lsl w9, w11, #16
-; CHECK-NEXT: fmov s6, w8
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: fcmp s5, s4
-; CHECK-NEXT: fmov s7, w9
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: mov h4, v1.h[3]
-; CHECK-NEXT: mov h5, v0.h[3]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: csetm w10, eq
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: csinv w10, w10, wzr, vc
-; CHECK-NEXT: fcmp s7, s6
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: fmov s3, w9
-; CHECK-NEXT: fmov w11, s4
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov h4, v0.h[4]
-; CHECK-NEXT: mov h7, v1.h[5]
+; CHECK-NEXT: dup v2.4h, v1.h[1]
+; CHECK-NEXT: dup v3.4h, v0.h[1]
+; CHECK-NEXT: dup v4.4h, v1.h[2]
+; CHECK-NEXT: dup v5.4h, v0.h[2]
+; CHECK-NEXT: dup v6.4h, v0.h[3]
+; CHECK-NEXT: shll v2.4s, v2.4h, #16
+; CHECK-NEXT: shll v3.4s, v3.4h, #16
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v2.4s, v1.4h, #16
+; CHECK-NEXT: shll v3.4s, v0.4h, #16
+; CHECK-NEXT: csetm w8, eq
+; CHECK-NEXT: csinv w8, w8, wzr, vc
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v3.4s, v4.4h, #16
+; CHECK-NEXT: shll v4.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.4h, v1.h[3]
; CHECK-NEXT: csetm w9, eq
; CHECK-NEXT: csinv w9, w9, wzr, vc
-; CHECK-NEXT: fcmp s3, s2
-; CHECK-NEXT: mov h3, v1.h[4]
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
; CHECK-NEXT: fmov s2, w9
-; CHECK-NEXT: lsl w11, w11, #16
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov s5, w11
-; CHECK-NEXT: fmov s6, w8
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[4]
+; CHECK-NEXT: dup v6.8h, v0.h[4]
+; CHECK-NEXT: mov v2.h[1], w8
; CHECK-NEXT: csetm w8, eq
-; CHECK-NEXT: mov v2.h[1], w10
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: fmov w10, s4
; CHECK-NEXT: csinv w8, w8, wzr, vc
-; CHECK-NEXT: mov h3, v1.h[6]
-; CHECK-NEXT: mov h1, v1.h[7]
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: mov h5, v0.h[5]
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: lsl w10, w10, #16
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[5]
+; CHECK-NEXT: dup v6.8h, v0.h[5]
; CHECK-NEXT: mov v2.h[2], w8
-; CHECK-NEXT: fmov s4, w9
-; CHECK-NEXT: fmov s6, w10
; CHECK-NEXT: csetm w8, eq
-; CHECK-NEXT: fmov w9, s7
-; CHECK-NEXT: fmov w10, s5
; CHECK-NEXT: csinv w8, w8, wzr, vc
-; CHECK-NEXT: fcmp s6, s4
-; CHECK-NEXT: mov h4, v0.h[6]
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[6]
+; CHECK-NEXT: dup v6.8h, v0.h[6]
+; CHECK-NEXT: dup v1.8h, v1.h[7]
+; CHECK-NEXT: dup v0.8h, v0.h[7]
; CHECK-NEXT: mov v2.h[3], w8
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: lsl w10, w10, #16
-; CHECK-NEXT: mov h0, v0.h[7]
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: fmov s6, w10
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: fmov w10, s4
; CHECK-NEXT: csetm w8, eq
; CHECK-NEXT: csinv w8, w8, wzr, vc
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: mov v2.h[4], w8
-; CHECK-NEXT: lsl w8, w9, #16
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: fmov s3, w8
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov s4, w9
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: csetm w10, eq
-; CHECK-NEXT: csinv w10, w10, wzr, vc
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: mov v2.h[5], w10
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fcmp s4, s3
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
; CHECK-NEXT: csetm w8, eq
; CHECK-NEXT: csinv w8, w8, wzr, vc
-; CHECK-NEXT: fcmp s1, s0
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: mov v2.h[5], w8
+; CHECK-NEXT: csetm w8, eq
+; CHECK-NEXT: csinv w8, w8, wzr, vc
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: mov v2.h[6], w8
; CHECK-NEXT: csetm w8, eq
; CHECK-NEXT: csinv w8, w8, wzr, vc
@@ -1044,90 +974,58 @@ define <8 x i1> @test_fcmp_ueq(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
define <8 x i1> @test_fcmp_ugt(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
; CHECK-LABEL: test_fcmp_ugt:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov h2, v1.h[1]
-; CHECK-NEXT: mov h3, v0.h[1]
-; CHECK-NEXT: fmov w10, s1
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: mov h2, v1.h[2]
-; CHECK-NEXT: mov h3, v0.h[2]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: mov h3, v1.h[4]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s5, s4
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: mov h4, v1.h[3]
-; CHECK-NEXT: lsl w10, w10, #16
-; CHECK-NEXT: fmov s6, w8
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: csetm w9, hi
-; CHECK-NEXT: fmov s16, w10
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: mov h5, v0.h[3]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: mov h6, v0.h[4]
-; CHECK-NEXT: mov h4, v1.h[5]
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: dup v2.4h, v1.h[1]
+; CHECK-NEXT: dup v3.4h, v0.h[1]
+; CHECK-NEXT: dup v4.4h, v1.h[2]
+; CHECK-NEXT: dup v5.4h, v0.h[2]
+; CHECK-NEXT: dup v6.4h, v0.h[3]
+; CHECK-NEXT: shll v2.4s, v2.4h, #16
+; CHECK-NEXT: shll v3.4s, v3.4h, #16
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v2.4s, v1.4h, #16
+; CHECK-NEXT: shll v3.4s, v0.4h, #16
; CHECK-NEXT: csetm w8, hi
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov h5, v0.h[5]
-; CHECK-NEXT: fcmp s16, s7
-; CHECK-NEXT: mov v2.h[1], w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: fmov s3, w9
-; CHECK-NEXT: fmov w9, s6
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v3.4s, v4.4h, #16
+; CHECK-NEXT: shll v4.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.4h, v1.h[3]
+; CHECK-NEXT: csetm w9, hi
+; CHECK-NEXT: fmov s2, w9
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[4]
+; CHECK-NEXT: dup v6.8h, v0.h[4]
+; CHECK-NEXT: mov v2.h[1], w8
; CHECK-NEXT: csetm w8, hi
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[5]
+; CHECK-NEXT: dup v6.8h, v0.h[5]
; CHECK-NEXT: mov v2.h[2], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fcmp s7, s3
-; CHECK-NEXT: mov h3, v1.h[6]
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: mov h1, v1.h[7]
-; CHECK-NEXT: fmov s6, w9
-; CHECK-NEXT: fmov w9, s5
; CHECK-NEXT: csetm w8, hi
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[6]
+; CHECK-NEXT: dup v6.8h, v0.h[6]
+; CHECK-NEXT: dup v1.8h, v1.h[7]
+; CHECK-NEXT: dup v0.8h, v0.h[7]
; CHECK-NEXT: mov v2.h[3], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fcmp s6, s4
-; CHECK-NEXT: mov h4, v0.h[6]
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s5, w8
-; CHECK-NEXT: mov h0, v0.h[7]
-; CHECK-NEXT: fmov s6, w9
; CHECK-NEXT: csetm w8, hi
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: mov v2.h[4], w8
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: fmov w9, s4
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: csetm w10, hi
-; CHECK-NEXT: fmov s3, w8
-; CHECK-NEXT: fmov s4, w9
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: mov v2.h[5], w10
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s4, s3
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
; CHECK-NEXT: csetm w8, hi
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: mov v2.h[5], w8
+; CHECK-NEXT: csetm w8, hi
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: mov v2.h[6], w8
-; CHECK-NEXT: fcmp s1, s0
; CHECK-NEXT: csetm w8, hi
; CHECK-NEXT: mov v2.h[7], w8
; CHECK-NEXT: xtn v0.8b, v2.8h
@@ -1139,90 +1037,58 @@ define <8 x i1> @test_fcmp_ugt(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
define <8 x i1> @test_fcmp_uge(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
; CHECK-LABEL: test_fcmp_uge:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov h2, v1.h[1]
-; CHECK-NEXT: mov h3, v0.h[1]
-; CHECK-NEXT: fmov w10, s1
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: mov h2, v1.h[2]
-; CHECK-NEXT: mov h3, v0.h[2]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: mov h3, v1.h[4]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s5, s4
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: mov h4, v1.h[3]
-; CHECK-NEXT: lsl w10, w10, #16
-; CHECK-NEXT: fmov s6, w8
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: csetm w9, pl
-; CHECK-NEXT: fmov s16, w10
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: mov h5, v0.h[3]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: mov h6, v0.h[4]
-; CHECK-NEXT: mov h4, v1.h[5]
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: dup v2.4h, v1.h[1]
+; CHECK-NEXT: dup v3.4h, v0.h[1]
+; CHECK-NEXT: dup v4.4h, v1.h[2]
+; CHECK-NEXT: dup v5.4h, v0.h[2]
+; CHECK-NEXT: dup v6.4h, v0.h[3]
+; CHECK-NEXT: shll v2.4s, v2.4h, #16
+; CHECK-NEXT: shll v3.4s, v3.4h, #16
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v2.4s, v1.4h, #16
+; CHECK-NEXT: shll v3.4s, v0.4h, #16
; CHECK-NEXT: csetm w8, pl
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov h5, v0.h[5]
-; CHECK-NEXT: fcmp s16, s7
-; CHECK-NEXT: mov v2.h[1], w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: fmov s3, w9
-; CHECK-NEXT: fmov w9, s6
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v3.4s, v4.4h, #16
+; CHECK-NEXT: shll v4.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.4h, v1.h[3]
+; CHECK-NEXT: csetm w9, pl
+; CHECK-NEXT: fmov s2, w9
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[4]
+; CHECK-NEXT: dup v6.8h, v0.h[4]
+; CHECK-NEXT: mov v2.h[1], w8
; CHECK-NEXT: csetm w8, pl
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[5]
+; CHECK-NEXT: dup v6.8h, v0.h[5]
; CHECK-NEXT: mov v2.h[2], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fcmp s7, s3
-; CHECK-NEXT: mov h3, v1.h[6]
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: mov h1, v1.h[7]
-; CHECK-NEXT: fmov s6, w9
-; CHECK-NEXT: fmov w9, s5
; CHECK-NEXT: csetm w8, pl
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[6]
+; CHECK-NEXT: dup v6.8h, v0.h[6]
+; CHECK-NEXT: dup v1.8h, v1.h[7]
+; CHECK-NEXT: dup v0.8h, v0.h[7]
; CHECK-NEXT: mov v2.h[3], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fcmp s6, s4
-; CHECK-NEXT: mov h4, v0.h[6]
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s5, w8
-; CHECK-NEXT: mov h0, v0.h[7]
-; CHECK-NEXT: fmov s6, w9
; CHECK-NEXT: csetm w8, pl
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: mov v2.h[4], w8
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: fmov w9, s4
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: csetm w10, pl
-; CHECK-NEXT: fmov s3, w8
-; CHECK-NEXT: fmov s4, w9
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: mov v2.h[5], w10
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s4, s3
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
; CHECK-NEXT: csetm w8, pl
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: mov v2.h[5], w8
+; CHECK-NEXT: csetm w8, pl
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: mov v2.h[6], w8
-; CHECK-NEXT: fcmp s1, s0
; CHECK-NEXT: csetm w8, pl
; CHECK-NEXT: mov v2.h[7], w8
; CHECK-NEXT: xtn v0.8b, v2.8h
@@ -1234,90 +1100,58 @@ define <8 x i1> @test_fcmp_uge(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
define <8 x i1> @test_fcmp_ult(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
; CHECK-LABEL: test_fcmp_ult:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov h2, v1.h[1]
-; CHECK-NEXT: mov h3, v0.h[1]
-; CHECK-NEXT: fmov w10, s1
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: mov h2, v1.h[2]
-; CHECK-NEXT: mov h3, v0.h[2]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: mov h3, v1.h[4]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s5, s4
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: mov h4, v1.h[3]
-; CHECK-NEXT: lsl w10, w10, #16
-; CHECK-NEXT: fmov s6, w8
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: csetm w9, lt
-; CHECK-NEXT: fmov s16, w10
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: mov h5, v0.h[3]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: mov h6, v0.h[4]
-; CHECK-NEXT: mov h4, v1.h[5]
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: dup v2.4h, v1.h[1]
+; CHECK-NEXT: dup v3.4h, v0.h[1]
+; CHECK-NEXT: dup v4.4h, v1.h[2]
+; CHECK-NEXT: dup v5.4h, v0.h[2]
+; CHECK-NEXT: dup v6.4h, v0.h[3]
+; CHECK-NEXT: shll v2.4s, v2.4h, #16
+; CHECK-NEXT: shll v3.4s, v3.4h, #16
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v2.4s, v1.4h, #16
+; CHECK-NEXT: shll v3.4s, v0.4h, #16
; CHECK-NEXT: csetm w8, lt
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov h5, v0.h[5]
-; CHECK-NEXT: fcmp s16, s7
-; CHECK-NEXT: mov v2.h[1], w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: fmov s3, w9
-; CHECK-NEXT: fmov w9, s6
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v3.4s, v4.4h, #16
+; CHECK-NEXT: shll v4.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.4h, v1.h[3]
+; CHECK-NEXT: csetm w9, lt
+; CHECK-NEXT: fmov s2, w9
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[4]
+; CHECK-NEXT: dup v6.8h, v0.h[4]
+; CHECK-NEXT: mov v2.h[1], w8
; CHECK-NEXT: csetm w8, lt
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[5]
+; CHECK-NEXT: dup v6.8h, v0.h[5]
; CHECK-NEXT: mov v2.h[2], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fcmp s7, s3
-; CHECK-NEXT: mov h3, v1.h[6]
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: mov h1, v1.h[7]
-; CHECK-NEXT: fmov s6, w9
-; CHECK-NEXT: fmov w9, s5
; CHECK-NEXT: csetm w8, lt
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[6]
+; CHECK-NEXT: dup v6.8h, v0.h[6]
+; CHECK-NEXT: dup v1.8h, v1.h[7]
+; CHECK-NEXT: dup v0.8h, v0.h[7]
; CHECK-NEXT: mov v2.h[3], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fcmp s6, s4
-; CHECK-NEXT: mov h4, v0.h[6]
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s5, w8
-; CHECK-NEXT: mov h0, v0.h[7]
-; CHECK-NEXT: fmov s6, w9
; CHECK-NEXT: csetm w8, lt
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: mov v2.h[4], w8
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: fmov w9, s4
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: csetm w10, lt
-; CHECK-NEXT: fmov s3, w8
-; CHECK-NEXT: fmov s4, w9
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: mov v2.h[5], w10
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s4, s3
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
; CHECK-NEXT: csetm w8, lt
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: mov v2.h[5], w8
+; CHECK-NEXT: csetm w8, lt
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: mov v2.h[6], w8
-; CHECK-NEXT: fcmp s1, s0
; CHECK-NEXT: csetm w8, lt
; CHECK-NEXT: mov v2.h[7], w8
; CHECK-NEXT: xtn v0.8b, v2.8h
@@ -1329,90 +1163,58 @@ define <8 x i1> @test_fcmp_ult(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
define <8 x i1> @test_fcmp_ule(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
; CHECK-LABEL: test_fcmp_ule:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov h2, v1.h[1]
-; CHECK-NEXT: mov h3, v0.h[1]
-; CHECK-NEXT: fmov w10, s1
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: mov h2, v1.h[2]
-; CHECK-NEXT: mov h3, v0.h[2]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: mov h3, v1.h[4]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s5, s4
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: mov h4, v1.h[3]
-; CHECK-NEXT: lsl w10, w10, #16
-; CHECK-NEXT: fmov s6, w8
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: csetm w9, le
-; CHECK-NEXT: fmov s16, w10
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: mov h5, v0.h[3]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: mov h6, v0.h[4]
-; CHECK-NEXT: mov h4, v1.h[5]
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: dup v2.4h, v1.h[1]
+; CHECK-NEXT: dup v3.4h, v0.h[1]
+; CHECK-NEXT: dup v4.4h, v1.h[2]
+; CHECK-NEXT: dup v5.4h, v0.h[2]
+; CHECK-NEXT: dup v6.4h, v0.h[3]
+; CHECK-NEXT: shll v2.4s, v2.4h, #16
+; CHECK-NEXT: shll v3.4s, v3.4h, #16
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v2.4s, v1.4h, #16
+; CHECK-NEXT: shll v3.4s, v0.4h, #16
; CHECK-NEXT: csetm w8, le
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov h5, v0.h[5]
-; CHECK-NEXT: fcmp s16, s7
-; CHECK-NEXT: mov v2.h[1], w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: fmov s3, w9
-; CHECK-NEXT: fmov w9, s6
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v3.4s, v4.4h, #16
+; CHECK-NEXT: shll v4.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.4h, v1.h[3]
+; CHECK-NEXT: csetm w9, le
+; CHECK-NEXT: fmov s2, w9
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[4]
+; CHECK-NEXT: dup v6.8h, v0.h[4]
+; CHECK-NEXT: mov v2.h[1], w8
; CHECK-NEXT: csetm w8, le
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[5]
+; CHECK-NEXT: dup v6.8h, v0.h[5]
; CHECK-NEXT: mov v2.h[2], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fcmp s7, s3
-; CHECK-NEXT: mov h3, v1.h[6]
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: mov h1, v1.h[7]
-; CHECK-NEXT: fmov s6, w9
-; CHECK-NEXT: fmov w9, s5
; CHECK-NEXT: csetm w8, le
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[6]
+; CHECK-NEXT: dup v6.8h, v0.h[6]
+; CHECK-NEXT: dup v1.8h, v1.h[7]
+; CHECK-NEXT: dup v0.8h, v0.h[7]
; CHECK-NEXT: mov v2.h[3], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fcmp s6, s4
-; CHECK-NEXT: mov h4, v0.h[6]
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s5, w8
-; CHECK-NEXT: mov h0, v0.h[7]
-; CHECK-NEXT: fmov s6, w9
; CHECK-NEXT: csetm w8, le
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: mov v2.h[4], w8
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: fmov w9, s4
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: csetm w10, le
-; CHECK-NEXT: fmov s3, w8
-; CHECK-NEXT: fmov s4, w9
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: mov v2.h[5], w10
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s4, s3
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
; CHECK-NEXT: csetm w8, le
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: mov v2.h[5], w8
+; CHECK-NEXT: csetm w8, le
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: mov v2.h[6], w8
-; CHECK-NEXT: fcmp s1, s0
; CHECK-NEXT: csetm w8, le
; CHECK-NEXT: mov v2.h[7], w8
; CHECK-NEXT: xtn v0.8b, v2.8h
@@ -1424,90 +1226,58 @@ define <8 x i1> @test_fcmp_ule(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
define <8 x i1> @test_fcmp_uno(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
; CHECK-LABEL: test_fcmp_uno:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov h2, v1.h[1]
-; CHECK-NEXT: mov h3, v0.h[1]
-; CHECK-NEXT: fmov w10, s1
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: mov h2, v1.h[2]
-; CHECK-NEXT: mov h3, v0.h[2]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: mov h3, v1.h[4]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s5, s4
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: mov h4, v1.h[3]
-; CHECK-NEXT: lsl w10, w10, #16
-; CHECK-NEXT: fmov s6, w8
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: csetm w9, vs
-; CHECK-NEXT: fmov s16, w10
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: mov h5, v0.h[3]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: mov h6, v0.h[4]
-; CHECK-NEXT: mov h4, v1.h[5]
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: dup v2.4h, v1.h[1]
+; CHECK-NEXT: dup v3.4h, v0.h[1]
+; CHECK-NEXT: dup v4.4h, v1.h[2]
+; CHECK-NEXT: dup v5.4h, v0.h[2]
+; CHECK-NEXT: dup v6.4h, v0.h[3]
+; CHECK-NEXT: shll v2.4s, v2.4h, #16
+; CHECK-NEXT: shll v3.4s, v3.4h, #16
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v2.4s, v1.4h, #16
+; CHECK-NEXT: shll v3.4s, v0.4h, #16
; CHECK-NEXT: csetm w8, vs
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov h5, v0.h[5]
-; CHECK-NEXT: fcmp s16, s7
-; CHECK-NEXT: mov v2.h[1], w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: fmov s3, w9
-; CHECK-NEXT: fmov w9, s6
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v3.4s, v4.4h, #16
+; CHECK-NEXT: shll v4.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.4h, v1.h[3]
+; CHECK-NEXT: csetm w9, vs
+; CHECK-NEXT: fmov s2, w9
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[4]
+; CHECK-NEXT: dup v6.8h, v0.h[4]
+; CHECK-NEXT: mov v2.h[1], w8
; CHECK-NEXT: csetm w8, vs
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[5]
+; CHECK-NEXT: dup v6.8h, v0.h[5]
; CHECK-NEXT: mov v2.h[2], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fcmp s7, s3
-; CHECK-NEXT: mov h3, v1.h[6]
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: mov h1, v1.h[7]
-; CHECK-NEXT: fmov s6, w9
-; CHECK-NEXT: fmov w9, s5
; CHECK-NEXT: csetm w8, vs
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[6]
+; CHECK-NEXT: dup v6.8h, v0.h[6]
+; CHECK-NEXT: dup v1.8h, v1.h[7]
+; CHECK-NEXT: dup v0.8h, v0.h[7]
; CHECK-NEXT: mov v2.h[3], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fcmp s6, s4
-; CHECK-NEXT: mov h4, v0.h[6]
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s5, w8
-; CHECK-NEXT: mov h0, v0.h[7]
-; CHECK-NEXT: fmov s6, w9
; CHECK-NEXT: csetm w8, vs
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: mov v2.h[4], w8
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: fmov w9, s4
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: csetm w10, vs
-; CHECK-NEXT: fmov s3, w8
-; CHECK-NEXT: fmov s4, w9
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: mov v2.h[5], w10
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s4, s3
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
; CHECK-NEXT: csetm w8, vs
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: mov v2.h[5], w8
+; CHECK-NEXT: csetm w8, vs
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: mov v2.h[6], w8
-; CHECK-NEXT: fcmp s1, s0
; CHECK-NEXT: csetm w8, vs
; CHECK-NEXT: mov v2.h[7], w8
; CHECK-NEXT: xtn v0.8b, v2.8h
@@ -1519,96 +1289,64 @@ define <8 x i1> @test_fcmp_uno(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
define <8 x i1> @test_fcmp_one(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
; CHECK-LABEL: test_fcmp_one:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov h2, v1.h[1]
-; CHECK-NEXT: mov h3, v0.h[1]
-; CHECK-NEXT: fmov w10, s1
-; CHECK-NEXT: fmov w11, s0
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: mov h2, v1.h[2]
-; CHECK-NEXT: mov h3, v0.h[2]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: lsl w9, w11, #16
-; CHECK-NEXT: fmov s6, w8
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: fcmp s5, s4
-; CHECK-NEXT: fmov s7, w9
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: mov h4, v1.h[3]
-; CHECK-NEXT: mov h5, v0.h[3]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: csetm w10, mi
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: csinv w10, w10, wzr, le
-; CHECK-NEXT: fcmp s7, s6
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: fmov s3, w9
-; CHECK-NEXT: fmov w11, s4
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov h4, v0.h[4]
-; CHECK-NEXT: mov h7, v1.h[5]
+; CHECK-NEXT: dup v2.4h, v1.h[1]
+; CHECK-NEXT: dup v3.4h, v0.h[1]
+; CHECK-NEXT: dup v4.4h, v1.h[2]
+; CHECK-NEXT: dup v5.4h, v0.h[2]
+; CHECK-NEXT: dup v6.4h, v0.h[3]
+; CHECK-NEXT: shll v2.4s, v2.4h, #16
+; CHECK-NEXT: shll v3.4s, v3.4h, #16
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v2.4s, v1.4h, #16
+; CHECK-NEXT: shll v3.4s, v0.4h, #16
+; CHECK-NEXT: csetm w8, mi
+; CHECK-NEXT: csinv w8, w8, wzr, le
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v3.4s, v4.4h, #16
+; CHECK-NEXT: shll v4.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.4h, v1.h[3]
; CHECK-NEXT: csetm w9, mi
; CHECK-NEXT: csinv w9, w9, wzr, le
-; CHECK-NEXT: fcmp s3, s2
-; CHECK-NEXT: mov h3, v1.h[4]
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
; CHECK-NEXT: fmov s2, w9
-; CHECK-NEXT: lsl w11, w11, #16
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov s5, w11
-; CHECK-NEXT: fmov s6, w8
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[4]
+; CHECK-NEXT: dup v6.8h, v0.h[4]
+; CHECK-NEXT: mov v2.h[1], w8
; CHECK-NEXT: csetm w8, mi
-; CHECK-NEXT: mov v2.h[1], w10
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: fmov w10, s4
; CHECK-NEXT: csinv w8, w8, wzr, le
-; CHECK-NEXT: mov h3, v1.h[6]
-; CHECK-NEXT: mov h1, v1.h[7]
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: mov h5, v0.h[5]
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: lsl w10, w10, #16
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[5]
+; CHECK-NEXT: dup v6.8h, v0.h[5]
; CHECK-NEXT: mov v2.h[2], w8
-; CHECK-NEXT: fmov s4, w9
-; CHECK-NEXT: fmov s6, w10
; CHECK-NEXT: csetm w8, mi
-; CHECK-NEXT: fmov w9, s7
-; CHECK-NEXT: fmov w10, s5
; CHECK-NEXT: csinv w8, w8, wzr, le
-; CHECK-NEXT: fcmp s6, s4
-; CHECK-NEXT: mov h4, v0.h[6]
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[6]
+; CHECK-NEXT: dup v6.8h, v0.h[6]
+; CHECK-NEXT: dup v1.8h, v1.h[7]
+; CHECK-NEXT: dup v0.8h, v0.h[7]
; CHECK-NEXT: mov v2.h[3], w8
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: lsl w10, w10, #16
-; CHECK-NEXT: mov h0, v0.h[7]
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: fmov s6, w10
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: fmov w10, s4
; CHECK-NEXT: csetm w8, mi
; CHECK-NEXT: csinv w8, w8, wzr, le
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: mov v2.h[4], w8
-; CHECK-NEXT: lsl w8, w9, #16
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: fmov s3, w8
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov s4, w9
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: csetm w10, mi
-; CHECK-NEXT: csinv w10, w10, wzr, le
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: mov v2.h[5], w10
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fcmp s4, s3
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
; CHECK-NEXT: csetm w8, mi
; CHECK-NEXT: csinv w8, w8, wzr, le
-; CHECK-NEXT: fcmp s1, s0
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: mov v2.h[5], w8
+; CHECK-NEXT: csetm w8, mi
+; CHECK-NEXT: csinv w8, w8, wzr, le
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: mov v2.h[6], w8
; CHECK-NEXT: csetm w8, mi
; CHECK-NEXT: csinv w8, w8, wzr, le
@@ -1622,90 +1360,58 @@ define <8 x i1> @test_fcmp_one(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
define <8 x i1> @test_fcmp_oeq(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
; CHECK-LABEL: test_fcmp_oeq:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov h2, v1.h[1]
-; CHECK-NEXT: mov h3, v0.h[1]
-; CHECK-NEXT: fmov w10, s1
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: mov h2, v1.h[2]
-; CHECK-NEXT: mov h3, v0.h[2]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: mov h3, v1.h[4]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s5, s4
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: mov h4, v1.h[3]
-; CHECK-NEXT: lsl w10, w10, #16
-; CHECK-NEXT: fmov s6, w8
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: csetm w9, eq
-; CHECK-NEXT: fmov s16, w10
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: mov h5, v0.h[3]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: mov h6, v0.h[4]
-; CHECK-NEXT: mov h4, v1.h[5]
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: dup v2.4h, v1.h[1]
+; CHECK-NEXT: dup v3.4h, v0.h[1]
+; CHECK-NEXT: dup v4.4h, v1.h[2]
+; CHECK-NEXT: dup v5.4h, v0.h[2]
+; CHECK-NEXT: dup v6.4h, v0.h[3]
+; CHECK-NEXT: shll v2.4s, v2.4h, #16
+; CHECK-NEXT: shll v3.4s, v3.4h, #16
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v2.4s, v1.4h, #16
+; CHECK-NEXT: shll v3.4s, v0.4h, #16
; CHECK-NEXT: csetm w8, eq
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov h5, v0.h[5]
-; CHECK-NEXT: fcmp s16, s7
-; CHECK-NEXT: mov v2.h[1], w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: fmov s3, w9
-; CHECK-NEXT: fmov w9, s6
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v3.4s, v4.4h, #16
+; CHECK-NEXT: shll v4.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.4h, v1.h[3]
+; CHECK-NEXT: csetm w9, eq
+; CHECK-NEXT: fmov s2, w9
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[4]
+; CHECK-NEXT: dup v6.8h, v0.h[4]
+; CHECK-NEXT: mov v2.h[1], w8
; CHECK-NEXT: csetm w8, eq
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[5]
+; CHECK-NEXT: dup v6.8h, v0.h[5]
; CHECK-NEXT: mov v2.h[2], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fcmp s7, s3
-; CHECK-NEXT: mov h3, v1.h[6]
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: mov h1, v1.h[7]
-; CHECK-NEXT: fmov s6, w9
-; CHECK-NEXT: fmov w9, s5
; CHECK-NEXT: csetm w8, eq
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[6]
+; CHECK-NEXT: dup v6.8h, v0.h[6]
+; CHECK-NEXT: dup v1.8h, v1.h[7]
+; CHECK-NEXT: dup v0.8h, v0.h[7]
; CHECK-NEXT: mov v2.h[3], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fcmp s6, s4
-; CHECK-NEXT: mov h4, v0.h[6]
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s5, w8
-; CHECK-NEXT: mov h0, v0.h[7]
-; CHECK-NEXT: fmov s6, w9
; CHECK-NEXT: csetm w8, eq
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: mov v2.h[4], w8
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: fmov w9, s4
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: csetm w10, eq
-; CHECK-NEXT: fmov s3, w8
-; CHECK-NEXT: fmov s4, w9
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: mov v2.h[5], w10
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s4, s3
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
; CHECK-NEXT: csetm w8, eq
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: mov v2.h[5], w8
+; CHECK-NEXT: csetm w8, eq
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: mov v2.h[6], w8
-; CHECK-NEXT: fcmp s1, s0
; CHECK-NEXT: csetm w8, eq
; CHECK-NEXT: mov v2.h[7], w8
; CHECK-NEXT: xtn v0.8b, v2.8h
@@ -1717,90 +1423,58 @@ define <8 x i1> @test_fcmp_oeq(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
define <8 x i1> @test_fcmp_ogt(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
; CHECK-LABEL: test_fcmp_ogt:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov h2, v1.h[1]
-; CHECK-NEXT: mov h3, v0.h[1]
-; CHECK-NEXT: fmov w10, s1
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: mov h2, v1.h[2]
-; CHECK-NEXT: mov h3, v0.h[2]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: mov h3, v1.h[4]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s5, s4
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: mov h4, v1.h[3]
-; CHECK-NEXT: lsl w10, w10, #16
-; CHECK-NEXT: fmov s6, w8
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: csetm w9, gt
-; CHECK-NEXT: fmov s16, w10
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: mov h5, v0.h[3]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: mov h6, v0.h[4]
-; CHECK-NEXT: mov h4, v1.h[5]
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: dup v2.4h, v1.h[1]
+; CHECK-NEXT: dup v3.4h, v0.h[1]
+; CHECK-NEXT: dup v4.4h, v1.h[2]
+; CHECK-NEXT: dup v5.4h, v0.h[2]
+; CHECK-NEXT: dup v6.4h, v0.h[3]
+; CHECK-NEXT: shll v2.4s, v2.4h, #16
+; CHECK-NEXT: shll v3.4s, v3.4h, #16
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v2.4s, v1.4h, #16
+; CHECK-NEXT: shll v3.4s, v0.4h, #16
; CHECK-NEXT: csetm w8, gt
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov h5, v0.h[5]
-; CHECK-NEXT: fcmp s16, s7
-; CHECK-NEXT: mov v2.h[1], w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: fmov s3, w9
-; CHECK-NEXT: fmov w9, s6
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v3.4s, v4.4h, #16
+; CHECK-NEXT: shll v4.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.4h, v1.h[3]
+; CHECK-NEXT: csetm w9, gt
+; CHECK-NEXT: fmov s2, w9
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[4]
+; CHECK-NEXT: dup v6.8h, v0.h[4]
+; CHECK-NEXT: mov v2.h[1], w8
; CHECK-NEXT: csetm w8, gt
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[5]
+; CHECK-NEXT: dup v6.8h, v0.h[5]
; CHECK-NEXT: mov v2.h[2], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fcmp s7, s3
-; CHECK-NEXT: mov h3, v1.h[6]
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: mov h1, v1.h[7]
-; CHECK-NEXT: fmov s6, w9
-; CHECK-NEXT: fmov w9, s5
; CHECK-NEXT: csetm w8, gt
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[6]
+; CHECK-NEXT: dup v6.8h, v0.h[6]
+; CHECK-NEXT: dup v1.8h, v1.h[7]
+; CHECK-NEXT: dup v0.8h, v0.h[7]
; CHECK-NEXT: mov v2.h[3], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fcmp s6, s4
-; CHECK-NEXT: mov h4, v0.h[6]
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s5, w8
-; CHECK-NEXT: mov h0, v0.h[7]
-; CHECK-NEXT: fmov s6, w9
; CHECK-NEXT: csetm w8, gt
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: mov v2.h[4], w8
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: fmov w9, s4
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: csetm w10, gt
-; CHECK-NEXT: fmov s3, w8
-; CHECK-NEXT: fmov s4, w9
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: mov v2.h[5], w10
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s4, s3
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
; CHECK-NEXT: csetm w8, gt
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: mov v2.h[5], w8
+; CHECK-NEXT: csetm w8, gt
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: mov v2.h[6], w8
-; CHECK-NEXT: fcmp s1, s0
; CHECK-NEXT: csetm w8, gt
; CHECK-NEXT: mov v2.h[7], w8
; CHECK-NEXT: xtn v0.8b, v2.8h
@@ -1812,90 +1486,58 @@ define <8 x i1> @test_fcmp_ogt(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
define <8 x i1> @test_fcmp_oge(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
; CHECK-LABEL: test_fcmp_oge:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov h2, v1.h[1]
-; CHECK-NEXT: mov h3, v0.h[1]
-; CHECK-NEXT: fmov w10, s1
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: mov h2, v1.h[2]
-; CHECK-NEXT: mov h3, v0.h[2]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: mov h3, v1.h[4]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s5, s4
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: mov h4, v1.h[3]
-; CHECK-NEXT: lsl w10, w10, #16
-; CHECK-NEXT: fmov s6, w8
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: csetm w9, ge
-; CHECK-NEXT: fmov s16, w10
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: mov h5, v0.h[3]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: mov h6, v0.h[4]
-; CHECK-NEXT: mov h4, v1.h[5]
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: dup v2.4h, v1.h[1]
+; CHECK-NEXT: dup v3.4h, v0.h[1]
+; CHECK-NEXT: dup v4.4h, v1.h[2]
+; CHECK-NEXT: dup v5.4h, v0.h[2]
+; CHECK-NEXT: dup v6.4h, v0.h[3]
+; CHECK-NEXT: shll v2.4s, v2.4h, #16
+; CHECK-NEXT: shll v3.4s, v3.4h, #16
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v2.4s, v1.4h, #16
+; CHECK-NEXT: shll v3.4s, v0.4h, #16
; CHECK-NEXT: csetm w8, ge
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov h5, v0.h[5]
-; CHECK-NEXT: fcmp s16, s7
-; CHECK-NEXT: mov v2.h[1], w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: fmov s3, w9
-; CHECK-NEXT: fmov w9, s6
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v3.4s, v4.4h, #16
+; CHECK-NEXT: shll v4.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.4h, v1.h[3]
+; CHECK-NEXT: csetm w9, ge
+; CHECK-NEXT: fmov s2, w9
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[4]
+; CHECK-NEXT: dup v6.8h, v0.h[4]
+; CHECK-NEXT: mov v2.h[1], w8
; CHECK-NEXT: csetm w8, ge
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[5]
+; CHECK-NEXT: dup v6.8h, v0.h[5]
; CHECK-NEXT: mov v2.h[2], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fcmp s7, s3
-; CHECK-NEXT: mov h3, v1.h[6]
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: mov h1, v1.h[7]
-; CHECK-NEXT: fmov s6, w9
-; CHECK-NEXT: fmov w9, s5
; CHECK-NEXT: csetm w8, ge
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[6]
+; CHECK-NEXT: dup v6.8h, v0.h[6]
+; CHECK-NEXT: dup v1.8h, v1.h[7]
+; CHECK-NEXT: dup v0.8h, v0.h[7]
; CHECK-NEXT: mov v2.h[3], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fcmp s6, s4
-; CHECK-NEXT: mov h4, v0.h[6]
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s5, w8
-; CHECK-NEXT: mov h0, v0.h[7]
-; CHECK-NEXT: fmov s6, w9
; CHECK-NEXT: csetm w8, ge
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: mov v2.h[4], w8
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: fmov w9, s4
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: csetm w10, ge
-; CHECK-NEXT: fmov s3, w8
-; CHECK-NEXT: fmov s4, w9
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: mov v2.h[5], w10
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s4, s3
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
; CHECK-NEXT: csetm w8, ge
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: mov v2.h[5], w8
+; CHECK-NEXT: csetm w8, ge
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: mov v2.h[6], w8
-; CHECK-NEXT: fcmp s1, s0
; CHECK-NEXT: csetm w8, ge
; CHECK-NEXT: mov v2.h[7], w8
; CHECK-NEXT: xtn v0.8b, v2.8h
@@ -1907,90 +1549,58 @@ define <8 x i1> @test_fcmp_oge(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
define <8 x i1> @test_fcmp_olt(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
; CHECK-LABEL: test_fcmp_olt:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov h2, v1.h[1]
-; CHECK-NEXT: mov h3, v0.h[1]
-; CHECK-NEXT: fmov w10, s1
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: mov h2, v1.h[2]
-; CHECK-NEXT: mov h3, v0.h[2]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: mov h3, v1.h[4]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s5, s4
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: mov h4, v1.h[3]
-; CHECK-NEXT: lsl w10, w10, #16
-; CHECK-NEXT: fmov s6, w8
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: csetm w9, mi
-; CHECK-NEXT: fmov s16, w10
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: mov h5, v0.h[3]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: mov h6, v0.h[4]
-; CHECK-NEXT: mov h4, v1.h[5]
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: dup v2.4h, v1.h[1]
+; CHECK-NEXT: dup v3.4h, v0.h[1]
+; CHECK-NEXT: dup v4.4h, v1.h[2]
+; CHECK-NEXT: dup v5.4h, v0.h[2]
+; CHECK-NEXT: dup v6.4h, v0.h[3]
+; CHECK-NEXT: shll v2.4s, v2.4h, #16
+; CHECK-NEXT: shll v3.4s, v3.4h, #16
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v2.4s, v1.4h, #16
+; CHECK-NEXT: shll v3.4s, v0.4h, #16
; CHECK-NEXT: csetm w8, mi
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov h5, v0.h[5]
-; CHECK-NEXT: fcmp s16, s7
-; CHECK-NEXT: mov v2.h[1], w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: fmov s3, w9
-; CHECK-NEXT: fmov w9, s6
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v3.4s, v4.4h, #16
+; CHECK-NEXT: shll v4.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.4h, v1.h[3]
+; CHECK-NEXT: csetm w9, mi
+; CHECK-NEXT: fmov s2, w9
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[4]
+; CHECK-NEXT: dup v6.8h, v0.h[4]
+; CHECK-NEXT: mov v2.h[1], w8
; CHECK-NEXT: csetm w8, mi
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[5]
+; CHECK-NEXT: dup v6.8h, v0.h[5]
; CHECK-NEXT: mov v2.h[2], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fcmp s7, s3
-; CHECK-NEXT: mov h3, v1.h[6]
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: mov h1, v1.h[7]
-; CHECK-NEXT: fmov s6, w9
-; CHECK-NEXT: fmov w9, s5
; CHECK-NEXT: csetm w8, mi
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[6]
+; CHECK-NEXT: dup v6.8h, v0.h[6]
+; CHECK-NEXT: dup v1.8h, v1.h[7]
+; CHECK-NEXT: dup v0.8h, v0.h[7]
; CHECK-NEXT: mov v2.h[3], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fcmp s6, s4
-; CHECK-NEXT: mov h4, v0.h[6]
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s5, w8
-; CHECK-NEXT: mov h0, v0.h[7]
-; CHECK-NEXT: fmov s6, w9
; CHECK-NEXT: csetm w8, mi
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: mov v2.h[4], w8
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: fmov w9, s4
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: csetm w10, mi
-; CHECK-NEXT: fmov s3, w8
-; CHECK-NEXT: fmov s4, w9
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: mov v2.h[5], w10
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s4, s3
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
; CHECK-NEXT: csetm w8, mi
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: mov v2.h[5], w8
+; CHECK-NEXT: csetm w8, mi
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: mov v2.h[6], w8
-; CHECK-NEXT: fcmp s1, s0
; CHECK-NEXT: csetm w8, mi
; CHECK-NEXT: mov v2.h[7], w8
; CHECK-NEXT: xtn v0.8b, v2.8h
@@ -2002,90 +1612,58 @@ define <8 x i1> @test_fcmp_olt(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
define <8 x i1> @test_fcmp_ole(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
; CHECK-LABEL: test_fcmp_ole:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov h2, v1.h[1]
-; CHECK-NEXT: mov h3, v0.h[1]
-; CHECK-NEXT: fmov w10, s1
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: mov h2, v1.h[2]
-; CHECK-NEXT: mov h3, v0.h[2]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: mov h3, v1.h[4]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s5, s4
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: mov h4, v1.h[3]
-; CHECK-NEXT: lsl w10, w10, #16
-; CHECK-NEXT: fmov s6, w8
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: csetm w9, ls
-; CHECK-NEXT: fmov s16, w10
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: mov h5, v0.h[3]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: mov h6, v0.h[4]
-; CHECK-NEXT: mov h4, v1.h[5]
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: dup v2.4h, v1.h[1]
+; CHECK-NEXT: dup v3.4h, v0.h[1]
+; CHECK-NEXT: dup v4.4h, v1.h[2]
+; CHECK-NEXT: dup v5.4h, v0.h[2]
+; CHECK-NEXT: dup v6.4h, v0.h[3]
+; CHECK-NEXT: shll v2.4s, v2.4h, #16
+; CHECK-NEXT: shll v3.4s, v3.4h, #16
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v2.4s, v1.4h, #16
+; CHECK-NEXT: shll v3.4s, v0.4h, #16
; CHECK-NEXT: csetm w8, ls
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov h5, v0.h[5]
-; CHECK-NEXT: fcmp s16, s7
-; CHECK-NEXT: mov v2.h[1], w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: fmov s3, w9
-; CHECK-NEXT: fmov w9, s6
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v3.4s, v4.4h, #16
+; CHECK-NEXT: shll v4.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.4h, v1.h[3]
+; CHECK-NEXT: csetm w9, ls
+; CHECK-NEXT: fmov s2, w9
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[4]
+; CHECK-NEXT: dup v6.8h, v0.h[4]
+; CHECK-NEXT: mov v2.h[1], w8
; CHECK-NEXT: csetm w8, ls
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[5]
+; CHECK-NEXT: dup v6.8h, v0.h[5]
; CHECK-NEXT: mov v2.h[2], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fcmp s7, s3
-; CHECK-NEXT: mov h3, v1.h[6]
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: mov h1, v1.h[7]
-; CHECK-NEXT: fmov s6, w9
-; CHECK-NEXT: fmov w9, s5
; CHECK-NEXT: csetm w8, ls
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[6]
+; CHECK-NEXT: dup v6.8h, v0.h[6]
+; CHECK-NEXT: dup v1.8h, v1.h[7]
+; CHECK-NEXT: dup v0.8h, v0.h[7]
; CHECK-NEXT: mov v2.h[3], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fcmp s6, s4
-; CHECK-NEXT: mov h4, v0.h[6]
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s5, w8
-; CHECK-NEXT: mov h0, v0.h[7]
-; CHECK-NEXT: fmov s6, w9
; CHECK-NEXT: csetm w8, ls
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: mov v2.h[4], w8
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: fmov w9, s4
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: csetm w10, ls
-; CHECK-NEXT: fmov s3, w8
-; CHECK-NEXT: fmov s4, w9
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: mov v2.h[5], w10
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s4, s3
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
; CHECK-NEXT: csetm w8, ls
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: mov v2.h[5], w8
+; CHECK-NEXT: csetm w8, ls
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: mov v2.h[6], w8
-; CHECK-NEXT: fcmp s1, s0
; CHECK-NEXT: csetm w8, ls
; CHECK-NEXT: mov v2.h[7], w8
; CHECK-NEXT: xtn v0.8b, v2.8h
@@ -2097,90 +1675,58 @@ define <8 x i1> @test_fcmp_ole(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
define <8 x i1> @test_fcmp_ord(<8 x bfloat> %a, <8 x bfloat> %b) #0 {
; CHECK-LABEL: test_fcmp_ord:
; CHECK: // %bb.0:
-; CHECK-NEXT: mov h2, v1.h[1]
-; CHECK-NEXT: mov h3, v0.h[1]
-; CHECK-NEXT: fmov w10, s1
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: fmov w9, s3
-; CHECK-NEXT: mov h2, v1.h[2]
-; CHECK-NEXT: mov h3, v0.h[2]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: fmov w8, s0
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: mov h3, v1.h[4]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s5, s4
-; CHECK-NEXT: fmov s5, w9
-; CHECK-NEXT: mov h4, v1.h[3]
-; CHECK-NEXT: lsl w10, w10, #16
-; CHECK-NEXT: fmov s6, w8
-; CHECK-NEXT: fmov w8, s2
-; CHECK-NEXT: csetm w9, vc
-; CHECK-NEXT: fmov s16, w10
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: mov h5, v0.h[3]
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: mov h6, v0.h[4]
-; CHECK-NEXT: mov h4, v1.h[5]
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: dup v2.4h, v1.h[1]
+; CHECK-NEXT: dup v3.4h, v0.h[1]
+; CHECK-NEXT: dup v4.4h, v1.h[2]
+; CHECK-NEXT: dup v5.4h, v0.h[2]
+; CHECK-NEXT: dup v6.4h, v0.h[3]
+; CHECK-NEXT: shll v2.4s, v2.4h, #16
+; CHECK-NEXT: shll v3.4s, v3.4h, #16
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v2.4s, v1.4h, #16
+; CHECK-NEXT: shll v3.4s, v0.4h, #16
; CHECK-NEXT: csetm w8, vc
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: fmov w8, s5
-; CHECK-NEXT: mov h5, v0.h[5]
-; CHECK-NEXT: fcmp s16, s7
-; CHECK-NEXT: mov v2.h[1], w9
-; CHECK-NEXT: lsl w9, w10, #16
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fmov w10, s3
-; CHECK-NEXT: fmov s3, w9
-; CHECK-NEXT: fmov w9, s6
-; CHECK-NEXT: fmov s7, w8
+; CHECK-NEXT: fcmp s3, s2
+; CHECK-NEXT: shll v3.4s, v4.4h, #16
+; CHECK-NEXT: shll v4.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.4h, v1.h[3]
+; CHECK-NEXT: csetm w9, vc
+; CHECK-NEXT: fmov s2, w9
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[4]
+; CHECK-NEXT: dup v6.8h, v0.h[4]
+; CHECK-NEXT: mov v2.h[1], w8
; CHECK-NEXT: csetm w8, vc
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[5]
+; CHECK-NEXT: dup v6.8h, v0.h[5]
; CHECK-NEXT: mov v2.h[2], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fmov w10, s4
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fcmp s7, s3
-; CHECK-NEXT: mov h3, v1.h[6]
-; CHECK-NEXT: fmov s4, w8
-; CHECK-NEXT: mov h1, v1.h[7]
-; CHECK-NEXT: fmov s6, w9
-; CHECK-NEXT: fmov w9, s5
; CHECK-NEXT: csetm w8, vc
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: dup v5.8h, v1.h[6]
+; CHECK-NEXT: dup v6.8h, v0.h[6]
+; CHECK-NEXT: dup v1.8h, v1.h[7]
+; CHECK-NEXT: dup v0.8h, v0.h[7]
; CHECK-NEXT: mov v2.h[3], w8
-; CHECK-NEXT: lsl w8, w10, #16
-; CHECK-NEXT: fcmp s6, s4
-; CHECK-NEXT: mov h4, v0.h[6]
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s5, w8
-; CHECK-NEXT: mov h0, v0.h[7]
-; CHECK-NEXT: fmov s6, w9
; CHECK-NEXT: csetm w8, vc
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: shll v3.4s, v5.4h, #16
+; CHECK-NEXT: shll v4.4s, v6.4h, #16
+; CHECK-NEXT: shll v1.4s, v1.4h, #16
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: mov v2.h[4], w8
-; CHECK-NEXT: fmov w8, s3
-; CHECK-NEXT: fmov w9, s4
-; CHECK-NEXT: fcmp s6, s5
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: csetm w10, vc
-; CHECK-NEXT: fmov s3, w8
-; CHECK-NEXT: fmov s4, w9
-; CHECK-NEXT: fmov w8, s1
-; CHECK-NEXT: fmov w9, s0
-; CHECK-NEXT: mov v2.h[5], w10
-; CHECK-NEXT: lsl w8, w8, #16
-; CHECK-NEXT: fcmp s4, s3
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w8
-; CHECK-NEXT: fmov s1, w9
; CHECK-NEXT: csetm w8, vc
+; CHECK-NEXT: fcmp s4, s3
+; CHECK-NEXT: mov v2.h[5], w8
+; CHECK-NEXT: csetm w8, vc
+; CHECK-NEXT: fcmp s0, s1
; CHECK-NEXT: mov v2.h[6], w8
-; CHECK-NEXT: fcmp s1, s0
; CHECK-NEXT: csetm w8, vc
; CHECK-NEXT: mov v2.h[7], w8
; CHECK-NEXT: xtn v0.8b, v2.8h
diff --git a/llvm/test/CodeGen/AArch64/cvt-fp-int-fp.ll b/llvm/test/CodeGen/AArch64/cvt-fp-int-fp.ll
index 40684b0..e3263252 100644
--- a/llvm/test/CodeGen/AArch64/cvt-fp-int-fp.ll
+++ b/llvm/test/CodeGen/AArch64/cvt-fp-int-fp.ll
@@ -76,11 +76,9 @@ entry:
define bfloat @t7(bfloat %x) {
; CHECK-LABEL: t7:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w9
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: fcvtzs w9, s0
; CHECK-NEXT: scvtf d0, w9
; CHECK-NEXT: fcvtxn s0, d0
@@ -101,11 +99,9 @@ entry:
define bfloat @t8(bfloat %x) {
; CHECK-LABEL: t8:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w9
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: fcvtzu w9, s0
; CHECK-NEXT: ucvtf d0, w9
; CHECK-NEXT: fcvtxn s0, d0
@@ -198,11 +194,9 @@ entry:
define bfloat @t7_strict(bfloat %x) #0 {
; CHECK-LABEL: t7_strict:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w9
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: fcvtzs w9, s0
; CHECK-NEXT: scvtf d0, w9
; CHECK-NEXT: fcvtxn s0, d0
@@ -223,11 +217,9 @@ entry:
define bfloat @t8_strict(bfloat %x) #0 {
; CHECK-LABEL: t8_strict:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w9
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: fcvtzu w9, s0
; CHECK-NEXT: ucvtf d0, w9
; CHECK-NEXT: fcvtxn s0, d0
diff --git a/llvm/test/CodeGen/AArch64/hwasan-zero-ptr.ll b/llvm/test/CodeGen/AArch64/hwasan-zero-ptr.ll
new file mode 100644
index 0000000..dca39fe
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/hwasan-zero-ptr.ll
@@ -0,0 +1,65 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -filetype asm -o - %s | FileCheck %s
+
+; This shows that when dereferencing a null pointer, HWASan will call
+; __hwasan_check_x4294967071_19_fixed_0_short_v2
+; (N.B. 4294967071 == 2**32 - 239 + 14 == 2**32 - X0 + XZR)
+;
+; The source was generated from llvm/test/Instrumentation/HWAddressSanitizer/zero-ptr.ll.
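+;
+; A rough sketch of how that name is assumed to be derived (based only on
+; the note above, not on the encoder itself): the pointer operand here is
+; XZR, and the register index embedded in the check-function name is taken
+; as an unsigned 32-bit offset from X0, so with the values quoted above
+; (X0 = 239, XZR = 14) the subtraction wraps around:
+;   XZR - X0 = 14 - 239 = -225  ->  2**32 - 225 = 4294967071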
+
+; ModuleID = '<stdin>'
+source_filename = "<stdin>"
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-android10000"
+
+$hwasan.module_ctor = comdat any
+
+@__start_hwasan_globals = external hidden constant [0 x i8]
+@__stop_hwasan_globals = external hidden constant [0 x i8]
+@hwasan.note = private constant { i32, i32, i32, [8 x i8], i32, i32 } { i32 8, i32 8, i32 3, [8 x i8] c"LLVM\00\00\00\00", i32 trunc (i64 sub (i64 ptrtoint (ptr @__start_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32), i32 trunc (i64 sub (i64 ptrtoint (ptr @__stop_hwasan_globals to i64), i64 ptrtoint (ptr @hwasan.note to i64)) to i32) }, section ".note.hwasan.globals", comdat($hwasan.module_ctor), align 4
+
+; Function Attrs: sanitize_hwaddress
+define void @test_store_to_zeroptr() #0 {
+; CHECK-LABEL: test_store_to_zeroptr:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: .cfi_offset w30, -16
+; CHECK-NEXT: bl __hwasan_check_x4294967071_19_fixed_0_short_v2
+; CHECK-NEXT: mov x8, xzr
+; CHECK-NEXT: mov w9, #42 // =0x2a
+; CHECK-NEXT: str x9, [x8]
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %.hwasan.shadow = call ptr asm "", "=r,0"(ptr null)
+ %b = inttoptr i64 0 to ptr
+ call void @llvm.hwasan.check.memaccess.shortgranules.fixedshadow(ptr %b, i32 19, i64 0)
+ store i64 42, ptr %b, align 8
+ ret void
+}
+
+; Function Attrs: nounwind
+declare void @llvm.hwasan.check.memaccess.shortgranules.fixedshadow(ptr, i32 immarg, i64 immarg) #1
+
+attributes #0 = { sanitize_hwaddress }
+attributes #1 = { nounwind }
+
+declare void @__hwasan_init()
+
+; Function Attrs: nounwind
+define internal void @hwasan.module_ctor() #1 comdat {
+; CHECK-LABEL: hwasan.module_ctor:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: bl __hwasan_init
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ call void @__hwasan_init()
+ ret void
+}
+
+!llvm.module.flags = !{!1}
+
+!0 = !{ptr @hwasan.note}
+!1 = !{i32 4, !"nosanitize_hwaddress", i32 1}
diff --git a/llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir b/llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir
index b61fa4b..08fc47d 100644
--- a/llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir
+++ b/llvm/test/CodeGen/AArch64/register-coalesce-update-subranges-remat.mir
@@ -1,5 +1,5 @@
+# RUN: llc -mtriple=aarch64 -o /dev/null -run-pass=register-coalescer -aarch64-enable-subreg-liveness-tracking -debug-only=regalloc %s 2>&1 | FileCheck %s --check-prefix=CHECK-DBG
# RUN: llc -mtriple=aarch64 -verify-machineinstrs -o - -run-pass=register-coalescer -aarch64-enable-subreg-liveness-tracking %s | FileCheck %s --check-prefix=CHECK
-# RUN: llc -mtriple=aarch64 -verify-machineinstrs -o /dev/null -run-pass=register-coalescer -aarch64-enable-subreg-liveness-tracking -debug-only=regalloc %s 2>&1 | FileCheck %s --check-prefix=CHECK-DBG
# REQUIRES: asserts
# CHECK-DBG: ********** REGISTER COALESCER **********
@@ -36,3 +36,94 @@ body: |
RET_ReallyLR
...
+# CHECK-DBG: ********** REGISTER COALESCER **********
+# CHECK-DBG: ********** Function: reproducer
+# CHECK-DBG: ********** JOINING INTERVALS ***********
+# CHECK-DBG: ********** INTERVALS **********
+# CHECK-DBG: %1 [32r,48B:2)[48B,320r:0)[320r,368B:1) 0@48B-phi 1@320r 2@32r
+# CHECK-DBG-SAME: weight:0.000000e+00
+# CHECK-DBG: %3 [80r,160B:2)[240r,272B:1)[288r,304B:0)[304B,320r:3) 0@288r 1@240r 2@80r 3@304B-phi
+# CHECK-DBG-SAME: L0000000000000080 [288r,304B:0)[304B,320r:3) 0@288r 1@x 2@x 3@304B-phi
+# CHECK-DBG-SAME: L0000000000000040 [80r,160B:2)[240r,272B:1)[288r,304B:0)[304B,320r:3) 0@288r 1@240r 2@80r 3@304B-phi
+# CHECK-DBG-SAME: weight:0.000000e+00
+---
+name: reproducer
+tracksRegLiveness: true
+body: |
+ bb.0:
+ %0:gpr32 = MOVi32imm 1
+ %1:gpr64 = IMPLICIT_DEF
+
+ bb.1:
+
+ bb.2:
+ %3:gpr64all = SUBREG_TO_REG 0, %0, %subreg.sub_32
+
+ bb.3:
+ $nzcv = IMPLICIT_DEF
+ %4:gpr64 = COPY killed %3
+ Bcc 1, %bb.7, implicit killed $nzcv
+
+ bb.4:
+ $nzcv = IMPLICIT_DEF
+ Bcc 1, %bb.6, implicit killed $nzcv
+
+ bb.5:
+ %5:gpr64all = SUBREG_TO_REG 0, %0, %subreg.sub_32
+ %4:gpr64 = COPY killed %5
+ B %bb.7
+
+ bb.6:
+ %4:gpr64 = COPY $xzr
+
+ bb.7:
+ %7:gpr64 = ADDXrs killed %1, killed %4, 1
+ %1:gpr64 = COPY killed %7
+ B %bb.1
+
+...
+# CHECK-DBG: ********** REGISTER COALESCER **********
+# CHECK-DBG: ********** Function: reproducer2
+# CHECK-DBG: ********** JOINING INTERVALS ***********
+# CHECK-DBG: ********** INTERVALS **********
+# CHECK-DBG: %1 [32r,48B:2)[48B,304r:0)[304r,352B:1) 0@48B-phi 1@304r 2@32r
+# CHECK-DBG-SAME: weight:0.000000e+00
+# CHECK-DBG: %3 [80r,160B:2)[224r,256B:1)[272r,288B:0)[288B,304r:3) 0@272r 1@224r 2@80r 3@288B-phi
+# CHECK-DBG-SAME: L0000000000000080 [224r,256B:1)[272r,288B:0)[288B,304r:3) 0@272r 1@224r 2@x 3@288B-phi
+# CHECK-DBG-SAME: L0000000000000040 [80r,160B:2)[224r,256B:1)[272r,288B:0)[288B,304r:3) 0@272r 1@224r 2@80r 3@288B-phi
+# CHECK-DBG-SAME: weight:0.000000e+00
+---
+name: reproducer2
+tracksRegLiveness: true
+body: |
+ bb.0:
+ %0:gpr32 = MOVi32imm 1
+ %1:gpr64 = IMPLICIT_DEF
+
+ bb.1:
+
+ bb.2:
+ %3:gpr64all = SUBREG_TO_REG 0, %0, %subreg.sub_32
+
+ bb.3:
+ $nzcv = IMPLICIT_DEF
+ %4:gpr64 = COPY killed %3
+ Bcc 1, %bb.7, implicit killed $nzcv
+
+ bb.4:
+ $nzcv = IMPLICIT_DEF
+ Bcc 1, %bb.6, implicit killed $nzcv
+
+ bb.5:
+ %4:gpr64 = IMPLICIT_DEF
+ B %bb.7
+
+ bb.6:
+ %4:gpr64 = COPY $xzr
+
+ bb.7:
+ %5:gpr64 = ADDXrs killed %1, killed %4, 1
+ %1:gpr64 = COPY killed %5
+ B %bb.1
+
+...
diff --git a/llvm/test/CodeGen/AArch64/round-fptosi-sat-scalar.ll b/llvm/test/CodeGen/AArch64/round-fptosi-sat-scalar.ll
index ec7548e..b7fae2b 100644
--- a/llvm/test/CodeGen/AArch64/round-fptosi-sat-scalar.ll
+++ b/llvm/test/CodeGen/AArch64/round-fptosi-sat-scalar.ll
@@ -7,19 +7,17 @@
define i32 @testmswbf(bfloat %a) {
; CHECK-LABEL: testmswbf:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w9
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: frintm s0, s0
; CHECK-NEXT: fmov w9, s0
; CHECK-NEXT: ubfx w10, w9, #16, #1
; CHECK-NEXT: add w8, w9, w8
; CHECK-NEXT: add w8, w10, w8
; CHECK-NEXT: lsr w8, w8, #16
-; CHECK-NEXT: lsl w8, w8, #16
; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: fcvtzs w0, s0
; CHECK-NEXT: ret
entry:
@@ -31,19 +29,17 @@ entry:
define i64 @testmsxbf(bfloat %a) {
; CHECK-LABEL: testmsxbf:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w9
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: frintm s0, s0
; CHECK-NEXT: fmov w9, s0
; CHECK-NEXT: ubfx w10, w9, #16, #1
; CHECK-NEXT: add w8, w9, w8
; CHECK-NEXT: add w8, w10, w8
; CHECK-NEXT: lsr w8, w8, #16
-; CHECK-NEXT: lsl w8, w8, #16
; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: fcvtzs x0, s0
; CHECK-NEXT: ret
entry:
@@ -141,19 +137,17 @@ entry:
define i32 @testpswbf(bfloat %a) {
; CHECK-LABEL: testpswbf:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w9
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: frintp s0, s0
; CHECK-NEXT: fmov w9, s0
; CHECK-NEXT: ubfx w10, w9, #16, #1
; CHECK-NEXT: add w8, w9, w8
; CHECK-NEXT: add w8, w10, w8
; CHECK-NEXT: lsr w8, w8, #16
-; CHECK-NEXT: lsl w8, w8, #16
; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: fcvtzs w0, s0
; CHECK-NEXT: ret
entry:
@@ -165,19 +159,17 @@ entry:
define i64 @testpsxbf(bfloat %a) {
; CHECK-LABEL: testpsxbf:
; CHECK: // %bb.0: // %entry
-; CHECK-NEXT: // kill: def $h0 killed $h0 def $s0
-; CHECK-NEXT: fmov w9, s0
+; CHECK-NEXT: // kill: def $h0 killed $h0 def $d0
; CHECK-NEXT: mov w8, #32767 // =0x7fff
-; CHECK-NEXT: lsl w9, w9, #16
-; CHECK-NEXT: fmov s0, w9
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: frintp s0, s0
; CHECK-NEXT: fmov w9, s0
; CHECK-NEXT: ubfx w10, w9, #16, #1
; CHECK-NEXT: add w8, w9, w8
; CHECK-NEXT: add w8, w10, w8
; CHECK-NEXT: lsr w8, w8, #16
-; CHECK-NEXT: lsl w8, w8, #16
; CHECK-NEXT: fmov s0, w8
+; CHECK-NEXT: shll v0.4s, v0.4h, #16
; CHECK-NEXT: fcvtzs x0, s0
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/AArch64/sme-intrinsics-state.ll b/llvm/test/CodeGen/AArch64/sme-intrinsics-state.ll
new file mode 100644
index 0000000..5037772
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sme-intrinsics-state.ll
@@ -0,0 +1,18 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme -verify-machineinstrs < %s | FileCheck %s
+
+
+define i1 @streaming_mode_streaming_compatible() #0 {
+; CHECK-LABEL: streaming_mode_streaming_compatible:
+; CHECK: // %bb.0:
+; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT: bl __arm_sme_state
+; CHECK-NEXT: and w0, w0, #0x1
+; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT: ret
+ %mode = tail call noundef i1 @llvm.aarch64.sme.in.streaming.mode()
+ ret i1 %mode
+}
+
+
+attributes #0 = {nounwind memory(none) "aarch64_pstate_sm_compatible"}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll
index b0f2aac..7cafa2f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll
@@ -3990,6 +3990,116 @@ bb:
ret void
}
+define amdgpu_gs void @sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset(ptr addrspace(5) inreg %sgpr_base, i32 inreg %sidx, i32 %vidx) {
+; GFX9-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset:
+; GFX9: ; %bb.0: ; %bb
+; GFX9-NEXT: s_add_u32 flat_scratch_lo, s0, s5
+; GFX9-NEXT: v_add_u32_e32 v0, s3, v0
+; GFX9-NEXT: s_addc_u32 flat_scratch_hi, s1, 0
+; GFX9-NEXT: v_add3_u32 v0, s2, v0, -16
+; GFX9-NEXT: v_mov_b32_e32 v1, 15
+; GFX9-NEXT: scratch_store_dword v0, v1, off
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset:
+; GFX10: ; %bb.0: ; %bb
+; GFX10-NEXT: s_add_u32 s0, s0, s5
+; GFX10-NEXT: s_addc_u32 s1, s1, 0
+; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s0
+; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s1
+; GFX10-NEXT: v_add_nc_u32_e32 v0, s3, v0
+; GFX10-NEXT: v_mov_b32_e32 v1, 15
+; GFX10-NEXT: v_add_nc_u32_e32 v0, s2, v0
+; GFX10-NEXT: scratch_store_dword v0, v1, off offset:-16
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_endpgm
+;
+; GFX940-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset:
+; GFX940: ; %bb.0: ; %bb
+; GFX940-NEXT: v_add_u32_e32 v0, s1, v0
+; GFX940-NEXT: v_add3_u32 v0, s0, v0, -16
+; GFX940-NEXT: v_mov_b32_e32 v1, 15
+; GFX940-NEXT: scratch_store_dword v0, v1, off sc0 sc1
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: s_endpgm
+;
+; GFX11-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_add_nc_u32 v0, s1, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_add_nc_u32_e32 v0, s0, v0
+; GFX11-NEXT: scratch_store_b32 v0, v1, off offset:-16 dlc
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_add_nc_u32 v0, s1, v0
+; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX12-NEXT: v_add_nc_u32_e32 v0, s0, v0
+; GFX12-NEXT: scratch_store_b32 v0, v1, off offset:-16 scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: s_endpgm
+;
+; UNALIGNED_GFX9-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset:
+; UNALIGNED_GFX9: ; %bb.0: ; %bb
+; UNALIGNED_GFX9-NEXT: s_add_u32 flat_scratch_lo, s0, s5
+; UNALIGNED_GFX9-NEXT: v_add_u32_e32 v0, s3, v0
+; UNALIGNED_GFX9-NEXT: s_addc_u32 flat_scratch_hi, s1, 0
+; UNALIGNED_GFX9-NEXT: v_add3_u32 v0, s2, v0, -16
+; UNALIGNED_GFX9-NEXT: v_mov_b32_e32 v1, 15
+; UNALIGNED_GFX9-NEXT: scratch_store_dword v0, v1, off
+; UNALIGNED_GFX9-NEXT: s_waitcnt vmcnt(0)
+; UNALIGNED_GFX9-NEXT: s_endpgm
+;
+; UNALIGNED_GFX10-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset:
+; UNALIGNED_GFX10: ; %bb.0: ; %bb
+; UNALIGNED_GFX10-NEXT: s_add_u32 s0, s0, s5
+; UNALIGNED_GFX10-NEXT: s_addc_u32 s1, s1, 0
+; UNALIGNED_GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s0
+; UNALIGNED_GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s1
+; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v0, s3, v0
+; UNALIGNED_GFX10-NEXT: v_mov_b32_e32 v1, 15
+; UNALIGNED_GFX10-NEXT: v_add_nc_u32_e32 v0, s2, v0
+; UNALIGNED_GFX10-NEXT: scratch_store_dword v0, v1, off offset:-16
+; UNALIGNED_GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; UNALIGNED_GFX10-NEXT: s_endpgm
+;
+; UNALIGNED_GFX940-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset:
+; UNALIGNED_GFX940: ; %bb.0: ; %bb
+; UNALIGNED_GFX940-NEXT: v_add_u32_e32 v0, s1, v0
+; UNALIGNED_GFX940-NEXT: v_add3_u32 v0, s0, v0, -16
+; UNALIGNED_GFX940-NEXT: v_mov_b32_e32 v1, 15
+; UNALIGNED_GFX940-NEXT: scratch_store_dword v0, v1, off sc0 sc1
+; UNALIGNED_GFX940-NEXT: s_waitcnt vmcnt(0)
+; UNALIGNED_GFX940-NEXT: s_endpgm
+;
+; UNALIGNED_GFX11-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset:
+; UNALIGNED_GFX11: ; %bb.0: ; %bb
+; UNALIGNED_GFX11-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_add_nc_u32 v0, s1, v0
+; UNALIGNED_GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; UNALIGNED_GFX11-NEXT: v_add_nc_u32_e32 v0, s0, v0
+; UNALIGNED_GFX11-NEXT: scratch_store_b32 v0, v1, off offset:-16 dlc
+; UNALIGNED_GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; UNALIGNED_GFX11-NEXT: s_endpgm
+;
+; UNALIGNED_GFX12-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset:
+; UNALIGNED_GFX12: ; %bb.0: ; %bb
+; UNALIGNED_GFX12-NEXT: v_dual_mov_b32 v1, 15 :: v_dual_add_nc_u32 v0, s1, v0
+; UNALIGNED_GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; UNALIGNED_GFX12-NEXT: v_add_nc_u32_e32 v0, s0, v0
+; UNALIGNED_GFX12-NEXT: scratch_store_b32 v0, v1, off offset:-16 scope:SCOPE_SYS
+; UNALIGNED_GFX12-NEXT: s_wait_storecnt 0x0
+; UNALIGNED_GFX12-NEXT: s_endpgm
+bb:
+ %add1 = add nsw i32 %sidx, %vidx
+ %add2 = add nsw i32 %add1, -16
+ %gep = getelementptr inbounds [16 x i8], ptr addrspace(5) %sgpr_base, i32 0, i32 %add2
+ store volatile i32 15, ptr addrspace(5) %gep, align 4
+ ret void
+}
+
define amdgpu_gs void @sgpr_base_negative_offset(ptr addrspace(1) %out, ptr addrspace(5) inreg %scevgep) {
; GFX9-LABEL: sgpr_base_negative_offset:
; GFX9: ; %bb.0: ; %entry
diff --git a/llvm/test/CodeGen/AMDGPU/add64-low-32-bits-known-zero.ll b/llvm/test/CodeGen/AMDGPU/add64-low-32-bits-known-zero.ll
new file mode 100644
index 0000000..52259c4
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/add64-low-32-bits-known-zero.ll
@@ -0,0 +1,193 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s
+
+; Reduce a 64-bit add by a constant if we know the low 32 bits are all
+; zero.
+
+; add i64:x, K if computeTrailingZeros(K) >= 32
+; => build_pair (add x.hi, K.hi), x.lo
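+;
+; A minimal worked example (illustrative only, not part of the original
+; comment): with x = 0x00000001_00000005 and K = (1 << 32), K.lo == 0, so
+; x.lo = 0x00000005 is unchanged and no carry out of the low word is
+; possible; only the high half needs x.hi + K.hi = 1 + 1 = 2, i.e. a
+; single 32-bit add yielding 0x00000002_00000005.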
+
+define amdgpu_ps i64 @s_add_i64_const_low_bits_known0_0(i64 inreg %reg) {
+; GFX9-LABEL: s_add_i64_const_low_bits_known0_0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_i32 s1, s1, 0x40000
+; GFX9-NEXT: ; return to shader part epilog
+ %add = add i64 %reg, 1125899906842624 ; (1 << 50)
+ ret i64 %add
+}
+
+define amdgpu_ps i64 @s_add_i64_const_low_bits_known0_1(i64 inreg %reg) {
+; GFX9-LABEL: s_add_i64_const_low_bits_known0_1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_i32 s1, s1, 1
+; GFX9-NEXT: ; return to shader part epilog
+ %add = add i64 %reg, 4294967296 ; (1 << 32)
+ ret i64 %add
+}
+
+define amdgpu_ps i64 @s_add_i64_const_low_bits_known0_2(i64 inreg %reg) {
+; GFX9-LABEL: s_add_i64_const_low_bits_known0_2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_i32 s1, s1, 2
+; GFX9-NEXT: ; return to shader part epilog
+ %add = add i64 %reg, 8589934592 ; (1 << 33)
+ ret i64 %add
+}
+
+define amdgpu_ps i64 @s_add_i64_const_low_bits_known0_3(i64 inreg %reg) {
+; GFX9-LABEL: s_add_i64_const_low_bits_known0_3:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_i32 s1, s1, 0x80000000
+; GFX9-NEXT: ; return to shader part epilog
+ %add = add i64 %reg, -9223372036854775808 ; (1 << 63)
+ ret i64 %add
+}
+
+define amdgpu_ps i64 @s_add_i64_const_low_bits_known0_4(i64 inreg %reg) {
+; GFX9-LABEL: s_add_i64_const_low_bits_known0_4:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_i32 s1, s1, -1
+; GFX9-NEXT: ; return to shader part epilog
+ %add = add i64 %reg, -4294967296 ; 0xffffffff00000000
+ ret i64 %add
+}
+
+define i64 @v_add_i64_const_low_bits_known0_0(i64 %reg) {
+; GFX9-LABEL: v_add_i64_const_low_bits_known0_0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 0x40000, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %add = add i64 %reg, 1125899906842624 ; (1 << 50)
+ ret i64 %add
+}
+
+define i64 @v_add_i64_const_low_bits_known0_1(i64 %reg) {
+; GFX9-LABEL: v_add_i64_const_low_bits_known0_1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 1, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %add = add i64 %reg, 4294967296 ; (1 << 32)
+ ret i64 %add
+}
+
+define i64 @v_add_i64_const_low_bits_known0_2(i64 %reg) {
+; GFX9-LABEL: v_add_i64_const_low_bits_known0_2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 2, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %add = add i64 %reg, 8589934592 ; (1 << 33)
+ ret i64 %add
+}
+
+define i64 @v_add_i64_const_low_bits_known0_3(i64 %reg) {
+; GFX9-LABEL: v_add_i64_const_low_bits_known0_3:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %add = add i64 %reg, -9223372036854775808 ; (1 << 63)
+ ret i64 %add
+}
+
+define i64 @v_add_i64_const_low_bits_known0_4(i64 %reg) {
+; GFX9-LABEL: v_add_i64_const_low_bits_known0_4:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, -1, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %add = add i64 %reg, -4294967296 ; 0xffffffff00000000
+ ret i64 %add
+}
+
+define amdgpu_ps i64 @s_add_i64_const_high_bits_known0_0(i64 inreg %reg) {
+; GFX9-LABEL: s_add_i64_const_high_bits_known0_0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_u32 s0, s0, -1
+; GFX9-NEXT: s_addc_u32 s1, s1, 0
+; GFX9-NEXT: ; return to shader part epilog
+ %add = add i64 %reg, 4294967295 ; (1 << 32) - 1
+ ret i64 %add
+}
+
+define i64 @v_add_i64_const_high_bits_known0_0(i64 %reg) {
+; GFX9-LABEL: v_add_i64_const_high_bits_known0_0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, -1, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %add = add i64 %reg, 4294967295 ; (1 << 32) - 1
+ ret i64 %add
+}
+
+define <2 x i64> @v_add_v2i64_splat_const_low_bits_known0_0(<2 x i64> %reg) {
+; GFX9-LABEL: v_add_v2i64_splat_const_low_bits_known0_0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 1, v1
+; GFX9-NEXT: v_add_u32_e32 v3, 1, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %add = add <2 x i64> %reg, <i64 4294967296, i64 4294967296> ; (1 << 32)
+ ret <2 x i64> %add
+}
+
+define <2 x i64> @v_add_v2i64_nonsplat_const_low_bits_known0_0(<2 x i64> %reg) {
+; GFX9-LABEL: v_add_v2i64_nonsplat_const_low_bits_known0_0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 1, v1
+; GFX9-NEXT: v_add_u32_e32 v3, 2, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %add = add <2 x i64> %reg, <i64 4294967296, i64 8589934592> ; (1 << 32), (1 << 33)
+ ret <2 x i64> %add
+}
+
+define amdgpu_ps <2 x i64> @s_add_v2i64_splat_const_low_bits_known0_0(<2 x i64> inreg %reg) {
+; GFX9-LABEL: s_add_v2i64_splat_const_low_bits_known0_0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_i32 s1, s1, 1
+; GFX9-NEXT: s_add_i32 s3, s3, 1
+; GFX9-NEXT: ; return to shader part epilog
+ %add = add <2 x i64> %reg, <i64 4294967296, i64 4294967296> ; (1 << 32)
+ ret <2 x i64> %add
+}
+
+define amdgpu_ps <2 x i64> @s_add_v2i64_nonsplat_const_low_bits_known0_0(<2 x i64> inreg %reg) {
+; GFX9-LABEL: s_add_v2i64_nonsplat_const_low_bits_known0_0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_i32 s1, s1, 1
+; GFX9-NEXT: s_add_i32 s3, s3, 2
+; GFX9-NEXT: ; return to shader part epilog
+ %add = add <2 x i64> %reg, <i64 4294967296, i64 8589934592> ; (1 << 32), (1 << 33)
+ ret <2 x i64> %add
+}
+
+; We could reduce this to a 32-bit add by using computeKnownBits
+define i64 @v_add_i64_variable_high_bits_known0_0(i64 %reg, i32 %offset.hi32) {
+; GFX9-LABEL: v_add_i64_variable_high_bits_known0_0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 0, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %zext.offset.hi32 = zext i32 %offset.hi32 to i64
+ %in.high.bits = shl i64 %zext.offset.hi32, 32
+ %add = add i64 %reg, %in.high.bits
+ ret i64 %add
+}
+
+; We could reduce this to a 32-bit add by using computeKnownBits
+define amdgpu_ps i64 @s_add_i64_variable_high_bits_known0_0(i64 inreg %reg, i32 inreg %offset.hi32) {
+; GFX9-LABEL: s_add_i64_variable_high_bits_known0_0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_u32 s0, s0, 0
+; GFX9-NEXT: s_addc_u32 s1, s1, s2
+; GFX9-NEXT: ; return to shader part epilog
+ %zext.offset.hi32 = zext i32 %offset.hi32 to i64
+ %in.high.bits = shl i64 %zext.offset.hi32, 32
+ %add = add i64 %reg, %in.high.bits
+ ret i64 %add
+}
diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
index 97d642b..5415af0 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
@@ -5249,6 +5249,114 @@ bb:
ret void
}
+define amdgpu_gs void @sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset(ptr addrspace(5) inreg %sgpr_base, i32 inreg %sidx, i32 %vidx) {
+; GFX9-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset:
+; GFX9: ; %bb.0: ; %bb
+; GFX9-NEXT: s_add_u32 flat_scratch_lo, s0, s5
+; GFX9-NEXT: s_addc_u32 flat_scratch_hi, s1, 0
+; GFX9-NEXT: s_add_i32 s2, s2, s3
+; GFX9-NEXT: v_add_u32_e32 v0, s2, v0
+; GFX9-NEXT: v_add_u32_e32 v0, -16, v0
+; GFX9-NEXT: v_mov_b32_e32 v1, 15
+; GFX9-NEXT: scratch_store_dword v0, v1, off
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset:
+; GFX10: ; %bb.0: ; %bb
+; GFX10-NEXT: s_add_u32 s0, s0, s5
+; GFX10-NEXT: s_addc_u32 s1, s1, 0
+; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s0
+; GFX10-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s1
+; GFX10-NEXT: v_add3_u32 v0, s2, s3, v0
+; GFX10-NEXT: v_mov_b32_e32 v1, 15
+; GFX10-NEXT: scratch_store_dword v0, v1, off offset:-16
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset:
+; GFX11: ; %bb.0: ; %bb
+; GFX11-NEXT: v_add3_u32 v0, s0, s1, v0
+; GFX11-NEXT: v_mov_b32_e32 v1, 15
+; GFX11-NEXT: scratch_store_b32 v0, v1, off offset:-16 dlc
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-NEXT: s_endpgm
+;
+; GFX12-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset:
+; GFX12: ; %bb.0: ; %bb
+; GFX12-NEXT: v_mov_b32_e32 v1, 15
+; GFX12-NEXT: s_add_co_i32 s0, s0, s1
+; GFX12-NEXT: scratch_store_b32 v0, v1, s0 offset:-16 scope:SCOPE_SYS
+; GFX12-NEXT: s_wait_storecnt 0x0
+; GFX12-NEXT: s_endpgm
+;
+; GFX9-PAL-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset:
+; GFX9-PAL: ; %bb.0: ; %bb
+; GFX9-PAL-NEXT: s_getpc_b64 s[2:3]
+; GFX9-PAL-NEXT: s_mov_b32 s2, s8
+; GFX9-PAL-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; GFX9-PAL-NEXT: v_mov_b32_e32 v1, 15
+; GFX9-PAL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-PAL-NEXT: s_and_b32 s3, s3, 0xffff
+; GFX9-PAL-NEXT: s_add_u32 flat_scratch_lo, s2, s5
+; GFX9-PAL-NEXT: s_addc_u32 flat_scratch_hi, s3, 0
+; GFX9-PAL-NEXT: s_add_i32 s0, s0, s1
+; GFX9-PAL-NEXT: v_add_u32_e32 v0, s0, v0
+; GFX9-PAL-NEXT: v_add_u32_e32 v0, -16, v0
+; GFX9-PAL-NEXT: scratch_store_dword v0, v1, off
+; GFX9-PAL-NEXT: s_waitcnt vmcnt(0)
+; GFX9-PAL-NEXT: s_endpgm
+;
+; GFX940-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset:
+; GFX940: ; %bb.0: ; %bb
+; GFX940-NEXT: s_add_i32 s0, s0, s1
+; GFX940-NEXT: v_add_u32_e32 v0, s0, v0
+; GFX940-NEXT: v_add_u32_e32 v0, -16, v0
+; GFX940-NEXT: v_mov_b32_e32 v1, 15
+; GFX940-NEXT: scratch_store_dword v0, v1, off sc0 sc1
+; GFX940-NEXT: s_waitcnt vmcnt(0)
+; GFX940-NEXT: s_endpgm
+;
+; GFX10-PAL-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset:
+; GFX10-PAL: ; %bb.0: ; %bb
+; GFX10-PAL-NEXT: s_getpc_b64 s[2:3]
+; GFX10-PAL-NEXT: s_mov_b32 s2, s8
+; GFX10-PAL-NEXT: s_load_dwordx2 s[2:3], s[2:3], 0x0
+; GFX10-PAL-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-PAL-NEXT: s_and_b32 s3, s3, 0xffff
+; GFX10-PAL-NEXT: s_add_u32 s2, s2, s5
+; GFX10-PAL-NEXT: s_addc_u32 s3, s3, 0
+; GFX10-PAL-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s2
+; GFX10-PAL-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s3
+; GFX10-PAL-NEXT: v_add3_u32 v0, s0, s1, v0
+; GFX10-PAL-NEXT: v_mov_b32_e32 v1, 15
+; GFX10-PAL-NEXT: scratch_store_dword v0, v1, off offset:-16
+; GFX10-PAL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-PAL-NEXT: s_endpgm
+;
+; GFX11-PAL-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset:
+; GFX11-PAL: ; %bb.0: ; %bb
+; GFX11-PAL-NEXT: v_add3_u32 v0, s0, s1, v0
+; GFX11-PAL-NEXT: v_mov_b32_e32 v1, 15
+; GFX11-PAL-NEXT: scratch_store_b32 v0, v1, off offset:-16 dlc
+; GFX11-PAL-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX11-PAL-NEXT: s_endpgm
+;
+; GFX12-PAL-LABEL: sgpr_base_plus_sgpr_plus_vgpr_plus_negative_imm_offset:
+; GFX12-PAL: ; %bb.0: ; %bb
+; GFX12-PAL-NEXT: v_mov_b32_e32 v1, 15
+; GFX12-PAL-NEXT: s_add_co_i32 s0, s0, s1
+; GFX12-PAL-NEXT: scratch_store_b32 v0, v1, s0 offset:-16 scope:SCOPE_SYS
+; GFX12-PAL-NEXT: s_wait_storecnt 0x0
+; GFX12-PAL-NEXT: s_endpgm
+bb:
+ %add1 = add nsw i32 %sidx, %vidx
+ %add2 = add nsw i32 %add1, -16
+ %gep = getelementptr inbounds [16 x i8], ptr addrspace(5) %sgpr_base, i32 0, i32 %add2
+ store volatile i32 15, ptr addrspace(5) %gep, align 4
+ ret void
+}
+
define amdgpu_gs void @sgpr_base_negative_offset(ptr addrspace(1) %out, ptr addrspace(5) inreg %scevgep) {
; GFX9-LABEL: sgpr_base_negative_offset:
; GFX9: ; %bb.0: ; %entry
diff --git a/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll b/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll
index 157f91c..b2f113f 100644
--- a/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-saddr-load.ll
@@ -668,37 +668,32 @@ define amdgpu_ps float @global_load_saddr_i8_offset_0xFFFFFFFF(ptr addrspace(1)
define amdgpu_ps float @global_load_saddr_i8_offset_0x100000000(ptr addrspace(1) inreg %sbase) {
; GFX9-LABEL: global_load_saddr_i8_offset_0x100000000:
; GFX9: ; %bb.0:
-; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: v_add_co_u32_e64 v0, vcc, 0, s2
-; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 1, v1, vcc
-; GFX9-NEXT: global_load_ubyte v0, v[0:1], off
+; GFX9-NEXT: s_add_i32 s3, s3, 1
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: global_load_ubyte v0, v0, s[2:3]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: global_load_saddr_i8_offset_0x100000000:
; GFX10: ; %bb.0:
-; GFX10-NEXT: v_add_co_u32 v0, s[0:1], 0, s2
-; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s[0:1], 1, s3, s[0:1]
-; GFX10-NEXT: global_load_ubyte v0, v[0:1], off
+; GFX10-NEXT: v_mov_b32_e32 v0, 0
+; GFX10-NEXT: s_add_i32 s3, s3, 1
+; GFX10-NEXT: global_load_ubyte v0, v0, s[2:3]
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_load_saddr_i8_offset_0x100000000:
; GFX11: ; %bb.0:
-; GFX11-NEXT: v_add_co_u32 v0, s[0:1], 0, s2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 1, s3, s[0:1]
-; GFX11-NEXT: global_load_u8 v0, v[0:1], off
+; GFX11-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-NEXT: s_add_i32 s3, s3, 1
+; GFX11-NEXT: global_load_u8 v0, v0, s[2:3]
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
; GFX12-SDAG-LABEL: global_load_saddr_i8_offset_0x100000000:
; GFX12-SDAG: ; %bb.0:
-; GFX12-SDAG-NEXT: s_mov_b32 s0, 0
-; GFX12-SDAG-NEXT: s_mov_b32 s1, 1
-; GFX12-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX12-SDAG-NEXT: s_add_nc_u64 s[0:1], s[2:3], s[0:1]
-; GFX12-SDAG-NEXT: s_load_u8 s0, s[0:1], 0x0
+; GFX12-SDAG-NEXT: s_add_co_i32 s3, s3, 1
+; GFX12-SDAG-NEXT: s_load_u8 s0, s[2:3], 0x0
; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, s0
; GFX12-SDAG-NEXT: ; return to shader part epilog
@@ -934,37 +929,32 @@ define amdgpu_ps float @global_load_saddr_i8_offset_neg0xFFFFFFFF(ptr addrspace(
define amdgpu_ps float @global_load_saddr_i8_offset_neg0x100000000(ptr addrspace(1) inreg %sbase) {
; GFX9-LABEL: global_load_saddr_i8_offset_neg0x100000000:
; GFX9: ; %bb.0:
-; GFX9-NEXT: v_mov_b32_e32 v1, s3
-; GFX9-NEXT: v_add_co_u32_e64 v0, vcc, 0, s2
-; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v1, vcc
-; GFX9-NEXT: global_load_ubyte v0, v[0:1], off
+; GFX9-NEXT: s_add_i32 s3, s3, -1
+; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: global_load_ubyte v0, v0, s[2:3]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: global_load_saddr_i8_offset_neg0x100000000:
; GFX10: ; %bb.0:
-; GFX10-NEXT: v_add_co_u32 v0, s[0:1], 0, s2
-; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s[0:1], -1, s3, s[0:1]
-; GFX10-NEXT: global_load_ubyte v0, v[0:1], off
+; GFX10-NEXT: v_mov_b32_e32 v0, 0
+; GFX10-NEXT: s_add_i32 s3, s3, -1
+; GFX10-NEXT: global_load_ubyte v0, v0, s[2:3]
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: global_load_saddr_i8_offset_neg0x100000000:
; GFX11: ; %bb.0:
-; GFX11-NEXT: v_add_co_u32 v0, s[0:1], 0, s2
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, -1, s3, s[0:1]
-; GFX11-NEXT: global_load_u8 v0, v[0:1], off
+; GFX11-NEXT: v_mov_b32_e32 v0, 0
+; GFX11-NEXT: s_add_i32 s3, s3, -1
+; GFX11-NEXT: global_load_u8 v0, v0, s[2:3]
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: ; return to shader part epilog
;
; GFX12-SDAG-LABEL: global_load_saddr_i8_offset_neg0x100000000:
; GFX12-SDAG: ; %bb.0:
-; GFX12-SDAG-NEXT: s_mov_b32 s0, 0
-; GFX12-SDAG-NEXT: s_mov_b32 s1, -1
-; GFX12-SDAG-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX12-SDAG-NEXT: s_add_nc_u64 s[0:1], s[2:3], s[0:1]
-; GFX12-SDAG-NEXT: s_load_u8 s0, s[0:1], 0x0
+; GFX12-SDAG-NEXT: s_add_co_i32 s3, s3, -1
+; GFX12-SDAG-NEXT: s_load_u8 s0, s[2:3], 0x0
; GFX12-SDAG-NEXT: s_wait_kmcnt 0x0
; GFX12-SDAG-NEXT: v_mov_b32_e32 v0, s0
; GFX12-SDAG-NEXT: ; return to shader part epilog
diff --git a/llvm/test/CodeGen/AMDGPU/minmax.ll b/llvm/test/CodeGen/AMDGPU/minmax.ll
index 73f3d4c..774a22f 100644
--- a/llvm/test/CodeGen/AMDGPU/minmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/minmax.ll
@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX11,SDAG %s
-; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX11,GISEL %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX11,SDAG,SDAG-GFX11 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX11,GISEL,GISEL-GFX11 %s
+; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX12,SDAG,SDAG-GFX12 %s
+; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=gfx1200 -verify-machineinstrs -amdgpu-enable-delay-alu=0 < %s | FileCheck -check-prefixes=GFX12,GISEL,GISEL-GFX12 %s
define i32 @test_minmax_i32(i32 %a, i32 %b, i32 %c) {
; GFX11-LABEL: test_minmax_i32:
@@ -8,6 +10,16 @@ define i32 @test_minmax_i32(i32 %a, i32 %b, i32 %c) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_maxmin_i32 v0, v0, v1, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_minmax_i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maxmin_i32 v0, v0, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
%smax = call i32 @llvm.smax.i32(i32 %a, i32 %b)
%sminmax = call i32 @llvm.smin.i32(i32 %smax, i32 %c)
ret i32 %sminmax
@@ -45,6 +57,16 @@ define i32 @test_minmax_commuted_i32(i32 %a, i32 %b, i32 %c) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_maxmin_i32 v0, v0, v1, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_minmax_commuted_i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maxmin_i32 v0, v0, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
%smax = call i32 @llvm.smax.i32(i32 %a, i32 %b)
%sminmax = call i32 @llvm.smin.i32(i32 %c, i32 %smax)
ret i32 %sminmax
@@ -56,6 +78,16 @@ define i32 @test_maxmin_i32(i32 %a, i32 %b, i32 %c) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_minmax_i32 v0, v0, v1, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_maxmin_i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minmax_i32 v0, v0, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
%smin = call i32 @llvm.smin.i32(i32 %a, i32 %b)
%smaxmin = call i32 @llvm.smax.i32(i32 %smin, i32 %c)
ret i32 %smaxmin
@@ -67,6 +99,16 @@ define i32 @test_maxmin_commuted_i32(i32 %a, i32 %b, i32 %c) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_minmax_i32 v0, v0, v1, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_maxmin_commuted_i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minmax_i32 v0, v0, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
%smin = call i32 @llvm.smin.i32(i32 %a, i32 %b)
%smaxmin = call i32 @llvm.smax.i32(i32 %c, i32 %smin)
ret i32 %smaxmin
@@ -79,6 +121,17 @@ define void @test_smed3_i32(ptr addrspace(1) %arg, i32 %x, i32 %y, i32 %z) {
; GFX11-NEXT: v_med3_i32 v2, v2, v3, v4
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_smed3_i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_med3_i32 v2, v2, v3, v4
+; GFX12-NEXT: global_store_b32 v[0:1], v2, off
+; GFX12-NEXT: s_setpc_b64 s[30:31]
%tmp0 = call i32 @llvm.smin.i32(i32 %x, i32 %y)
%tmp1 = call i32 @llvm.smax.i32(i32 %x, i32 %y)
%tmp2 = call i32 @llvm.smin.i32(i32 %tmp1, i32 %z)
@@ -93,6 +146,16 @@ define i32 @test_minmax_u32(i32 %a, i32 %b, i32 %c) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_maxmin_u32 v0, v0, v1, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_minmax_u32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maxmin_u32 v0, v0, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
%umax = call i32 @llvm.umax.i32(i32 %a, i32 %b)
%uminmax = call i32 @llvm.umin.i32(i32 %umax, i32 %c)
ret i32 %uminmax
@@ -130,6 +193,16 @@ define i32 @test_minmax_commuted_u32(i32 %a, i32 %b, i32 %c) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_maxmin_u32 v0, v0, v1, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_minmax_commuted_u32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_maxmin_u32 v0, v0, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
%umax = call i32 @llvm.umax.i32(i32 %a, i32 %b)
%uminmax = call i32 @llvm.umin.i32(i32 %c, i32 %umax)
ret i32 %uminmax
@@ -141,6 +214,16 @@ define i32 @test_maxmin_u32(i32 %a, i32 %b, i32 %c) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_minmax_u32 v0, v0, v1, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_maxmin_u32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minmax_u32 v0, v0, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
%umin = call i32 @llvm.umin.i32(i32 %a, i32 %b)
%umaxmin = call i32 @llvm.umax.i32(i32 %umin, i32 %c)
ret i32 %umaxmin
@@ -152,6 +235,16 @@ define i32 @test_maxmin_commuted_u32(i32 %a, i32 %b, i32 %c) {
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_minmax_u32 v0, v0, v1, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_maxmin_commuted_u32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_minmax_u32 v0, v0, v1, v2
+; GFX12-NEXT: s_setpc_b64 s[30:31]
%umin = call i32 @llvm.umin.i32(i32 %a, i32 %b)
%umaxmin = call i32 @llvm.umax.i32(i32 %c, i32 %umin)
ret i32 %umaxmin
@@ -164,6 +257,17 @@ define void @test_umed3_i32(ptr addrspace(1) %arg, i32 %x, i32 %y, i32 %z) {
; GFX11-NEXT: v_med3_u32 v2, v2, v3, v4
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_umed3_i32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_med3_u32 v2, v2, v3, v4
+; GFX12-NEXT: global_store_b32 v[0:1], v2, off
+; GFX12-NEXT: s_setpc_b64 s[30:31]
%tmp0 = call i32 @llvm.umin.i32(i32 %x, i32 %y)
%tmp1 = call i32 @llvm.umax.i32(i32 %x, i32 %y)
%tmp2 = call i32 @llvm.umin.i32(i32 %tmp1, i32 %z)
@@ -173,44 +277,88 @@ define void @test_umed3_i32(ptr addrspace(1) %arg, i32 %x, i32 %y, i32 %z) {
}
define float @test_minmax_f32_ieee_true(float %a, float %b, float %c) {
-; SDAG-LABEL: test_minmax_f32_ieee_true:
-; SDAG: ; %bb.0:
-; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; SDAG-NEXT: v_max_f32_e32 v2, v2, v2
-; SDAG-NEXT: v_maxmin_f32 v0, v0, v1, v2
-; SDAG-NEXT: s_setpc_b64 s[30:31]
+; SDAG-GFX11-LABEL: test_minmax_f32_ieee_true:
+; SDAG-GFX11: ; %bb.0:
+; SDAG-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
+; SDAG-GFX11-NEXT: v_max_f32_e32 v2, v2, v2
+; SDAG-GFX11-NEXT: v_maxmin_f32 v0, v0, v1, v2
+; SDAG-GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GISEL-LABEL: test_minmax_f32_ieee_true:
-; GISEL: ; %bb.0:
-; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GISEL-NEXT: v_max_f32_e32 v2, v2, v2
-; GISEL-NEXT: v_maxmin_f32 v0, v0, v1, v2
-; GISEL-NEXT: s_setpc_b64 s[30:31]
+; GISEL-GFX11-LABEL: test_minmax_f32_ieee_true:
+; GISEL-GFX11: ; %bb.0:
+; GISEL-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GISEL-GFX11-NEXT: v_max_f32_e32 v2, v2, v2
+; GISEL-GFX11-NEXT: v_maxmin_f32 v0, v0, v1, v2
+; GISEL-GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG-GFX12-LABEL: test_minmax_f32_ieee_true:
+; SDAG-GFX12: ; %bb.0:
+; SDAG-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; SDAG-GFX12-NEXT: s_wait_expcnt 0x0
+; SDAG-GFX12-NEXT: s_wait_samplecnt 0x0
+; SDAG-GFX12-NEXT: s_wait_bvhcnt 0x0
+; SDAG-GFX12-NEXT: s_wait_kmcnt 0x0
+; SDAG-GFX12-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v0, v0, v0
+; SDAG-GFX12-NEXT: v_max_num_f32_e32 v2, v2, v2
+; SDAG-GFX12-NEXT: v_maxmin_num_f32 v0, v0, v1, v2
+; SDAG-GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX12-LABEL: test_minmax_f32_ieee_true:
+; GISEL-GFX12: ; %bb.0:
+; GISEL-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GISEL-GFX12-NEXT: s_wait_expcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_samplecnt 0x0
+; GISEL-GFX12-NEXT: s_wait_bvhcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_kmcnt 0x0
+; GISEL-GFX12-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v1, v1, v1
+; GISEL-GFX12-NEXT: v_max_num_f32_e32 v2, v2, v2
+; GISEL-GFX12-NEXT: v_maxmin_num_f32 v0, v0, v1, v2
+; GISEL-GFX12-NEXT: s_setpc_b64 s[30:31]
%max = call float @llvm.maxnum.f32(float %a, float %b)
%minmax = call float @llvm.minnum.f32(float %max, float %c)
ret float %minmax
}
define amdgpu_ps void @s_test_minmax_f32_ieee_false(float inreg %a, float inreg %b, float inreg %c, ptr addrspace(1) inreg %out) {
-; SDAG-LABEL: s_test_minmax_f32_ieee_false:
-; SDAG: ; %bb.0:
-; SDAG-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, 0
-; SDAG-NEXT: s_mov_b32 s5, s4
-; SDAG-NEXT: s_mov_b32 s4, s3
-; SDAG-NEXT: v_maxmin_f32 v0, s0, s1, v0
-; SDAG-NEXT: global_store_b32 v1, v0, s[4:5]
-; SDAG-NEXT: s_endpgm
+; SDAG-GFX11-LABEL: s_test_minmax_f32_ieee_false:
+; SDAG-GFX11: ; %bb.0:
+; SDAG-GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, 0
+; SDAG-GFX11-NEXT: s_mov_b32 s5, s4
+; SDAG-GFX11-NEXT: s_mov_b32 s4, s3
+; SDAG-GFX11-NEXT: v_maxmin_f32 v0, s0, s1, v0
+; SDAG-GFX11-NEXT: global_store_b32 v1, v0, s[4:5]
+; SDAG-GFX11-NEXT: s_endpgm
;
-; GISEL-LABEL: s_test_minmax_f32_ieee_false:
-; GISEL: ; %bb.0:
-; GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, 0
-; GISEL-NEXT: s_mov_b32 s6, s3
-; GISEL-NEXT: s_mov_b32 s7, s4
-; GISEL-NEXT: v_maxmin_f32 v0, s0, s1, v0
-; GISEL-NEXT: global_store_b32 v1, v0, s[6:7]
-; GISEL-NEXT: s_endpgm
+; GISEL-GFX11-LABEL: s_test_minmax_f32_ieee_false:
+; GISEL-GFX11: ; %bb.0:
+; GISEL-GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, 0
+; GISEL-GFX11-NEXT: s_mov_b32 s6, s3
+; GISEL-GFX11-NEXT: s_mov_b32 s7, s4
+; GISEL-GFX11-NEXT: v_maxmin_f32 v0, s0, s1, v0
+; GISEL-GFX11-NEXT: global_store_b32 v1, v0, s[6:7]
+; GISEL-GFX11-NEXT: s_endpgm
+;
+; SDAG-GFX12-LABEL: s_test_minmax_f32_ieee_false:
+; SDAG-GFX12: ; %bb.0:
+; SDAG-GFX12-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, 0
+; SDAG-GFX12-NEXT: s_mov_b32 s5, s4
+; SDAG-GFX12-NEXT: s_mov_b32 s4, s3
+; SDAG-GFX12-NEXT: v_maxmin_num_f32 v0, s0, s1, v0
+; SDAG-GFX12-NEXT: global_store_b32 v1, v0, s[4:5]
+; SDAG-GFX12-NEXT: s_endpgm
+;
+; GISEL-GFX12-LABEL: s_test_minmax_f32_ieee_false:
+; GISEL-GFX12: ; %bb.0:
+; GISEL-GFX12-NEXT: s_max_num_f32 s0, s0, s1
+; GISEL-GFX12-NEXT: s_mov_b32 s6, s3
+; GISEL-GFX12-NEXT: s_mov_b32 s7, s4
+; GISEL-GFX12-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-GFX12-NEXT: s_min_num_f32 s0, s0, s2
+; GISEL-GFX12-NEXT: v_mov_b32_e32 v0, s0
+; GISEL-GFX12-NEXT: global_store_b32 v1, v0, s[6:7]
+; GISEL-GFX12-NEXT: s_endpgm
%smax = call float @llvm.maxnum.f32(float %a, float %b)
%sminmax = call float @llvm.minnum.f32(float %smax, float %c)
store float %sminmax, ptr addrspace(1) %out
@@ -222,27 +370,56 @@ define amdgpu_ps float @test_minmax_commuted_f32_ieee_false(float %a, float %b,
; GFX11: ; %bb.0:
; GFX11-NEXT: v_maxmin_f32 v0, v0, v1, v2
; GFX11-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_minmax_commuted_f32_ieee_false:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_maxmin_num_f32 v0, v0, v1, v2
+; GFX12-NEXT: ; return to shader part epilog
%max = call float @llvm.maxnum.f32(float %a, float %b)
%minmax = call float @llvm.minnum.f32(float %c, float %max)
ret float %minmax
}
define float @test_maxmin_f32_ieee_true(float %a, float %b, float %c) {
-; SDAG-LABEL: test_maxmin_f32_ieee_true:
-; SDAG: ; %bb.0:
-; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
-; SDAG-NEXT: v_max_f32_e32 v2, v2, v2
-; SDAG-NEXT: v_minmax_f32 v0, v0, v1, v2
-; SDAG-NEXT: s_setpc_b64 s[30:31]
+; SDAG-GFX11-LABEL: test_maxmin_f32_ieee_true:
+; SDAG-GFX11: ; %bb.0:
+; SDAG-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-NEXT: v_dual_max_f32 v1, v1, v1 :: v_dual_max_f32 v0, v0, v0
+; SDAG-GFX11-NEXT: v_max_f32_e32 v2, v2, v2
+; SDAG-GFX11-NEXT: v_minmax_f32 v0, v0, v1, v2
+; SDAG-GFX11-NEXT: s_setpc_b64 s[30:31]
;
-; GISEL-LABEL: test_maxmin_f32_ieee_true:
-; GISEL: ; %bb.0:
-; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
-; GISEL-NEXT: v_max_f32_e32 v2, v2, v2
-; GISEL-NEXT: v_minmax_f32 v0, v0, v1, v2
-; GISEL-NEXT: s_setpc_b64 s[30:31]
+; GISEL-GFX11-LABEL: test_maxmin_f32_ieee_true:
+; GISEL-GFX11: ; %bb.0:
+; GISEL-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-NEXT: v_dual_max_f32 v0, v0, v0 :: v_dual_max_f32 v1, v1, v1
+; GISEL-GFX11-NEXT: v_max_f32_e32 v2, v2, v2
+; GISEL-GFX11-NEXT: v_minmax_f32 v0, v0, v1, v2
+; GISEL-GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG-GFX12-LABEL: test_maxmin_f32_ieee_true:
+; SDAG-GFX12: ; %bb.0:
+; SDAG-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; SDAG-GFX12-NEXT: s_wait_expcnt 0x0
+; SDAG-GFX12-NEXT: s_wait_samplecnt 0x0
+; SDAG-GFX12-NEXT: s_wait_bvhcnt 0x0
+; SDAG-GFX12-NEXT: s_wait_kmcnt 0x0
+; SDAG-GFX12-NEXT: v_dual_max_num_f32 v1, v1, v1 :: v_dual_max_num_f32 v0, v0, v0
+; SDAG-GFX12-NEXT: v_max_num_f32_e32 v2, v2, v2
+; SDAG-GFX12-NEXT: v_minmax_num_f32 v0, v0, v1, v2
+; SDAG-GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX12-LABEL: test_maxmin_f32_ieee_true:
+; GISEL-GFX12: ; %bb.0:
+; GISEL-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GISEL-GFX12-NEXT: s_wait_expcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_samplecnt 0x0
+; GISEL-GFX12-NEXT: s_wait_bvhcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_kmcnt 0x0
+; GISEL-GFX12-NEXT: v_dual_max_num_f32 v0, v0, v0 :: v_dual_max_num_f32 v1, v1, v1
+; GISEL-GFX12-NEXT: v_max_num_f32_e32 v2, v2, v2
+; GISEL-GFX12-NEXT: v_minmax_num_f32 v0, v0, v1, v2
+; GISEL-GFX12-NEXT: s_setpc_b64 s[30:31]
%min = call float @llvm.minnum.f32(float %a, float %b)
%maxmin = call float @llvm.maxnum.f32(float %min, float %c)
ret float %maxmin
@@ -253,6 +430,11 @@ define amdgpu_ps float @test_maxmin_commuted_f32_ieee_false(float %a, float %b,
; GFX11: ; %bb.0:
; GFX11-NEXT: v_minmax_f32 v0, v0, v1, v2
; GFX11-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_maxmin_commuted_f32_ieee_false:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_minmax_num_f32 v0, v0, v1, v2
+; GFX12-NEXT: ; return to shader part epilog
%min = call float @llvm.minnum.f32(float %a, float %b)
%maxmin = call float @llvm.maxnum.f32(float %c, float %min)
ret float %maxmin
@@ -265,6 +447,17 @@ define void @test_med3_f32(ptr addrspace(1) %arg, float %x, float %y, float %z)
; GFX11-NEXT: v_med3_f32 v2, v2, v3, v4
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_med3_f32:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_med3_num_f32 v2, v2, v3, v4
+; GFX12-NEXT: global_store_b32 v[0:1], v2, off
+; GFX12-NEXT: s_setpc_b64 s[30:31]
%tmp0 = call float @llvm.minnum.f32(float %x, float %y)
%tmp1 = call float @llvm.maxnum.f32(float %x, float %y)
%tmp2 = call float @llvm.minnum.f32(float %tmp1, float %z)
@@ -278,29 +471,54 @@ define amdgpu_ps half @test_minmax_f16_ieee_false(half %a, half %b, half %c) {
; GFX11: ; %bb.0:
; GFX11-NEXT: v_maxmin_f16 v0, v0, v1, v2
; GFX11-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_minmax_f16_ieee_false:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_maxmin_num_f16 v0, v0, v1, v2
+; GFX12-NEXT: ; return to shader part epilog
%max = call half @llvm.maxnum.f16(half %a, half %b)
%minmax = call half @llvm.minnum.f16(half %max, half %c)
ret half %minmax
}
define amdgpu_ps void @s_test_minmax_f16_ieee_false(half inreg %a, half inreg %b, half inreg %c, ptr addrspace(1) inreg %out) {
-; SDAG-LABEL: s_test_minmax_f16_ieee_false:
-; SDAG: ; %bb.0:
-; SDAG-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, 0
-; SDAG-NEXT: s_mov_b32 s5, s4
-; SDAG-NEXT: s_mov_b32 s4, s3
-; SDAG-NEXT: v_maxmin_f16 v0, s0, s1, v0
-; SDAG-NEXT: global_store_b16 v1, v0, s[4:5]
-; SDAG-NEXT: s_endpgm
+; SDAG-GFX11-LABEL: s_test_minmax_f16_ieee_false:
+; SDAG-GFX11: ; %bb.0:
+; SDAG-GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, 0
+; SDAG-GFX11-NEXT: s_mov_b32 s5, s4
+; SDAG-GFX11-NEXT: s_mov_b32 s4, s3
+; SDAG-GFX11-NEXT: v_maxmin_f16 v0, s0, s1, v0
+; SDAG-GFX11-NEXT: global_store_b16 v1, v0, s[4:5]
+; SDAG-GFX11-NEXT: s_endpgm
;
-; GISEL-LABEL: s_test_minmax_f16_ieee_false:
-; GISEL: ; %bb.0:
-; GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, 0
-; GISEL-NEXT: s_mov_b32 s6, s3
-; GISEL-NEXT: s_mov_b32 s7, s4
-; GISEL-NEXT: v_maxmin_f16 v0, s0, s1, v0
-; GISEL-NEXT: global_store_b16 v1, v0, s[6:7]
-; GISEL-NEXT: s_endpgm
+; GISEL-GFX11-LABEL: s_test_minmax_f16_ieee_false:
+; GISEL-GFX11: ; %bb.0:
+; GISEL-GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, 0
+; GISEL-GFX11-NEXT: s_mov_b32 s6, s3
+; GISEL-GFX11-NEXT: s_mov_b32 s7, s4
+; GISEL-GFX11-NEXT: v_maxmin_f16 v0, s0, s1, v0
+; GISEL-GFX11-NEXT: global_store_b16 v1, v0, s[6:7]
+; GISEL-GFX11-NEXT: s_endpgm
+;
+; SDAG-GFX12-LABEL: s_test_minmax_f16_ieee_false:
+; SDAG-GFX12: ; %bb.0:
+; SDAG-GFX12-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, 0
+; SDAG-GFX12-NEXT: s_mov_b32 s5, s4
+; SDAG-GFX12-NEXT: s_mov_b32 s4, s3
+; SDAG-GFX12-NEXT: v_maxmin_num_f16 v0, s0, s1, v0
+; SDAG-GFX12-NEXT: global_store_b16 v1, v0, s[4:5]
+; SDAG-GFX12-NEXT: s_endpgm
+;
+; GISEL-GFX12-LABEL: s_test_minmax_f16_ieee_false:
+; GISEL-GFX12: ; %bb.0:
+; GISEL-GFX12-NEXT: s_max_num_f16 s0, s0, s1
+; GISEL-GFX12-NEXT: s_mov_b32 s6, s3
+; GISEL-GFX12-NEXT: s_mov_b32 s7, s4
+; GISEL-GFX12-NEXT: v_mov_b32_e32 v1, 0
+; GISEL-GFX12-NEXT: s_min_num_f16 s0, s0, s2
+; GISEL-GFX12-NEXT: v_mov_b32_e32 v0, s0
+; GISEL-GFX12-NEXT: global_store_b16 v1, v0, s[6:7]
+; GISEL-GFX12-NEXT: s_endpgm
%smax = call half @llvm.maxnum.f16(half %a, half %b)
%sminmax = call half @llvm.minnum.f16(half %smax, half %c)
store half %sminmax, ptr addrspace(1) %out
@@ -308,23 +526,49 @@ define amdgpu_ps void @s_test_minmax_f16_ieee_false(half inreg %a, half inreg %b
}
define half @test_minmax_commuted_f16_ieee_true(half %a, half %b, half %c) {
-; SDAG-LABEL: test_minmax_commuted_f16_ieee_true:
-; SDAG: ; %bb.0:
-; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT: v_max_f16_e32 v1, v1, v1
-; SDAG-NEXT: v_max_f16_e32 v0, v0, v0
-; SDAG-NEXT: v_max_f16_e32 v2, v2, v2
-; SDAG-NEXT: v_maxmin_f16 v0, v0, v1, v2
-; SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GISEL-LABEL: test_minmax_commuted_f16_ieee_true:
-; GISEL: ; %bb.0:
-; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-NEXT: v_max_f16_e32 v0, v0, v0
-; GISEL-NEXT: v_max_f16_e32 v1, v1, v1
-; GISEL-NEXT: v_max_f16_e32 v2, v2, v2
-; GISEL-NEXT: v_maxmin_f16 v0, v0, v1, v2
-; GISEL-NEXT: s_setpc_b64 s[30:31]
+; SDAG-GFX11-LABEL: test_minmax_commuted_f16_ieee_true:
+; SDAG-GFX11: ; %bb.0:
+; SDAG-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; SDAG-GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; SDAG-GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; SDAG-GFX11-NEXT: v_maxmin_f16 v0, v0, v1, v2
+; SDAG-GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX11-LABEL: test_minmax_commuted_f16_ieee_true:
+; GISEL-GFX11: ; %bb.0:
+; GISEL-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GISEL-GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GISEL-GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; GISEL-GFX11-NEXT: v_maxmin_f16 v0, v0, v1, v2
+; GISEL-GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG-GFX12-LABEL: test_minmax_commuted_f16_ieee_true:
+; SDAG-GFX12: ; %bb.0:
+; SDAG-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; SDAG-GFX12-NEXT: s_wait_expcnt 0x0
+; SDAG-GFX12-NEXT: s_wait_samplecnt 0x0
+; SDAG-GFX12-NEXT: s_wait_bvhcnt 0x0
+; SDAG-GFX12-NEXT: s_wait_kmcnt 0x0
+; SDAG-GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; SDAG-GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; SDAG-GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; SDAG-GFX12-NEXT: v_maxmin_num_f16 v0, v0, v1, v2
+; SDAG-GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX12-LABEL: test_minmax_commuted_f16_ieee_true:
+; GISEL-GFX12: ; %bb.0:
+; GISEL-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GISEL-GFX12-NEXT: s_wait_expcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_samplecnt 0x0
+; GISEL-GFX12-NEXT: s_wait_bvhcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_kmcnt 0x0
+; GISEL-GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GISEL-GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GISEL-GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GISEL-GFX12-NEXT: v_maxmin_num_f16 v0, v0, v1, v2
+; GISEL-GFX12-NEXT: s_setpc_b64 s[30:31]
%max = call half @llvm.maxnum.f16(half %a, half %b)
%minmax = call half @llvm.minnum.f16(half %c, half %max)
ret half %minmax
@@ -335,29 +579,60 @@ define amdgpu_ps half @test_maxmin_f16_ieee_false(half %a, half %b, half %c) {
; GFX11: ; %bb.0:
; GFX11-NEXT: v_minmax_f16 v0, v0, v1, v2
; GFX11-NEXT: ; return to shader part epilog
+;
+; GFX12-LABEL: test_maxmin_f16_ieee_false:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: v_minmax_num_f16 v0, v0, v1, v2
+; GFX12-NEXT: ; return to shader part epilog
%min = call half @llvm.minnum.f16(half %a, half %b)
%maxmin = call half @llvm.maxnum.f16(half %min, half %c)
ret half %maxmin
}
define half @test_maxmin_commuted_f16_ieee_true(half %a, half %b, half %c) {
-; SDAG-LABEL: test_maxmin_commuted_f16_ieee_true:
-; SDAG: ; %bb.0:
-; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SDAG-NEXT: v_max_f16_e32 v1, v1, v1
-; SDAG-NEXT: v_max_f16_e32 v0, v0, v0
-; SDAG-NEXT: v_max_f16_e32 v2, v2, v2
-; SDAG-NEXT: v_minmax_f16 v0, v0, v1, v2
-; SDAG-NEXT: s_setpc_b64 s[30:31]
-;
-; GISEL-LABEL: test_maxmin_commuted_f16_ieee_true:
-; GISEL: ; %bb.0:
-; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GISEL-NEXT: v_max_f16_e32 v0, v0, v0
-; GISEL-NEXT: v_max_f16_e32 v1, v1, v1
-; GISEL-NEXT: v_max_f16_e32 v2, v2, v2
-; GISEL-NEXT: v_minmax_f16 v0, v0, v1, v2
-; GISEL-NEXT: s_setpc_b64 s[30:31]
+; SDAG-GFX11-LABEL: test_maxmin_commuted_f16_ieee_true:
+; SDAG-GFX11: ; %bb.0:
+; SDAG-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SDAG-GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; SDAG-GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; SDAG-GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; SDAG-GFX11-NEXT: v_minmax_f16 v0, v0, v1, v2
+; SDAG-GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX11-LABEL: test_maxmin_commuted_f16_ieee_true:
+; GISEL-GFX11: ; %bb.0:
+; GISEL-GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GISEL-GFX11-NEXT: v_max_f16_e32 v0, v0, v0
+; GISEL-GFX11-NEXT: v_max_f16_e32 v1, v1, v1
+; GISEL-GFX11-NEXT: v_max_f16_e32 v2, v2, v2
+; GISEL-GFX11-NEXT: v_minmax_f16 v0, v0, v1, v2
+; GISEL-GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; SDAG-GFX12-LABEL: test_maxmin_commuted_f16_ieee_true:
+; SDAG-GFX12: ; %bb.0:
+; SDAG-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; SDAG-GFX12-NEXT: s_wait_expcnt 0x0
+; SDAG-GFX12-NEXT: s_wait_samplecnt 0x0
+; SDAG-GFX12-NEXT: s_wait_bvhcnt 0x0
+; SDAG-GFX12-NEXT: s_wait_kmcnt 0x0
+; SDAG-GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; SDAG-GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; SDAG-GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; SDAG-GFX12-NEXT: v_minmax_num_f16 v0, v0, v1, v2
+; SDAG-GFX12-NEXT: s_setpc_b64 s[30:31]
+;
+; GISEL-GFX12-LABEL: test_maxmin_commuted_f16_ieee_true:
+; GISEL-GFX12: ; %bb.0:
+; GISEL-GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GISEL-GFX12-NEXT: s_wait_expcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_samplecnt 0x0
+; GISEL-GFX12-NEXT: s_wait_bvhcnt 0x0
+; GISEL-GFX12-NEXT: s_wait_kmcnt 0x0
+; GISEL-GFX12-NEXT: v_max_num_f16_e32 v0, v0, v0
+; GISEL-GFX12-NEXT: v_max_num_f16_e32 v1, v1, v1
+; GISEL-GFX12-NEXT: v_max_num_f16_e32 v2, v2, v2
+; GISEL-GFX12-NEXT: v_minmax_num_f16 v0, v0, v1, v2
+; GISEL-GFX12-NEXT: s_setpc_b64 s[30:31]
%min = call half @llvm.minnum.f16(half %a, half %b)
%maxmin = call half @llvm.maxnum.f16(half %c, half %min)
ret half %maxmin
@@ -370,6 +645,17 @@ define void @test_med3_f16(ptr addrspace(1) %arg, half %x, half %y, half %z) #0
; GFX11-NEXT: v_med3_f16 v2, v2, v3, v4
; GFX11-NEXT: global_store_b16 v[0:1], v2, off
; GFX11-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX12-LABEL: test_med3_f16:
+; GFX12: ; %bb.0:
+; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
+; GFX12-NEXT: s_wait_expcnt 0x0
+; GFX12-NEXT: s_wait_samplecnt 0x0
+; GFX12-NEXT: s_wait_bvhcnt 0x0
+; GFX12-NEXT: s_wait_kmcnt 0x0
+; GFX12-NEXT: v_med3_num_f16 v2, v2, v3, v4
+; GFX12-NEXT: global_store_b16 v[0:1], v2, off
+; GFX12-NEXT: s_setpc_b64 s[30:31]
%tmp0 = call half @llvm.minnum.f16(half %x, half %y)
%tmp1 = call half @llvm.maxnum.f16(half %x, half %y)
%tmp2 = call half @llvm.minnum.f16(half %tmp1, half %z)
diff --git a/llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-mov.ll b/llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-mov.ll
new file mode 100644
index 0000000..a9b8663
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-mov.ll
@@ -0,0 +1,120 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx942 %s -o - | FileCheck %s --check-prefixes=GFX942
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx908 %s -o - | FileCheck %s --check-prefixes=GFX908
+
+define amdgpu_kernel void @matmul_kernel(i32 %a0, i32 %a1) {
+; GFX942-LABEL: matmul_kernel:
+; GFX942: ; %bb.0: ; %entry
+; GFX942-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
+; GFX942-NEXT: v_mov_b32_e32 v1, 0
+; GFX942-NEXT: s_mov_b32 s2, 0
+; GFX942-NEXT: v_accvgpr_write_b32 a0, v1
+; GFX942-NEXT: s_mov_b32 s3, 0
+; GFX942-NEXT: s_waitcnt lgkmcnt(0)
+; GFX942-NEXT: s_cmp_lg_u32 s0, 0
+; GFX942-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX942-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX942-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
+; GFX942-NEXT: s_branch .LBB0_2
+; GFX942-NEXT: .LBB0_1: ; %bb2
+; GFX942-NEXT: ; in Loop: Header=BB0_2 Depth=1
+; GFX942-NEXT: s_or_b32 s4, s3, 1
+; GFX942-NEXT: s_ashr_i32 s5, s3, 31
+; GFX942-NEXT: s_mov_b32 s3, s2
+; GFX942-NEXT: v_mov_b64_e32 v[4:5], s[2:3]
+; GFX942-NEXT: v_accvgpr_read_b32 v0, a0
+; GFX942-NEXT: v_mov_b32_e32 v2, v1
+; GFX942-NEXT: v_mov_b32_e32 v3, v1
+; GFX942-NEXT: v_accvgpr_write_b32 a0, v0
+; GFX942-NEXT: v_accvgpr_write_b32 a1, v1
+; GFX942-NEXT: v_accvgpr_write_b32 a2, v2
+; GFX942-NEXT: v_accvgpr_write_b32 a3, v3
+; GFX942-NEXT: s_and_b32 s3, s5, s4
+; GFX942-NEXT: s_nop 0
+; GFX942-NEXT: v_mfma_f32_16x16x16_f16 a[0:3], v[4:5], v[4:5], a[0:3]
+; GFX942-NEXT: s_cbranch_execz .LBB0_4
+; GFX942-NEXT: .LBB0_2: ; %bb
+; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX942-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GFX942-NEXT: s_cbranch_vccz .LBB0_1
+; GFX942-NEXT: ; %bb.3:
+; GFX942-NEXT: ; implicit-def: $sgpr3
+; GFX942-NEXT: .LBB0_4: ; %common.ret
+; GFX942-NEXT: s_endpgm
+;
+; GFX908-LABEL: matmul_kernel:
+; GFX908: ; %bb.0: ; %entry
+; GFX908-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x0
+; GFX908-NEXT: v_mov_b32_e32 v1, 0
+; GFX908-NEXT: s_mov_b32 s2, 0
+; GFX908-NEXT: s_mov_b32 s3, 0
+; GFX908-NEXT: v_accvgpr_write_b32 a0, v1
+; GFX908-NEXT: s_waitcnt lgkmcnt(0)
+; GFX908-NEXT: s_cmp_lg_u32 s0, 0
+; GFX908-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX908-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX908-NEXT: v_cmp_ne_u32_e64 s[0:1], 1, v0
+; GFX908-NEXT: s_branch .LBB0_2
+; GFX908-NEXT: .LBB0_1: ; %bb2
+; GFX908-NEXT: ; in Loop: Header=BB0_2 Depth=1
+; GFX908-NEXT: s_or_b32 s4, s3, 1
+; GFX908-NEXT: s_ashr_i32 s5, s3, 31
+; GFX908-NEXT: s_mov_b32 s3, s2
+; GFX908-NEXT: s_nop 3
+; GFX908-NEXT: v_accvgpr_read_b32 v0, a0
+; GFX908-NEXT: v_mov_b32_e32 v5, s3
+; GFX908-NEXT: v_mov_b32_e32 v4, s2
+; GFX908-NEXT: v_mov_b32_e32 v2, v1
+; GFX908-NEXT: v_mov_b32_e32 v3, v1
+; GFX908-NEXT: v_accvgpr_write_b32 a0, v0
+; GFX908-NEXT: v_accvgpr_write_b32 a1, v1
+; GFX908-NEXT: v_accvgpr_write_b32 a2, v2
+; GFX908-NEXT: v_accvgpr_write_b32 a3, v3
+; GFX908-NEXT: s_and_b32 s3, s5, s4
+; GFX908-NEXT: v_mfma_f32_16x16x16f16 a[0:3], v[4:5], v[4:5], a[0:3]
+; GFX908-NEXT: s_cbranch_execz .LBB0_4
+; GFX908-NEXT: .LBB0_2: ; %bb
+; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX908-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GFX908-NEXT: s_cbranch_vccz .LBB0_1
+; GFX908-NEXT: ; %bb.3:
+; GFX908-NEXT: ; implicit-def: $sgpr3
+; GFX908-NEXT: .LBB0_4: ; %common.ret
+; GFX908-NEXT: s_endpgm
+entry:
+ br label %bb
+
+bb:
+ %i = phi { float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float } [ %i10, %bb2 ], [ zeroinitializer, %entry ]
+ %i1 = phi i32 [ %i5, %bb2 ], [ 0, %entry ]
+ %c0 = icmp ne i32 %a0, 0
+ br i1 %c0, label %bb2, label %bb11
+
+bb2:
+ %i3 = or i32 %i1, 1
+ %i4 = icmp slt i32 %i1, 0
+ %i5 = select i1 %i4, i32 %i3, i32 0
+ %i6 = extractvalue { float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float } %i, 123
+ %i7 = insertelement <4 x float> zeroinitializer, float %i6, i32 0
+ %i8 = call <4 x float> @llvm.amdgcn.mfma.f32.16x16x16f16(<4 x half> zeroinitializer, <4 x half> zeroinitializer, <4 x float> %i7, i32 0, i32 0, i32 0)
+ %i9 = extractelement <4 x float> %i8, i32 0
+ %i10 = insertvalue { float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float } zeroinitializer, float %i9, 123
+ br label %bb
+
+bb11:
+ %c1 = icmp ne i32 %a1, 0
+ br i1 %c1, label %bb12, label %common.ret
+
+common.ret:
+ ret void
+
+bb12:
+ %i13 = extractvalue { float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float, float } %i, 0
+ %i14 = insertelement <4 x float> zeroinitializer, float %i13, i32 0
+ %i15 = insertelement <4 x float> %i14, float 0.000000e+00, i32 0
+ %i16 = insertelement <4 x float> %i15, float 0.000000e+00, i32 0
+ br label %common.ret
+}
+
+; Function Attrs: convergent nocallback nofree nosync nounwind willreturn memory(none)
+declare <4 x float> @llvm.amdgcn.mfma.f32.16x16x16f16(<4 x half>, <4 x half>, <4 x float>, i32 immarg, i32 immarg, i32 immarg)
diff --git a/llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-mov.mir b/llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-mov.mir
new file mode 100644
index 0000000..5c83170
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-mov.mir
@@ -0,0 +1,235 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx942 -run-pass si-fold-operands %s -o - | FileCheck %s
+# RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx942 -start-before=si-fold-operands -stop-after=register-coalescer %s -o - | FileCheck %s --check-prefixes=COALESCE
+# RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx908 -start-before=si-fold-operands -stop-after=register-coalescer %s -o - | FileCheck %s --check-prefixes=GFX908-COALESCE
+
+...
+---
+name: test
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: test
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $sgpr4_sgpr5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5
+ ; CHECK-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]](p4), 0, 0 :: (dereferenceable invariant load (s32), align 16, addrspace 4)
+ ; CHECK-NEXT: S_BITCMP1_B32 killed [[S_LOAD_DWORD_IMM]], 0, implicit-def $scc
+ ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 0
+ ; CHECK-NEXT: [[S_CSELECT_B64_:%[0-9]+]]:sreg_64_xexec = S_CSELECT_B64 -1, 0, implicit $scc
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:agpr_32 = COPY [[V_MOV_B32_e32_]]
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:sreg_32 = IMPLICIT_DEF
+ ; CHECK-NEXT: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, [[S_CSELECT_B64_]], implicit $exec
+ ; CHECK-NEXT: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_NE_U32_e64 [[V_CNDMASK_B32_e64_]], 1, implicit $exec
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.3(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:agpr_32 = PHI [[COPY1]], %bb.0, %24, %bb.3
+ ; CHECK-NEXT: [[PHI1:%[0-9]+]]:sreg_32 = PHI [[S_MOV_B32_]], %bb.0, %11, %bb.3
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[PHI]]
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:agpr_32 = COPY [[PHI]]
+ ; CHECK-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 -1
+ ; CHECK-NEXT: $vcc = S_AND_B64 $exec, [[V_CMP_NE_U32_e64_]], implicit-def $scc
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.3, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[PHI1]], 1, implicit-def dead $scc
+ ; CHECK-NEXT: [[S_ASHR_I32_:%[0-9]+]]:sreg_32 = S_ASHR_I32 [[PHI1]], 31, implicit-def dead $scc
+ ; CHECK-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32 = S_AND_B32 killed [[S_ASHR_I32_]], killed [[S_OR_B32_]], implicit-def dead $scc
+ ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:vreg_128_align2 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[V_MOV_B32_e32_]], %subreg.sub1, [[V_MOV_B32_e32_]], %subreg.sub2, [[V_MOV_B32_e32_]], %subreg.sub3
+ ; CHECK-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_MOV_B32_]], %subreg.sub1
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]]
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:areg_128_align2 = COPY [[REG_SEQUENCE]]
+ ; CHECK-NEXT: [[V_MFMA_F32_16X16X16F16_e64_:%[0-9]+]]:areg_128_align2 = V_MFMA_F32_16X16X16F16_e64 [[COPY4]], [[COPY4]], killed [[COPY5]], 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: [[S_MOV_B64_1:%[0-9]+]]:sreg_64 = S_MOV_B64 0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: successors: %bb.4(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI2:%[0-9]+]]:sreg_32 = PHI [[DEF]], %bb.1, [[S_AND_B32_]], %bb.2
+ ; CHECK-NEXT: [[PHI3:%[0-9]+]]:agpr_32 = PHI [[COPY3]], %bb.1, [[V_MFMA_F32_16X16X16F16_e64_]].sub0, %bb.2
+ ; CHECK-NEXT: [[PHI4:%[0-9]+]]:sreg_64_xexec = PHI [[S_MOV_B64_]], %bb.1, [[S_MOV_B64_1]], %bb.2
+ ; CHECK-NEXT: [[V_CNDMASK_B32_e64_1:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, [[PHI4]], implicit $exec
+ ; CHECK-NEXT: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_NE_U32_e64 [[V_CNDMASK_B32_e64_1]], 1, implicit $exec
+ ; CHECK-NEXT: $vcc = S_AND_B64 $exec, [[V_CMP_NE_U32_e64_1]], implicit-def $scc
+ ; CHECK-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ ; CHECK-NEXT: S_BRANCH %bb.4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4:
+ ; CHECK-NEXT: successors: %bb.5(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5:
+ ; CHECK-NEXT: S_ENDPGM 0
+ ;
+ ; COALESCE-LABEL: name: test
+ ; COALESCE: bb.0:
+ ; COALESCE-NEXT: successors: %bb.1(0x80000000)
+ ; COALESCE-NEXT: liveins: $sgpr4_sgpr5
+ ; COALESCE-NEXT: {{ $}}
+ ; COALESCE-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5
+ ; COALESCE-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]](p4), 0, 0 :: (dereferenceable invariant load (s32), align 16, addrspace 4)
+ ; COALESCE-NEXT: S_BITCMP1_B32 [[S_LOAD_DWORD_IMM]], 0, implicit-def $scc
+ ; COALESCE-NEXT: undef [[S_MOV_B32_:%[0-9]+]].sub0:sgpr_64 = S_MOV_B32 0
+ ; COALESCE-NEXT: [[S_CSELECT_B64_:%[0-9]+]]:sreg_64_xexec = S_CSELECT_B64 -1, 0, implicit killed $scc
+ ; COALESCE-NEXT: undef [[V_MOV_B32_e32_:%[0-9]+]].sub1:vreg_128_align2 = V_MOV_B32_e32 0, implicit $exec
+ ; COALESCE-NEXT: undef [[V_ACCVGPR_WRITE_B32_e64_:%[0-9]+]].sub0:areg_128_align2 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec
+ ; COALESCE-NEXT: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, [[S_CSELECT_B64_]], implicit $exec
+ ; COALESCE-NEXT: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_NE_U32_e64 1, [[V_CNDMASK_B32_e64_]], implicit $exec
+ ; COALESCE-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; COALESCE-NEXT: {{ $}}
+ ; COALESCE-NEXT: bb.1:
+ ; COALESCE-NEXT: successors: %bb.2(0x40000000), %bb.3(0x40000000)
+ ; COALESCE-NEXT: {{ $}}
+ ; COALESCE-NEXT: [[V_MOV_B32_e32_:%[0-9]+]].sub0:vreg_128_align2 = COPY [[V_ACCVGPR_WRITE_B32_e64_]].sub0
+ ; COALESCE-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 -1
+ ; COALESCE-NEXT: $vcc = S_AND_B64 $exec, [[V_CMP_NE_U32_e64_]], implicit-def dead $scc
+ ; COALESCE-NEXT: S_CBRANCH_VCCNZ %bb.3, implicit killed $vcc
+ ; COALESCE-NEXT: S_BRANCH %bb.2
+ ; COALESCE-NEXT: {{ $}}
+ ; COALESCE-NEXT: bb.2:
+ ; COALESCE-NEXT: successors: %bb.3(0x80000000)
+ ; COALESCE-NEXT: {{ $}}
+ ; COALESCE-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[S_MOV_B32_1]], 1, implicit-def dead $scc
+ ; COALESCE-NEXT: [[S_ASHR_I32_:%[0-9]+]]:sreg_32 = S_ASHR_I32 [[S_MOV_B32_1]], 31, implicit-def dead $scc
+ ; COALESCE-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_ASHR_I32_]], [[S_OR_B32_]], implicit-def dead $scc
+ ; COALESCE-NEXT: [[V_MOV_B32_e32_:%[0-9]+]].sub2:vreg_128_align2 = COPY [[V_MOV_B32_e32_]].sub1
+ ; COALESCE-NEXT: [[V_MOV_B32_e32_:%[0-9]+]].sub3:vreg_128_align2 = COPY [[V_MOV_B32_e32_]].sub1
+ ; COALESCE-NEXT: [[S_MOV_B32_:%[0-9]+]].sub1:sgpr_64 = COPY [[S_MOV_B32_]].sub0
+ ; COALESCE-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY [[S_MOV_B32_]]
+ ; COALESCE-NEXT: [[COPY2:%[0-9]+]]:areg_128_align2 = COPY [[V_MOV_B32_e32_]]
+ ; COALESCE-NEXT: [[V_ACCVGPR_WRITE_B32_e64_:%[0-9]+]]:areg_128_align2 = V_MFMA_F32_16X16X16F16_e64 [[COPY1]], [[COPY1]], [[COPY2]], 0, 0, 0, implicit $mode, implicit $exec
+ ; COALESCE-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 0
+ ; COALESCE-NEXT: {{ $}}
+ ; COALESCE-NEXT: bb.3:
+ ; COALESCE-NEXT: successors: %bb.4(0x40000000), %bb.1(0x40000000)
+ ; COALESCE-NEXT: {{ $}}
+ ; COALESCE-NEXT: [[V_CNDMASK_B32_e64_1:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, [[S_MOV_B64_]], implicit $exec
+ ; COALESCE-NEXT: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_NE_U32_e64 1, [[V_CNDMASK_B32_e64_1]], implicit $exec
+ ; COALESCE-NEXT: $vcc = S_AND_B64 $exec, [[V_CMP_NE_U32_e64_1]], implicit-def dead $scc
+ ; COALESCE-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit killed $vcc
+ ; COALESCE-NEXT: S_BRANCH %bb.4
+ ; COALESCE-NEXT: {{ $}}
+ ; COALESCE-NEXT: bb.4:
+ ; COALESCE-NEXT: successors: %bb.5(0x80000000)
+ ; COALESCE-NEXT: {{ $}}
+ ; COALESCE-NEXT: bb.5:
+ ; COALESCE-NEXT: S_ENDPGM 0
+ ;
+ ; GFX908-COALESCE-LABEL: name: test
+ ; GFX908-COALESCE: bb.0:
+ ; GFX908-COALESCE-NEXT: successors: %bb.1(0x80000000)
+ ; GFX908-COALESCE-NEXT: liveins: $sgpr4_sgpr5
+ ; GFX908-COALESCE-NEXT: {{ $}}
+ ; GFX908-COALESCE-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5
+ ; GFX908-COALESCE-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]](p4), 0, 0 :: (dereferenceable invariant load (s32), align 16, addrspace 4)
+ ; GFX908-COALESCE-NEXT: S_BITCMP1_B32 [[S_LOAD_DWORD_IMM]], 0, implicit-def $scc
+ ; GFX908-COALESCE-NEXT: undef [[S_MOV_B32_:%[0-9]+]].sub0:sgpr_64 = S_MOV_B32 0
+ ; GFX908-COALESCE-NEXT: [[S_CSELECT_B64_:%[0-9]+]]:sreg_64_xexec = S_CSELECT_B64 -1, 0, implicit killed $scc
+ ; GFX908-COALESCE-NEXT: undef [[V_MOV_B32_e32_:%[0-9]+]].sub1:vreg_128_align2 = V_MOV_B32_e32 0, implicit $exec
+ ; GFX908-COALESCE-NEXT: undef [[V_ACCVGPR_WRITE_B32_e64_:%[0-9]+]].sub0:areg_128_align2 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec
+ ; GFX908-COALESCE-NEXT: [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, [[S_CSELECT_B64_]], implicit $exec
+ ; GFX908-COALESCE-NEXT: [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_64_xexec = V_CMP_NE_U32_e64 1, [[V_CNDMASK_B32_e64_]], implicit $exec
+ ; GFX908-COALESCE-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
+ ; GFX908-COALESCE-NEXT: {{ $}}
+ ; GFX908-COALESCE-NEXT: bb.1:
+ ; GFX908-COALESCE-NEXT: successors: %bb.2(0x40000000), %bb.3(0x40000000)
+ ; GFX908-COALESCE-NEXT: {{ $}}
+ ; GFX908-COALESCE-NEXT: [[V_MOV_B32_e32_:%[0-9]+]].sub0:vreg_128_align2 = COPY [[V_ACCVGPR_WRITE_B32_e64_]].sub0
+ ; GFX908-COALESCE-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 -1
+ ; GFX908-COALESCE-NEXT: $vcc = S_AND_B64 $exec, [[V_CMP_NE_U32_e64_]], implicit-def dead $scc
+ ; GFX908-COALESCE-NEXT: S_CBRANCH_VCCNZ %bb.3, implicit killed $vcc
+ ; GFX908-COALESCE-NEXT: S_BRANCH %bb.2
+ ; GFX908-COALESCE-NEXT: {{ $}}
+ ; GFX908-COALESCE-NEXT: bb.2:
+ ; GFX908-COALESCE-NEXT: successors: %bb.3(0x80000000)
+ ; GFX908-COALESCE-NEXT: {{ $}}
+ ; GFX908-COALESCE-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32 = S_OR_B32 [[S_MOV_B32_1]], 1, implicit-def dead $scc
+ ; GFX908-COALESCE-NEXT: [[S_ASHR_I32_:%[0-9]+]]:sreg_32 = S_ASHR_I32 [[S_MOV_B32_1]], 31, implicit-def dead $scc
+ ; GFX908-COALESCE-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_AND_B32 [[S_ASHR_I32_]], [[S_OR_B32_]], implicit-def dead $scc
+ ; GFX908-COALESCE-NEXT: [[V_MOV_B32_e32_:%[0-9]+]].sub2:vreg_128_align2 = COPY [[V_MOV_B32_e32_]].sub1
+ ; GFX908-COALESCE-NEXT: [[V_MOV_B32_e32_:%[0-9]+]].sub3:vreg_128_align2 = COPY [[V_MOV_B32_e32_]].sub1
+ ; GFX908-COALESCE-NEXT: [[S_MOV_B32_:%[0-9]+]].sub1:sgpr_64 = COPY [[S_MOV_B32_]].sub0
+ ; GFX908-COALESCE-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY [[S_MOV_B32_]]
+ ; GFX908-COALESCE-NEXT: [[COPY2:%[0-9]+]]:areg_128_align2 = COPY [[V_MOV_B32_e32_]]
+ ; GFX908-COALESCE-NEXT: [[V_ACCVGPR_WRITE_B32_e64_:%[0-9]+]]:areg_128_align2 = V_MFMA_F32_16X16X16F16_e64 [[COPY1]], [[COPY1]], [[COPY2]], 0, 0, 0, implicit $mode, implicit $exec
+ ; GFX908-COALESCE-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 0
+ ; GFX908-COALESCE-NEXT: {{ $}}
+ ; GFX908-COALESCE-NEXT: bb.3:
+ ; GFX908-COALESCE-NEXT: successors: %bb.4(0x40000000), %bb.1(0x40000000)
+ ; GFX908-COALESCE-NEXT: {{ $}}
+ ; GFX908-COALESCE-NEXT: [[V_CNDMASK_B32_e64_1:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, [[S_MOV_B64_]], implicit $exec
+ ; GFX908-COALESCE-NEXT: [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_64_xexec = V_CMP_NE_U32_e64 1, [[V_CNDMASK_B32_e64_1]], implicit $exec
+ ; GFX908-COALESCE-NEXT: $vcc = S_AND_B64 $exec, [[V_CMP_NE_U32_e64_1]], implicit-def dead $scc
+ ; GFX908-COALESCE-NEXT: S_CBRANCH_VCCNZ %bb.1, implicit killed $vcc
+ ; GFX908-COALESCE-NEXT: S_BRANCH %bb.4
+ ; GFX908-COALESCE-NEXT: {{ $}}
+ ; GFX908-COALESCE-NEXT: bb.4:
+ ; GFX908-COALESCE-NEXT: successors: %bb.5(0x80000000)
+ ; GFX908-COALESCE-NEXT: {{ $}}
+ ; GFX908-COALESCE-NEXT: bb.5:
+ ; GFX908-COALESCE-NEXT: S_ENDPGM 0
+ bb.0:
+ successors: %bb.1
+ liveins: $sgpr4_sgpr5
+
+ %0:sgpr_64(p4) = COPY $sgpr4_sgpr5
+ %1:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0(p4), 0, 0 :: (dereferenceable invariant load (s32), align 16, addrspace 4)
+ S_BITCMP1_B32 killed %1, 0, implicit-def $scc
+ %2:sgpr_32 = S_MOV_B32 0
+ %3:sreg_64_xexec = S_CSELECT_B64 -1, 0, implicit $scc
+ %4:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ %5:sreg_32 = IMPLICIT_DEF
+ %6:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, %3, implicit $exec
+ %7:sreg_64_xexec = V_CMP_NE_U32_e64 %6, 1, implicit $exec
+
+ bb.1:
+ successors: %bb.2, %bb.3
+
+ %8:vgpr_32 = PHI %4, %bb.0, %9, %bb.3
+ %10:sreg_32 = PHI %2, %bb.0, %11, %bb.3
+ %12:agpr_32 = COPY %8
+ %13:sreg_64 = S_MOV_B64 -1
+ $vcc = S_AND_B64 $exec, %7, implicit-def $scc
+ S_CBRANCH_VCCNZ %bb.3, implicit $vcc
+ S_BRANCH %bb.2
+
+ bb.2:
+ successors: %bb.3
+
+ %14:sreg_32 = S_OR_B32 %10, 1, implicit-def dead $scc
+ %15:sreg_32 = S_ASHR_I32 %10, 31, implicit-def dead $scc
+ %16:sreg_32 = S_AND_B32 killed %15, killed %14, implicit-def dead $scc
+ %17:vreg_128_align2 = REG_SEQUENCE %8, %subreg.sub0, %4, %subreg.sub1, %4, %subreg.sub2, %4, %subreg.sub3
+ %18:sreg_64 = REG_SEQUENCE %2, %subreg.sub0, %2, %subreg.sub1
+ %19:vreg_64_align2 = COPY %18
+ %20:areg_128_align2 = COPY %17
+ %21:areg_128_align2 = V_MFMA_F32_16X16X16F16_e64 %19, %19, killed %20, 0, 0, 0, implicit $mode, implicit $exec
+ %22:vgpr_32 = COPY %21.sub0
+ %23:sreg_64 = S_MOV_B64 0
+
+ bb.3:
+ successors: %bb.4, %bb.1
+
+ %11:sreg_32 = PHI %5, %bb.1, %16, %bb.2
+ %24:agpr_32 = PHI %12, %bb.1, %21.sub0, %bb.2
+ %25:sreg_64_xexec = PHI %13, %bb.1, %23, %bb.2
+ %9:vgpr_32 = COPY %24
+ %26:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, %25, implicit $exec
+ %27:sreg_64_xexec = V_CMP_NE_U32_e64 %26, 1, implicit $exec
+ $vcc = S_AND_B64 $exec, %27, implicit-def $scc
+ S_CBRANCH_VCCNZ %bb.1, implicit $vcc
+ S_BRANCH %bb.4
+
+ bb.4:
+ successors: %bb.5
+
+ bb.5:
+ S_ENDPGM 0
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-read.mir b/llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-read.mir
new file mode 100644
index 0000000..49c0aaf
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-read.mir
@@ -0,0 +1,182 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx942 -run-pass si-fold-operands %s -o - | FileCheck %s
+# RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx942 -start-before=si-fold-operands -stop-after=register-coalescer %s -o - | FileCheck %s --check-prefixes=COALESCE
+# RUN: llc -mtriple=amdgcn--amdhsa -mcpu=gfx908 -start-before=si-fold-operands -stop-after=register-coalescer %s -o - | FileCheck %s --check-prefixes=GFX908-COALESCE
+
+...
+---
+name: test
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: test
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; CHECK-NEXT: liveins: $sgpr4_sgpr5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5
+ ; CHECK-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]](p4), 0, 0 :: (dereferenceable invariant load (s32), align 16, addrspace 4)
+ ; CHECK-NEXT: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 0
+ ; CHECK-NEXT: S_BITCMP0_B32 killed [[S_LOAD_DWORD_IMM]], 0, implicit-def $scc
+ ; CHECK-NEXT: S_CBRANCH_SCC0 %bb.2, implicit $scc
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[V_ACCVGPR_WRITE_B32_e64_:%[0-9]+]]:agpr_32 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec
+ ; CHECK-NEXT: S_BRANCH %bb.3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2:
+ ; CHECK-NEXT: successors: %bb.3(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sgpr_32 = S_MOV_B32 0
+ ; CHECK-NEXT: [[V_ACCVGPR_WRITE_B32_e64_1:%[0-9]+]]:agpr_32 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec
+ ; CHECK-NEXT: [[V_ACCVGPR_WRITE_B32_e64_2:%[0-9]+]]:agpr_32 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec
+ ; CHECK-NEXT: [[V_ACCVGPR_WRITE_B32_e64_3:%[0-9]+]]:agpr_32 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec
+ ; CHECK-NEXT: [[V_ACCVGPR_WRITE_B32_e64_4:%[0-9]+]]:agpr_32 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec
+ ; CHECK-NEXT: [[REG_SEQUENCE:%[0-9]+]]:areg_128_align2 = REG_SEQUENCE [[V_ACCVGPR_WRITE_B32_e64_1]], %subreg.sub0, [[V_ACCVGPR_WRITE_B32_e64_2]], %subreg.sub1, [[V_ACCVGPR_WRITE_B32_e64_3]], %subreg.sub2, [[V_ACCVGPR_WRITE_B32_e64_4]], %subreg.sub3
+ ; CHECK-NEXT: [[REG_SEQUENCE1:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[S_MOV_B32_1]], %subreg.sub0, [[S_MOV_B32_1]], %subreg.sub1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY [[REG_SEQUENCE1]]
+ ; CHECK-NEXT: [[V_MFMA_F32_16X16X16F16_e64_:%[0-9]+]]:areg_128_align2 = V_MFMA_F32_16X16X16F16_e64 [[COPY1]], [[COPY1]], 0, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: [[V_MFMA_F32_16X16X16F16_e64_1:%[0-9]+]]:areg_128_align2 = V_MFMA_F32_16X16X16F16_e64 [[COPY1]], [[COPY1]], killed [[V_MFMA_F32_16X16X16F16_e64_]], 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: [[V_MFMA_F32_16X16X16F16_e64_2:%[0-9]+]]:areg_128_align2 = V_MFMA_F32_16X16X16F16_e64 [[COPY1]], [[COPY1]], killed [[V_MFMA_F32_16X16X16F16_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: [[V_MFMA_F32_16X16X16F16_e64_3:%[0-9]+]]:areg_128_align2 = V_MFMA_F32_16X16X16F16_e64 [[COPY1]], [[COPY1]], killed [[V_MFMA_F32_16X16X16F16_e64_2]], 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[V_MFMA_F32_16X16X16F16_e64_3]].sub0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3:
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:agpr_32 = PHI [[V_ACCVGPR_WRITE_B32_e64_]], %bb.1, [[V_MFMA_F32_16X16X16F16_e64_3]].sub0, %bb.2
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[PHI]]
+ ; CHECK-NEXT: [[V_CVT_F16_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_F16_F32_e64 0, [[COPY3]], 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: [[V_PACK_B32_F16_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_PACK_B32_F16_e64 0, killed [[V_CVT_F16_F32_e64_]], 0, 0, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: [[REG_SEQUENCE2:%[0-9]+]]:vreg_64_align2 = REG_SEQUENCE [[V_PACK_B32_F16_e64_]], %subreg.sub0, killed [[V_MOV_B32_e32_]], %subreg.sub1
+ ; CHECK-NEXT: [[REG_SEQUENCE3:%[0-9]+]]:sgpr_128 = REG_SEQUENCE [[S_MOV_B32_]], %subreg.sub0, [[S_MOV_B32_]], %subreg.sub1, [[S_MOV_B32_]], %subreg.sub2, [[S_MOV_B32_]], %subreg.sub3
+ ; CHECK-NEXT: BUFFER_STORE_DWORDX2_OFFSET_exact [[REG_SEQUENCE2]], killed [[REG_SEQUENCE3]], 0, 0, 0, 0, implicit $exec :: (dereferenceable store (s64) into `ptr addrspace(8) null`, align 1, addrspace 8)
+ ; CHECK-NEXT: S_ENDPGM 0
+ ;
+ ; COALESCE-LABEL: name: test
+ ; COALESCE: bb.0:
+ ; COALESCE-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; COALESCE-NEXT: liveins: $sgpr4_sgpr5
+ ; COALESCE-NEXT: {{ $}}
+ ; COALESCE-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5
+ ; COALESCE-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]](p4), 0, 0 :: (dereferenceable invariant load (s32), align 16, addrspace 4)
+ ; COALESCE-NEXT: undef [[S_MOV_B32_:%[0-9]+]].sub0:sgpr_128 = S_MOV_B32 0
+ ; COALESCE-NEXT: S_BITCMP0_B32 [[S_LOAD_DWORD_IMM]], 0, implicit-def $scc
+ ; COALESCE-NEXT: S_CBRANCH_SCC0 %bb.2, implicit killed $scc
+ ; COALESCE-NEXT: {{ $}}
+ ; COALESCE-NEXT: bb.1:
+ ; COALESCE-NEXT: successors: %bb.3(0x80000000)
+ ; COALESCE-NEXT: {{ $}}
+ ; COALESCE-NEXT: undef [[V_ACCVGPR_WRITE_B32_e64_:%[0-9]+]].sub0:areg_128_align2 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec
+ ; COALESCE-NEXT: S_BRANCH %bb.3
+ ; COALESCE-NEXT: {{ $}}
+ ; COALESCE-NEXT: bb.2:
+ ; COALESCE-NEXT: successors: %bb.3(0x80000000)
+ ; COALESCE-NEXT: {{ $}}
+ ; COALESCE-NEXT: [[S_MOV_B32_:%[0-9]+]].sub1:sgpr_128 = COPY [[S_MOV_B32_]].sub0
+ ; COALESCE-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY [[S_MOV_B32_]].sub0_sub1
+ ; COALESCE-NEXT: [[V_MFMA_F32_16X16X16F16_e64_:%[0-9]+]]:areg_128_align2 = V_MFMA_F32_16X16X16F16_e64 [[COPY1]], [[COPY1]], 0, 0, 0, 0, implicit $mode, implicit $exec
+ ; COALESCE-NEXT: [[V_MFMA_F32_16X16X16F16_e64_1:%[0-9]+]]:areg_128_align2 = V_MFMA_F32_16X16X16F16_e64 [[COPY1]], [[COPY1]], [[V_MFMA_F32_16X16X16F16_e64_]], 0, 0, 0, implicit $mode, implicit $exec
+ ; COALESCE-NEXT: [[V_MFMA_F32_16X16X16F16_e64_2:%[0-9]+]]:areg_128_align2 = V_MFMA_F32_16X16X16F16_e64 [[COPY1]], [[COPY1]], [[V_MFMA_F32_16X16X16F16_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
+ ; COALESCE-NEXT: [[V_ACCVGPR_WRITE_B32_e64_:%[0-9]+]]:areg_128_align2 = V_MFMA_F32_16X16X16F16_e64 [[COPY1]], [[COPY1]], [[V_MFMA_F32_16X16X16F16_e64_2]], 0, 0, 0, implicit $mode, implicit $exec
+ ; COALESCE-NEXT: {{ $}}
+ ; COALESCE-NEXT: bb.3:
+ ; COALESCE-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[V_ACCVGPR_WRITE_B32_e64_]].sub0
+ ; COALESCE-NEXT: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 [[COPY2]], implicit $mode, implicit $exec
+ ; COALESCE-NEXT: undef [[V_PACK_B32_F16_e64_:%[0-9]+]].sub0:vreg_64_align2 = nofpexcept V_PACK_B32_F16_e64 0, [[V_CVT_F16_F32_e32_]], 0, 0, 0, 0, implicit $mode, implicit $exec
+ ; COALESCE-NEXT: [[V_PACK_B32_F16_e64_:%[0-9]+]].sub1:vreg_64_align2 = V_MOV_B32_e32 0, implicit $exec
+ ; COALESCE-NEXT: [[S_MOV_B32_:%[0-9]+]].sub1:sgpr_128 = COPY [[S_MOV_B32_]].sub0
+ ; COALESCE-NEXT: [[S_MOV_B32_:%[0-9]+]].sub2:sgpr_128 = COPY [[S_MOV_B32_]].sub0
+ ; COALESCE-NEXT: [[S_MOV_B32_:%[0-9]+]].sub3:sgpr_128 = COPY [[S_MOV_B32_]].sub0
+ ; COALESCE-NEXT: BUFFER_STORE_DWORDX2_OFFSET_exact [[V_PACK_B32_F16_e64_]], [[S_MOV_B32_]], 0, 0, 0, 0, implicit $exec :: (dereferenceable store (s64) into `ptr addrspace(8) null`, align 1, addrspace 8)
+ ; COALESCE-NEXT: S_ENDPGM 0
+ ;
+ ; GFX908-COALESCE-LABEL: name: test
+ ; GFX908-COALESCE: bb.0:
+ ; GFX908-COALESCE-NEXT: successors: %bb.2(0x40000000), %bb.1(0x40000000)
+ ; GFX908-COALESCE-NEXT: liveins: $sgpr4_sgpr5
+ ; GFX908-COALESCE-NEXT: {{ $}}
+ ; GFX908-COALESCE-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr4_sgpr5
+ ; GFX908-COALESCE-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]](p4), 0, 0 :: (dereferenceable invariant load (s32), align 16, addrspace 4)
+ ; GFX908-COALESCE-NEXT: undef [[S_MOV_B32_:%[0-9]+]].sub0:sgpr_128 = S_MOV_B32 0
+ ; GFX908-COALESCE-NEXT: S_BITCMP0_B32 [[S_LOAD_DWORD_IMM]], 0, implicit-def $scc
+ ; GFX908-COALESCE-NEXT: S_CBRANCH_SCC0 %bb.2, implicit killed $scc
+ ; GFX908-COALESCE-NEXT: {{ $}}
+ ; GFX908-COALESCE-NEXT: bb.1:
+ ; GFX908-COALESCE-NEXT: successors: %bb.3(0x80000000)
+ ; GFX908-COALESCE-NEXT: {{ $}}
+ ; GFX908-COALESCE-NEXT: undef [[V_ACCVGPR_WRITE_B32_e64_:%[0-9]+]].sub0:areg_128_align2 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec
+ ; GFX908-COALESCE-NEXT: S_BRANCH %bb.3
+ ; GFX908-COALESCE-NEXT: {{ $}}
+ ; GFX908-COALESCE-NEXT: bb.2:
+ ; GFX908-COALESCE-NEXT: successors: %bb.3(0x80000000)
+ ; GFX908-COALESCE-NEXT: {{ $}}
+ ; GFX908-COALESCE-NEXT: undef [[V_ACCVGPR_WRITE_B32_e64_1:%[0-9]+]].sub0:areg_128_align2 = V_ACCVGPR_WRITE_B32_e64 0, implicit $exec
+ ; GFX908-COALESCE-NEXT: [[V_ACCVGPR_WRITE_B32_e64_1:%[0-9]+]].sub1:areg_128_align2 = COPY [[V_ACCVGPR_WRITE_B32_e64_1]].sub0
+ ; GFX908-COALESCE-NEXT: [[V_ACCVGPR_WRITE_B32_e64_1:%[0-9]+]].sub2:areg_128_align2 = COPY [[V_ACCVGPR_WRITE_B32_e64_1]].sub0
+ ; GFX908-COALESCE-NEXT: [[V_ACCVGPR_WRITE_B32_e64_1:%[0-9]+]].sub3:areg_128_align2 = COPY [[V_ACCVGPR_WRITE_B32_e64_1]].sub0
+ ; GFX908-COALESCE-NEXT: [[S_MOV_B32_:%[0-9]+]].sub1:sgpr_128 = COPY [[S_MOV_B32_]].sub0
+ ; GFX908-COALESCE-NEXT: [[COPY1:%[0-9]+]]:vreg_64_align2 = COPY [[S_MOV_B32_]].sub0_sub1
+ ; GFX908-COALESCE-NEXT: [[V_MFMA_F32_16X16X16F16_e64_:%[0-9]+]]:areg_128_align2 = V_MFMA_F32_16X16X16F16_e64 [[COPY1]], [[COPY1]], [[V_ACCVGPR_WRITE_B32_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
+ ; GFX908-COALESCE-NEXT: [[V_MFMA_F32_16X16X16F16_e64_1:%[0-9]+]]:areg_128_align2 = V_MFMA_F32_16X16X16F16_e64 [[COPY1]], [[COPY1]], [[V_MFMA_F32_16X16X16F16_e64_]], 0, 0, 0, implicit $mode, implicit $exec
+ ; GFX908-COALESCE-NEXT: [[V_MFMA_F32_16X16X16F16_e64_2:%[0-9]+]]:areg_128_align2 = V_MFMA_F32_16X16X16F16_e64 [[COPY1]], [[COPY1]], [[V_MFMA_F32_16X16X16F16_e64_1]], 0, 0, 0, implicit $mode, implicit $exec
+ ; GFX908-COALESCE-NEXT: [[V_ACCVGPR_WRITE_B32_e64_:%[0-9]+]]:areg_128_align2 = V_MFMA_F32_16X16X16F16_e64 [[COPY1]], [[COPY1]], [[V_MFMA_F32_16X16X16F16_e64_2]], 0, 0, 0, implicit $mode, implicit $exec
+ ; GFX908-COALESCE-NEXT: {{ $}}
+ ; GFX908-COALESCE-NEXT: bb.3:
+ ; GFX908-COALESCE-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[V_ACCVGPR_WRITE_B32_e64_]].sub0
+ ; GFX908-COALESCE-NEXT: [[V_CVT_F16_F32_e32_:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_F16_F32_e32 [[COPY2]], implicit $mode, implicit $exec
+ ; GFX908-COALESCE-NEXT: undef [[V_PACK_B32_F16_e64_:%[0-9]+]].sub0:vreg_64_align2 = nofpexcept V_PACK_B32_F16_e64 0, [[V_CVT_F16_F32_e32_]], 0, 0, 0, 0, implicit $mode, implicit $exec
+ ; GFX908-COALESCE-NEXT: [[V_PACK_B32_F16_e64_:%[0-9]+]].sub1:vreg_64_align2 = V_MOV_B32_e32 0, implicit $exec
+ ; GFX908-COALESCE-NEXT: [[S_MOV_B32_:%[0-9]+]].sub1:sgpr_128 = COPY [[S_MOV_B32_]].sub0
+ ; GFX908-COALESCE-NEXT: [[S_MOV_B32_:%[0-9]+]].sub2:sgpr_128 = COPY [[S_MOV_B32_]].sub0
+ ; GFX908-COALESCE-NEXT: [[S_MOV_B32_:%[0-9]+]].sub3:sgpr_128 = COPY [[S_MOV_B32_]].sub0
+ ; GFX908-COALESCE-NEXT: BUFFER_STORE_DWORDX2_OFFSET_exact [[V_PACK_B32_F16_e64_]], [[S_MOV_B32_]], 0, 0, 0, 0, implicit $exec :: (dereferenceable store (s64) into `ptr addrspace(8) null`, align 1, addrspace 8)
+ ; GFX908-COALESCE-NEXT: S_ENDPGM 0
+ bb.0:
+ successors: %bb.2, %bb.1
+ liveins: $sgpr4_sgpr5
+
+ %0:sgpr_64(p4) = COPY $sgpr4_sgpr5
+ %1:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %0(p4), 0, 0 :: (dereferenceable invariant load (s32), align 16, addrspace 4)
+ %2:sgpr_32 = S_MOV_B32 0
+ S_BITCMP0_B32 killed %1, 0, implicit-def $scc
+ S_CBRANCH_SCC0 %bb.2, implicit $scc
+
+ bb.1:
+ successors: %bb.3
+
+ %3:sgpr_32 = COPY %2
+ %4:vgpr_32 = COPY %3, implicit $exec
+ S_BRANCH %bb.3
+
+ bb.2:
+ successors: %bb.3
+
+ %5:sgpr_32 = S_MOV_B32 0
+ %6:vgpr_32 = COPY %5
+ %7:agpr_32 = V_ACCVGPR_WRITE_B32_e64 %6, implicit $exec
+ %8:agpr_32 = V_ACCVGPR_WRITE_B32_e64 %6, implicit $exec
+ %9:agpr_32 = V_ACCVGPR_WRITE_B32_e64 %6, implicit $exec
+ %10:agpr_32 = V_ACCVGPR_WRITE_B32_e64 %6, implicit $exec
+ %11:areg_128_align2 = REG_SEQUENCE %7, %subreg.sub0, %8, %subreg.sub1, %9, %subreg.sub2, %10, %subreg.sub3
+ %12:sreg_64 = REG_SEQUENCE %5, %subreg.sub0, %5, %subreg.sub1
+ %13:vreg_64_align2 = COPY %12
+ %14:areg_128_align2 = V_MFMA_F32_16X16X16F16_e64 %13, %13, killed %11, 0, 0, 0, implicit $mode, implicit $exec
+ %15:areg_128_align2 = V_MFMA_F32_16X16X16F16_e64 %13, %13, killed %14, 0, 0, 0, implicit $mode, implicit $exec
+ %16:areg_128_align2 = V_MFMA_F32_16X16X16F16_e64 %13, %13, killed %15, 0, 0, 0, implicit $mode, implicit $exec
+ %17:areg_128_align2 = V_MFMA_F32_16X16X16F16_e64 %13, %13, killed %16, 0, 0, 0, implicit $mode, implicit $exec
+ %18:vgpr_32 = COPY %17.sub0
+ %19:vgpr_32 = COPY %18
+
+ bb.3:
+ %20:vgpr_32 = PHI %4, %bb.1, %19, %bb.2
+ %21:vgpr_32 = nofpexcept V_CVT_F16_F32_e64 0, %20, 0, 0, implicit $mode, implicit $exec
+ %22:vgpr_32 = nofpexcept V_PACK_B32_F16_e64 0, killed %21, 0, %2, 0, 0, implicit $mode, implicit $exec
+ %23:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+ %24:vreg_64_align2 = REG_SEQUENCE %22, %subreg.sub0, killed %23, %subreg.sub1
+ %25:sgpr_128 = REG_SEQUENCE %2, %subreg.sub0, %2, %subreg.sub1, %2, %subreg.sub2, %2, %subreg.sub3
+ %26:vreg_64_align2 = COPY %24
+ BUFFER_STORE_DWORDX2_OFFSET_exact killed %26, killed %25, %2, 0, 0, 0, implicit $exec :: (dereferenceable store (s64) into `ptr addrspace(8) null`, align 1, addrspace 8)
+ S_ENDPGM 0
+
+...
diff --git a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
index 98d5f30..a2a0107 100644
--- a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
+++ b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
@@ -1372,20 +1372,19 @@ define amdgpu_kernel void @Offset64(ptr addrspace(1) %buffer) {
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v1, v0
; GFX8-NEXT: v_addc_u32_e32 v4, vcc, 0, v2, vcc
; GFX8-NEXT: s_movk_i32 s0, 0xf000
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, s0, v3
-; GFX8-NEXT: v_addc_u32_e32 v6, vcc, 0, v4, vcc
+; GFX8-NEXT: v_add_u32_e32 v7, vcc, s0, v3
+; GFX8-NEXT: v_addc_u32_e32 v8, vcc, 0, v4, vcc
; GFX8-NEXT: s_movk_i32 s0, 0xf800
-; GFX8-NEXT: flat_load_dwordx2 v[7:8], v[3:4]
-; GFX8-NEXT: flat_load_dwordx2 v[5:6], v[5:6]
+; GFX8-NEXT: flat_load_dwordx2 v[5:6], v[3:4]
+; GFX8-NEXT: flat_load_dwordx2 v[7:8], v[7:8]
; GFX8-NEXT: v_add_u32_e32 v9, vcc, s0, v3
; GFX8-NEXT: v_addc_u32_e32 v10, vcc, 0, v4, vcc
; GFX8-NEXT: flat_load_dwordx2 v[9:10], v[9:10]
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0, v3
-; GFX8-NEXT: v_addc_u32_e32 v4, vcc, 1, v4, vcc
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, 1, v4
; GFX8-NEXT: flat_load_dwordx2 v[3:4], v[3:4]
; GFX8-NEXT: s_waitcnt vmcnt(2)
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, v5, v7
-; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v6, v8, vcc
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v7, v5
+; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v8, v6, vcc
; GFX8-NEXT: s_waitcnt vmcnt(1)
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v9, v0
; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v10, v5, vcc
@@ -1416,32 +1415,32 @@ define amdgpu_kernel void @Offset64(ptr addrspace(1) %buffer) {
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 7, v0
-; GFX9-NEXT: v_and_b32_e32 v12, 0xffff8000, v1
+; GFX9-NEXT: v_and_b32_e32 v10, 0xffff8000, v1
; GFX9-NEXT: v_mov_b32_e32 v1, s35
-; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s34, v12
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s34, v10
; GFX9-NEXT: v_mov_b32_e32 v3, 3
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: v_lshlrev_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v2, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, 0, v0
-; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 1, v1, vcc
-; GFX9-NEXT: global_load_dwordx2 v[2:3], v[0:1], off
-; GFX9-NEXT: global_load_dwordx2 v[6:7], v[4:5], off offset:-4096
; GFX9-NEXT: s_movk_i32 s0, 0xf000
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
-; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: global_load_dwordx2 v[8:9], v[4:5], off
-; GFX9-NEXT: global_load_dwordx2 v[10:11], v[0:1], off offset:2048
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v[2:3], off
+; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off
+; GFX9-NEXT: global_load_dwordx2 v[8:9], v[2:3], off offset:2048
+; GFX9-NEXT: v_add_u32_e32 v1, 1, v1
+; GFX9-NEXT: global_load_dwordx2 v[0:1], v[0:1], off
; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v6, v2
-; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v7, v3, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v4, v6
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v5, v7, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v8, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v9, v3, vcc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v10, v0
-; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v11, v1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v8, v0
-; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v9, v1, vcc
-; GFX9-NEXT: global_store_dwordx2 v12, v[0:1], s[34:35]
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: global_store_dwordx2 v10, v[0:1], s[34:35]
; GFX9-NEXT: s_endpgm
;
; GFX10-LABEL: Offset64:
@@ -1477,8 +1476,7 @@ define amdgpu_kernel void @Offset64(ptr addrspace(1) %buffer) {
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
; GFX10-NEXT: global_load_dwordx2 v[6:7], v[2:3], off offset:-2048
-; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, 0, v0
-; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 1, v1, vcc_lo
+; GFX10-NEXT: v_add_nc_u32_e32 v1, 1, v1
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: global_load_dwordx2 v[8:9], v[2:3], off
; GFX10-NEXT: global_load_dwordx2 v[10:11], v[0:1], off
@@ -1517,25 +1515,25 @@ define amdgpu_kernel void @Offset64(ptr addrspace(1) %buffer) {
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v1, v0
; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v2, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v0, 0
-; GFX11-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, 1, v1, vcc_lo
-; GFX11-NEXT: global_load_b64 v[2:3], v[0:1], off
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, 0xfffff000, v0
-; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, 0xfffff000, v0
+; GFX11-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v1, vcc_lo
; GFX11-NEXT: s_clause 0x2
-; GFX11-NEXT: global_load_b64 v[6:7], v[4:5], off offset:-4096
-; GFX11-NEXT: global_load_b64 v[4:5], v[4:5], off
-; GFX11-NEXT: global_load_b64 v[0:1], v[0:1], off offset:2048
+; GFX11-NEXT: global_load_b64 v[4:5], v[2:3], off
+; GFX11-NEXT: global_load_b64 v[6:7], v[0:1], off
+; GFX11-NEXT: global_load_b64 v[2:3], v[2:3], off offset:2048
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 1, v1
+; GFX11-NEXT: global_load_b64 v[0:1], v[0:1], off
; GFX11-NEXT: s_waitcnt vmcnt(2)
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v6, v2
-; GFX11-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v7, v3, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, v4, v6
+; GFX11-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, v5, v7, vcc_lo
+; GFX11-NEXT: s_waitcnt vmcnt(1)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, v2, v4
+; GFX11-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v5, vcc_lo
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v4, v0
-; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v5, v1, vcc_lo
; GFX11-NEXT: global_store_b64 v8, v[0:1], s[34:35]
; GFX11-NEXT: s_endpgm
entry:
@@ -2408,18 +2406,17 @@ define hidden amdgpu_kernel void @negativeoffset(ptr addrspace(1) nocapture %buf
; GFX8-NEXT: v_mov_b32_e32 v3, 3
; GFX8-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
; GFX8-NEXT: v_lshlrev_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX8-NEXT: v_add_u32_e32 v0, vcc, v1, v0
-; GFX8-NEXT: v_addc_u32_e32 v6, vcc, 0, v2, vcc
+; GFX8-NEXT: v_add_u32_e32 v3, vcc, v1, v0
+; GFX8-NEXT: v_addc_u32_e32 v0, vcc, 0, v2, vcc
; GFX8-NEXT: s_movk_i32 s0, 0x800
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, s0, v0
-; GFX8-NEXT: v_addc_u32_e32 v4, vcc, -1, v6, vcc
-; GFX8-NEXT: v_add_u32_e32 v5, vcc, 0, v0
-; GFX8-NEXT: v_addc_u32_e32 v6, vcc, -1, v6, vcc
-; GFX8-NEXT: flat_load_dwordx2 v[3:4], v[3:4]
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, s0, v3
+; GFX8-NEXT: v_addc_u32_e32 v6, vcc, -1, v0, vcc
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, -1, v0
; GFX8-NEXT: flat_load_dwordx2 v[5:6], v[5:6]
+; GFX8-NEXT: flat_load_dwordx2 v[3:4], v[3:4]
; GFX8-NEXT: s_waitcnt vmcnt(0)
-; GFX8-NEXT: v_add_u32_e32 v3, vcc, v5, v3
-; GFX8-NEXT: v_addc_u32_e32 v4, vcc, v6, v4, vcc
+; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v5
+; GFX8-NEXT: v_addc_u32_e32 v4, vcc, v4, v6, vcc
; GFX8-NEXT: flat_store_dwordx2 v[1:2], v[3:4]
; GFX8-NEXT: s_endpgm
;
@@ -2450,14 +2447,13 @@ define hidden amdgpu_kernel void @negativeoffset(ptr addrspace(1) nocapture %buf
; GFX9-NEXT: v_mov_b32_e32 v3, 3
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: v_lshlrev_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v0
-; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 0x1000, v2
-; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v3, vcc
-; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, 0, v2
-; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v3, vcc
-; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:-2048
-; GFX9-NEXT: global_load_dwordx2 v[6:7], v[2:3], off
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v2, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, 0x1000, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, -1, v1, vcc
+; GFX9-NEXT: v_add_u32_e32 v1, -1, v1
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v[2:3], off offset:-2048
+; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v6, v4
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v7, v5, vcc
@@ -2490,15 +2486,14 @@ define hidden amdgpu_kernel void @negativeoffset(ptr addrspace(1) nocapture %buf
; GFX10-NEXT: v_lshlrev_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX10-NEXT: v_add_co_u32 v1, s0, s34, v8
; GFX10-NEXT: v_add_co_ci_u32_e64 v2, s0, s35, 0, s0
-; GFX10-NEXT: v_add_co_u32 v3, vcc_lo, v1, v0
-; GFX10-NEXT: v_add_co_ci_u32_e32 v4, vcc_lo, 0, v2, vcc_lo
-; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, 0x800, v3
-; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, -1, v4, vcc_lo
-; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, 0, v3
-; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, -1, v4, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v1, v0
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v2, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v2, vcc_lo, 0x800, v0
+; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, -1, v1, vcc_lo
+; GFX10-NEXT: v_add_nc_u32_e32 v1, -1, v1
; GFX10-NEXT: s_clause 0x1
-; GFX10-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
-; GFX10-NEXT: global_load_dwordx2 v[6:7], v[2:3], off
+; GFX10-NEXT: global_load_dwordx2 v[4:5], v[2:3], off
+; GFX10-NEXT: global_load_dwordx2 v[6:7], v[0:1], off
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, v6, v4
; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v7, v5, vcc_lo
@@ -2525,19 +2520,18 @@ define hidden amdgpu_kernel void @negativeoffset(ptr addrspace(1) nocapture %buf
; GFX11-NEXT: v_add_co_u32 v1, s0, s34, v4
; GFX11-NEXT: v_add_co_ci_u32_e64 v2, null, s35, 0, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_co_u32 v3, vcc_lo, v1, v0
-; GFX11-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, 0, v2, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v1, v0
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, 0, v2, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, 0x1000, v3
-; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, -1, v5, vcc_lo
-; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, 0, v3
-; GFX11-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, -1, v5, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v2, vcc_lo, 0x1000, v0
+; GFX11-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, -1, v1, vcc_lo
+; GFX11-NEXT: v_add_nc_u32_e32 v1, -1, v1
; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: global_load_b64 v[0:1], v[0:1], off offset:-2048
-; GFX11-NEXT: global_load_b64 v[2:3], v[2:3], off
+; GFX11-NEXT: global_load_b64 v[2:3], v[2:3], off offset:-2048
+; GFX11-NEXT: global_load_b64 v[0:1], v[0:1], off
; GFX11-NEXT: s_waitcnt vmcnt(0)
-; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v2, v0
-; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v3, v1, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
; GFX11-NEXT: global_store_b64 v4, v[0:1], s[34:35]
; GFX11-NEXT: s_endpgm
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/remat-physreg-copy-subreg-extract-already-live-at-def-issue120970.mir b/llvm/test/CodeGen/AMDGPU/remat-physreg-copy-subreg-extract-already-live-at-def-issue120970.mir
new file mode 100644
index 0000000..3879f6d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/remat-physreg-copy-subreg-extract-already-live-at-def-issue120970.mir
@@ -0,0 +1,85 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -run-pass=register-coalescer -o - %s | FileCheck %s
+
+# This used to assert when trying to rematerialize V_MOV_B64_PSEUDO at
+# the copy to $vgpr1: the rematerialized def would clobber the value
+# already live in $vgpr0.
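+# The expected behavior, per the CHECK lines below, is that the
+# V_MOV_B64_PSEUDO stays in a virtual register and only its .sub1 is
+# copied into $vgpr1 after $vgpr0 is defined, rather than being
+# rematerialized into the $vgpr0_vgpr1 pair over the live $vgpr0.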
+
+---
+name: rematerialize_physreg_sub_def_already_live_at_def_assert
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; CHECK-LABEL: name: rematerialize_physreg_sub_def_already_live_at_def_assert
+ ; CHECK: liveins: $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[V_MOV_B:%[0-9]+]]:vreg_64 = V_MOV_B64_PSEUDO 1, implicit $exec
+ ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: $vgpr1 = COPY [[V_MOV_B]].sub1
+ ; CHECK-NEXT: SI_RETURN implicit $vgpr0, implicit killed $vgpr1
+ %0:vreg_64 = V_MOV_B64_PSEUDO 1, implicit $exec
+ %1:vgpr_32 = COPY %0.sub1
+ $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ $vgpr1 = COPY %1
+ SI_RETURN implicit $vgpr0, implicit killed $vgpr1
+...
+
+# Same as previous, except with an IMPLICIT_DEF
+---
+name: rematerialize_physreg_sub_def_already_live_at_def_assert_implicit_def
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; CHECK-LABEL: name: rematerialize_physreg_sub_def_already_live_at_def_assert_implicit_def
+ ; CHECK: liveins: $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:vreg_64 = IMPLICIT_DEF
+ ; CHECK-NEXT: $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ ; CHECK-NEXT: $vgpr1 = COPY [[DEF]].sub1
+ ; CHECK-NEXT: SI_RETURN implicit $vgpr0, implicit killed $vgpr1
+ %0:vreg_64 = IMPLICIT_DEF
+ %1:vgpr_32 = COPY %0.sub1
+ $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+ $vgpr1 = COPY %1
+ SI_RETURN implicit $vgpr0, implicit killed $vgpr1
+...
+
+---
+name: rematerialize_physreg_sub_def_no_live_sub_def_0
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; CHECK-LABEL: name: rematerialize_physreg_sub_def_no_live_sub_def_0
+ ; CHECK: liveins: $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: dead $vgpr0_vgpr1 = V_MOV_B64_PSEUDO 1, implicit $exec, implicit-def $vgpr1
+ ; CHECK-NEXT: SI_RETURN implicit killed $vgpr1
+ %0:vreg_64 = V_MOV_B64_PSEUDO 1, implicit $exec
+ %1:vgpr_32 = COPY %0.sub1
+ $vgpr1 = COPY %1
+ SI_RETURN implicit killed $vgpr1
+...
+
+---
+name: rematerialize_physreg_sub_def_no_live_sub_def_1
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $vgpr0
+
+ ; CHECK-LABEL: name: rematerialize_physreg_sub_def_no_live_sub_def_1
+ ; CHECK: liveins: $vgpr0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: dead $vgpr1_vgpr2 = V_MOV_B64_PSEUDO 1, implicit $exec, implicit-def $vgpr1
+ ; CHECK-NEXT: SI_RETURN implicit killed $vgpr1
+ %0:vreg_64 = V_MOV_B64_PSEUDO 1, implicit $exec
+ %1:vgpr_32 = COPY %0.sub0
+ $vgpr1 = COPY %1
+ SI_RETURN implicit killed $vgpr1
+...
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index 3e8768c..96dd627 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -1065,100 +1065,37 @@ define amdgpu_kernel void @s_test_sdiv24_48(ptr addrspace(1) %out, i48 %x, i48 %
; GCN-NEXT: s_endpgm
;
; GCN-IR-LABEL: s_test_sdiv24_48:
-; GCN-IR: ; %bb.0: ; %_udiv-special-cases
-; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0xb
-; GCN-IR-NEXT: s_mov_b32 s15, 0
-; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT: s_sext_i32_i16 s1, s1
-; GCN-IR-NEXT: s_ashr_i64 s[0:1], s[0:1], 24
-; GCN-IR-NEXT: s_sext_i32_i16 s3, s3
-; GCN-IR-NEXT: s_lshl_b64 s[0:1], s[0:1], 16
-; GCN-IR-NEXT: s_ashr_i64 s[2:3], s[2:3], 24
-; GCN-IR-NEXT: s_ashr_i64 s[6:7], s[0:1], 16
-; GCN-IR-NEXT: s_ashr_i32 s0, s1, 31
-; GCN-IR-NEXT: s_lshl_b64 s[2:3], s[2:3], 16
-; GCN-IR-NEXT: s_mov_b32 s1, s0
-; GCN-IR-NEXT: s_ashr_i64 s[8:9], s[2:3], 16
-; GCN-IR-NEXT: s_ashr_i32 s2, s3, 31
-; GCN-IR-NEXT: s_xor_b64 s[6:7], s[6:7], s[0:1]
-; GCN-IR-NEXT: s_mov_b32 s3, s2
-; GCN-IR-NEXT: s_sub_u32 s12, s6, s0
-; GCN-IR-NEXT: s_subb_u32 s13, s7, s0
-; GCN-IR-NEXT: s_xor_b64 s[6:7], s[8:9], s[2:3]
-; GCN-IR-NEXT: s_sub_u32 s6, s6, s2
-; GCN-IR-NEXT: s_subb_u32 s7, s7, s2
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[12:13], 0
-; GCN-IR-NEXT: s_flbit_i32_b64 s14, s[6:7]
-; GCN-IR-NEXT: s_or_b64 s[10:11], s[8:9], s[10:11]
-; GCN-IR-NEXT: s_flbit_i32_b64 s20, s[12:13]
-; GCN-IR-NEXT: s_sub_u32 s16, s14, s20
-; GCN-IR-NEXT: s_subb_u32 s17, 0, 0
-; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[18:19], s[16:17], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[22:23], s[16:17], 63
-; GCN-IR-NEXT: s_or_b64 s[18:19], s[10:11], s[18:19]
-; GCN-IR-NEXT: s_and_b64 s[10:11], s[18:19], exec
-; GCN-IR-NEXT: s_cselect_b32 s11, 0, s13
-; GCN-IR-NEXT: s_cselect_b32 s10, 0, s12
-; GCN-IR-NEXT: s_or_b64 s[18:19], s[18:19], s[22:23]
-; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[18:19]
-; GCN-IR-NEXT: s_cbranch_vccz .LBB9_5
-; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: s_add_u32 s18, s16, 1
-; GCN-IR-NEXT: s_addc_u32 s19, s17, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[18:19], 0
-; GCN-IR-NEXT: s_sub_i32 s16, 63, s16
-; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11]
-; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[12:13], s16
-; GCN-IR-NEXT: s_cbranch_vccz .LBB9_4
-; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: s_lshr_b64 s[16:17], s[12:13], s18
-; GCN-IR-NEXT: s_add_u32 s18, s6, -1
-; GCN-IR-NEXT: s_addc_u32 s19, s7, -1
-; GCN-IR-NEXT: s_not_b64 s[8:9], s[14:15]
-; GCN-IR-NEXT: s_add_u32 s12, s8, s20
-; GCN-IR-NEXT: s_addc_u32 s13, s9, 0
-; GCN-IR-NEXT: s_mov_b64 s[14:15], 0
-; GCN-IR-NEXT: s_mov_b32 s9, 0
-; GCN-IR-NEXT: .LBB9_3: ; %udiv-do-while
-; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: s_lshl_b64 s[16:17], s[16:17], 1
-; GCN-IR-NEXT: s_lshr_b32 s8, s11, 31
-; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[10:11], 1
-; GCN-IR-NEXT: s_or_b64 s[16:17], s[16:17], s[8:9]
-; GCN-IR-NEXT: s_or_b64 s[10:11], s[14:15], s[10:11]
-; GCN-IR-NEXT: s_sub_u32 s8, s18, s16
-; GCN-IR-NEXT: s_subb_u32 s8, s19, s17
-; GCN-IR-NEXT: s_ashr_i32 s14, s8, 31
-; GCN-IR-NEXT: s_mov_b32 s15, s14
-; GCN-IR-NEXT: s_and_b32 s8, s14, 1
-; GCN-IR-NEXT: s_and_b64 s[14:15], s[14:15], s[6:7]
-; GCN-IR-NEXT: s_sub_u32 s16, s16, s14
-; GCN-IR-NEXT: s_subb_u32 s17, s17, s15
-; GCN-IR-NEXT: s_add_u32 s12, s12, 1
-; GCN-IR-NEXT: s_addc_u32 s13, s13, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[20:21], s[12:13], 0
-; GCN-IR-NEXT: s_mov_b64 s[14:15], s[8:9]
-; GCN-IR-NEXT: s_and_b64 vcc, exec, s[20:21]
-; GCN-IR-NEXT: s_cbranch_vccz .LBB9_3
-; GCN-IR-NEXT: .LBB9_4: ; %Flow4
-; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[10:11], 1
-; GCN-IR-NEXT: s_or_b64 s[10:11], s[8:9], s[6:7]
-; GCN-IR-NEXT: .LBB9_5: ; %udiv-end
-; GCN-IR-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x9
-; GCN-IR-NEXT: s_xor_b64 s[0:1], s[2:3], s[0:1]
-; GCN-IR-NEXT: s_xor_b64 s[2:3], s[10:11], s[0:1]
-; GCN-IR-NEXT: s_sub_u32 s0, s2, s0
-; GCN-IR-NEXT: s_subb_u32 s1, s3, s1
+; GCN-IR: ; %bb.0:
+; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GCN-IR-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xd
; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
; GCN-IR-NEXT: s_mov_b32 s6, -1
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s1
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT: buffer_store_short v0, off, s[4:7], 0 offset:4
-; GCN-IR-NEXT: s_waitcnt expcnt(0)
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s0
+; GCN-IR-NEXT: s_mov_b32 s5, s1
+; GCN-IR-NEXT: s_sext_i32_i16 s1, s9
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s8
+; GCN-IR-NEXT: v_alignbit_b32 v0, s1, v0, 24
+; GCN-IR-NEXT: v_cvt_f32_i32_e32 v1, v0
+; GCN-IR-NEXT: s_mov_b32 s4, s0
+; GCN-IR-NEXT: s_sext_i32_i16 s0, s3
+; GCN-IR-NEXT: v_mov_b32_e32 v2, s2
+; GCN-IR-NEXT: v_alignbit_b32 v2, s0, v2, 24
+; GCN-IR-NEXT: v_cvt_f32_i32_e32 v3, v2
+; GCN-IR-NEXT: v_rcp_iflag_f32_e32 v4, v1
+; GCN-IR-NEXT: v_xor_b32_e32 v0, v2, v0
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v0, 30, v0
+; GCN-IR-NEXT: v_or_b32_e32 v0, 1, v0
+; GCN-IR-NEXT: v_mul_f32_e32 v2, v3, v4
+; GCN-IR-NEXT: v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT: v_mad_f32 v3, -v2, v1, v3
+; GCN-IR-NEXT: v_cvt_i32_f32_e32 v2, v2
+; GCN-IR-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, |v1|
+; GCN-IR-NEXT: v_cndmask_b32_e32 v0, 0, v0, vcc
+; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT: v_bfe_i32 v0, v0, 0, 24
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; GCN-IR-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-IR-NEXT: buffer_store_short v1, off, s[4:7], 0 offset:4
; GCN-IR-NEXT: s_endpgm
%1 = ashr i48 %x, 24
%2 = ashr i48 %y, 24
diff --git a/llvm/test/CodeGen/AMDGPU/smed3.ll b/llvm/test/CodeGen/AMDGPU/smed3.ll
index e0d0ddc..ddf6297 100644
--- a/llvm/test/CodeGen/AMDGPU/smed3.ll
+++ b/llvm/test/CodeGen/AMDGPU/smed3.ll
@@ -1,6 +1,8 @@
; RUN: llc -mtriple=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global,-real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11-FAKE16 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global,+real-true16 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11-TRUE16 %s
declare i32 @llvm.amdgcn.workitem.id.x() #0
@@ -98,6 +100,8 @@ declare i64 @llvm.smin.i64(i64, i64)
; VI: v_max_i16_e32 [[MAX:v[0-9]]], 12, {{v[0-9]}}
; VI: v_min_i16_e32 {{v[0-9]}}, 17, [[MAX]]
; GFX9: v_med3_i16 v{{[0-9]+}}, v{{[0-9]+}}, 12, 17
+; GFX11-TRUE16: v_med3_i16 v{{[0-9]+}}.l, v{{[0-9]+}}.l, 12, 17
+; GFX11-FAKE16: v_med3_i16 v{{[0-9]+}}, v{{[0-9]+}}, 12, 17
define amdgpu_kernel void @v_test_smed3_r_i_i_i16(ptr addrspace(1) %out, ptr addrspace(1) %aptr) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i16, ptr addrspace(1) %aptr, i32 %tid
@@ -686,6 +690,8 @@ bb:
; VI: v_max_i16
; GFX9: v_med3_i16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GFX11-TRUE16: v_med3_i16 v{{[0-9]+}}.l, v{{[0-9]+}}.l, v{{[0-9]+}}.h, v{{[0-9]+}}.l
+; GFX11-FAKE16: v_med3_i16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_test_smed3_i16_pat_0(ptr addrspace(1) %arg, ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #1 {
bb:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -707,6 +713,8 @@ bb:
; GCN-LABEL: {{^}}v_test_smed3_i16_pat_1:
; GFX9: v_med3_i16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GFX11-TRUE16: v_med3_i16 v{{[0-9]+}}.l, v{{[0-9]+}}.l, v{{[0-9]+}}.h, v{{[0-9]+}}.l
+; GFX11-FAKE16: v_med3_i16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_test_smed3_i16_pat_1(ptr addrspace(1) %arg, ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #1 {
bb:
diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index cb8f82d..23364e8 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -1188,109 +1188,39 @@ define amdgpu_kernel void @s_test_srem24_48(ptr addrspace(1) %out, i48 %x, i48 %
; GCN-NEXT: s_endpgm
;
; GCN-IR-LABEL: s_test_srem24_48:
-; GCN-IR: ; %bb.0: ; %_udiv-special-cases
-; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0xb
-; GCN-IR-NEXT: s_mov_b32 s13, 0
+; GCN-IR: ; %bb.0:
+; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GCN-IR-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; GCN-IR-NEXT: s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT: s_mov_b32 s6, -1
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT: s_sext_i32_i16 s1, s1
; GCN-IR-NEXT: s_sext_i32_i16 s3, s3
-; GCN-IR-NEXT: s_ashr_i64 s[0:1], s[0:1], 24
-; GCN-IR-NEXT: s_ashr_i64 s[2:3], s[2:3], 24
-; GCN-IR-NEXT: s_lshl_b64 s[0:1], s[0:1], 16
-; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[2:3], 16
-; GCN-IR-NEXT: s_ashr_i64 s[2:3], s[0:1], 16
-; GCN-IR-NEXT: s_ashr_i32 s0, s1, 31
-; GCN-IR-NEXT: s_mov_b32 s1, s0
-; GCN-IR-NEXT: s_ashr_i64 s[8:9], s[6:7], 16
-; GCN-IR-NEXT: s_xor_b64 s[2:3], s[2:3], s[0:1]
-; GCN-IR-NEXT: s_sub_u32 s2, s2, s0
-; GCN-IR-NEXT: s_subb_u32 s3, s3, s0
-; GCN-IR-NEXT: s_ashr_i32 s10, s7, 31
-; GCN-IR-NEXT: s_mov_b32 s11, s10
-; GCN-IR-NEXT: s_xor_b64 s[6:7], s[8:9], s[10:11]
-; GCN-IR-NEXT: s_sub_u32 s6, s6, s10
-; GCN-IR-NEXT: s_subb_u32 s7, s7, s10
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[2:3], 0
-; GCN-IR-NEXT: s_flbit_i32_b64 s12, s[6:7]
-; GCN-IR-NEXT: s_or_b64 s[10:11], s[8:9], s[10:11]
-; GCN-IR-NEXT: s_flbit_i32_b64 s20, s[2:3]
-; GCN-IR-NEXT: s_sub_u32 s14, s12, s20
-; GCN-IR-NEXT: s_subb_u32 s15, 0, 0
-; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[16:17], s[14:15], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[14:15], 63
-; GCN-IR-NEXT: s_or_b64 s[16:17], s[10:11], s[16:17]
-; GCN-IR-NEXT: s_and_b64 s[10:11], s[16:17], exec
-; GCN-IR-NEXT: s_cselect_b32 s11, 0, s3
-; GCN-IR-NEXT: s_cselect_b32 s10, 0, s2
-; GCN-IR-NEXT: s_or_b64 s[16:17], s[16:17], s[18:19]
-; GCN-IR-NEXT: s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[16:17]
-; GCN-IR-NEXT: s_cbranch_vccz .LBB9_5
-; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: s_add_u32 s16, s14, 1
-; GCN-IR-NEXT: s_addc_u32 s17, s15, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[16:17], 0
-; GCN-IR-NEXT: s_sub_i32 s14, 63, s14
-; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11]
-; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[2:3], s14
-; GCN-IR-NEXT: s_cbranch_vccz .LBB9_4
-; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: s_lshr_b64 s[14:15], s[2:3], s16
-; GCN-IR-NEXT: s_add_u32 s18, s6, -1
-; GCN-IR-NEXT: s_addc_u32 s19, s7, -1
-; GCN-IR-NEXT: s_not_b64 s[8:9], s[12:13]
-; GCN-IR-NEXT: s_add_u32 s12, s8, s20
-; GCN-IR-NEXT: s_addc_u32 s13, s9, 0
-; GCN-IR-NEXT: s_mov_b64 s[16:17], 0
-; GCN-IR-NEXT: s_mov_b32 s9, 0
-; GCN-IR-NEXT: .LBB9_3: ; %udiv-do-while
-; GCN-IR-NEXT: ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT: s_lshl_b64 s[14:15], s[14:15], 1
-; GCN-IR-NEXT: s_lshr_b32 s8, s11, 31
-; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[10:11], 1
-; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[8:9]
-; GCN-IR-NEXT: s_or_b64 s[10:11], s[16:17], s[10:11]
-; GCN-IR-NEXT: s_sub_u32 s8, s18, s14
-; GCN-IR-NEXT: s_subb_u32 s8, s19, s15
-; GCN-IR-NEXT: s_ashr_i32 s16, s8, 31
-; GCN-IR-NEXT: s_mov_b32 s17, s16
-; GCN-IR-NEXT: s_and_b32 s8, s16, 1
-; GCN-IR-NEXT: s_and_b64 s[16:17], s[16:17], s[6:7]
-; GCN-IR-NEXT: s_sub_u32 s14, s14, s16
-; GCN-IR-NEXT: s_subb_u32 s15, s15, s17
-; GCN-IR-NEXT: s_add_u32 s12, s12, 1
-; GCN-IR-NEXT: s_addc_u32 s13, s13, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[20:21], s[12:13], 0
-; GCN-IR-NEXT: s_mov_b64 s[16:17], s[8:9]
-; GCN-IR-NEXT: s_and_b64 vcc, exec, s[20:21]
-; GCN-IR-NEXT: s_cbranch_vccz .LBB9_3
-; GCN-IR-NEXT: .LBB9_4: ; %Flow4
-; GCN-IR-NEXT: s_lshl_b64 s[10:11], s[10:11], 1
-; GCN-IR-NEXT: s_or_b64 s[10:11], s[8:9], s[10:11]
-; GCN-IR-NEXT: .LBB9_5: ; %udiv-end
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s10
-; GCN-IR-NEXT: v_mul_hi_u32 v0, s6, v0
-; GCN-IR-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x9
-; GCN-IR-NEXT: s_mul_i32 s4, s6, s11
-; GCN-IR-NEXT: v_mov_b32_e32 v2, s3
-; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, s4, v0
-; GCN-IR-NEXT: s_mul_i32 s4, s7, s10
-; GCN-IR-NEXT: v_add_i32_e32 v0, vcc, s4, v0
-; GCN-IR-NEXT: s_mul_i32 s4, s6, s10
-; GCN-IR-NEXT: v_mov_b32_e32 v1, s4
-; GCN-IR-NEXT: v_sub_i32_e32 v1, vcc, s2, v1
-; GCN-IR-NEXT: v_subb_u32_e32 v0, vcc, v2, v0, vcc
-; GCN-IR-NEXT: v_xor_b32_e32 v1, s0, v1
-; GCN-IR-NEXT: v_xor_b32_e32 v0, s1, v0
-; GCN-IR-NEXT: v_mov_b32_e32 v2, s1
-; GCN-IR-NEXT: v_subrev_i32_e32 v1, vcc, s0, v1
-; GCN-IR-NEXT: s_mov_b32 s15, 0xf000
-; GCN-IR-NEXT: s_mov_b32 s14, -1
-; GCN-IR-NEXT: v_subb_u32_e32 v0, vcc, v0, v2, vcc
-; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT: buffer_store_short v0, off, s[12:15], 0 offset:4
-; GCN-IR-NEXT: buffer_store_dword v1, off, s[12:15], 0
+; GCN-IR-NEXT: s_sext_i32_i16 s5, s5
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s4
+; GCN-IR-NEXT: v_alignbit_b32 v0, s5, v0, 24
+; GCN-IR-NEXT: v_cvt_f32_i32_e32 v1, v0
+; GCN-IR-NEXT: v_mov_b32_e32 v2, s2
+; GCN-IR-NEXT: v_alignbit_b32 v2, s3, v2, 24
+; GCN-IR-NEXT: v_cvt_f32_i32_e32 v3, v2
+; GCN-IR-NEXT: v_rcp_iflag_f32_e32 v4, v1
+; GCN-IR-NEXT: v_xor_b32_e32 v5, v2, v0
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v5, 30, v5
+; GCN-IR-NEXT: v_or_b32_e32 v5, 1, v5
+; GCN-IR-NEXT: v_mul_f32_e32 v4, v3, v4
+; GCN-IR-NEXT: v_trunc_f32_e32 v4, v4
+; GCN-IR-NEXT: v_mad_f32 v3, -v4, v1, v3
+; GCN-IR-NEXT: v_cvt_i32_f32_e32 v4, v4
+; GCN-IR-NEXT: v_cmp_ge_f32_e64 vcc, |v3|, |v1|
+; GCN-IR-NEXT: v_cndmask_b32_e32 v1, 0, v5, vcc
+; GCN-IR-NEXT: s_mov_b32 s4, s0
+; GCN-IR-NEXT: v_add_i32_e32 v1, vcc, v1, v4
+; GCN-IR-NEXT: v_mul_lo_u32 v0, v1, v0
+; GCN-IR-NEXT: s_mov_b32 s5, s1
+; GCN-IR-NEXT: v_subrev_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT: v_bfe_i32 v0, v0, 0, 24
+; GCN-IR-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-IR-NEXT: buffer_store_short v1, off, s[4:7], 0 offset:4
; GCN-IR-NEXT: s_endpgm
%1 = ashr i48 %x, 24
%2 = ashr i48 %y, 24
diff --git a/llvm/test/CodeGen/AMDGPU/sub64-low-32-bits-known-zero.ll b/llvm/test/CodeGen/AMDGPU/sub64-low-32-bits-known-zero.ll
new file mode 100644
index 0000000..f52f116
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/sub64-low-32-bits-known-zero.ll
@@ -0,0 +1,193 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s
+
+; Reduce a 64-bit sub by a constant if we know the low 32 bits of the
+; constant are all zero.
+
+; sub i64:x, K if computeTrailingZeros(K) >= 32
+; => build_pair (sub x.hi, K.hi), x.lo
+
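+; Rough worked example (illustration only, not a test): for K = 1 << 50 the
+; low 32 bits of K are zero, so only the high half of x changes:
+;   hi' = hi - (K >> 32) = hi - 0x40000 = hi + 0xfffc0000,  lo' = lo
+; which matches the single s_add_i32 / v_add_u32 in the checks below.
+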
+define amdgpu_ps i64 @s_sub_i64_const_low_bits_known0_0(i64 inreg %reg) {
+; GFX9-LABEL: s_sub_i64_const_low_bits_known0_0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_i32 s1, s1, 0xfffc0000
+; GFX9-NEXT: ; return to shader part epilog
+ %sub = sub i64 %reg, 1125899906842624 ; (1 << 50)
+ ret i64 %sub
+}
+
+define amdgpu_ps i64 @s_sub_i64_const_low_bits_known0_1(i64 inreg %reg) {
+; GFX9-LABEL: s_sub_i64_const_low_bits_known0_1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_i32 s1, s1, -1
+; GFX9-NEXT: ; return to shader part epilog
+ %sub = sub i64 %reg, 4294967296 ; (1 << 32)
+ ret i64 %sub
+}
+
+define amdgpu_ps i64 @s_sub_i64_const_low_bits_known0_2(i64 inreg %reg) {
+; GFX9-LABEL: s_sub_i64_const_low_bits_known0_2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_i32 s1, s1, -2
+; GFX9-NEXT: ; return to shader part epilog
+ %sub = sub i64 %reg, 8589934592 ; (1 << 33)
+ ret i64 %sub
+}
+
+define amdgpu_ps i64 @s_sub_i64_const_low_bits_known0_3(i64 inreg %reg) {
+; GFX9-LABEL: s_sub_i64_const_low_bits_known0_3:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_i32 s1, s1, 0x80000000
+; GFX9-NEXT: ; return to shader part epilog
+ %sub = sub i64 %reg, -9223372036854775808 ; (1 << 63)
+ ret i64 %sub
+}
+
+define amdgpu_ps i64 @s_sub_i64_const_low_bits_known0_4(i64 inreg %reg) {
+; GFX9-LABEL: s_sub_i64_const_low_bits_known0_4:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_i32 s1, s1, 1
+; GFX9-NEXT: ; return to shader part epilog
+ %sub = sub i64 %reg, -4294967296 ; 0xffffffff00000000
+ ret i64 %sub
+}
+
+define i64 @v_sub_i64_const_low_bits_known0_0(i64 %reg) {
+; GFX9-LABEL: v_sub_i64_const_low_bits_known0_0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 0xfffc0000, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %sub = sub i64 %reg, 1125899906842624 ; (1 << 50)
+ ret i64 %sub
+}
+
+define i64 @v_sub_i64_const_low_bits_known0_1(i64 %reg) {
+; GFX9-LABEL: v_sub_i64_const_low_bits_known0_1:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, -1, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %sub = sub i64 %reg, 4294967296 ; (1 << 32)
+ ret i64 %sub
+}
+
+define i64 @v_sub_i64_const_low_bits_known0_2(i64 %reg) {
+; GFX9-LABEL: v_sub_i64_const_low_bits_known0_2:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, -2, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %sub = sub i64 %reg, 8589934592 ; (1 << 33)
+ ret i64 %sub
+}
+
+define i64 @v_sub_i64_const_low_bits_known0_3(i64 %reg) {
+; GFX9-LABEL: v_sub_i64_const_low_bits_known0_3:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 0x80000000, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %sub = sub i64 %reg, -9223372036854775808 ; (1 << 63)
+ ret i64 %sub
+}
+
+define i64 @v_sub_i64_const_low_bits_known0_4(i64 %reg) {
+; GFX9-LABEL: v_sub_i64_const_low_bits_known0_4:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 1, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %sub = sub i64 %reg, -4294967296 ; 0xffffffff00000000
+ ret i64 %sub
+}
+
+define amdgpu_ps i64 @s_sub_i64_const_high_bits_known0_0(i64 inreg %reg) {
+; GFX9-LABEL: s_sub_i64_const_high_bits_known0_0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_u32 s0, s0, 1
+; GFX9-NEXT: s_addc_u32 s1, s1, -1
+; GFX9-NEXT: ; return to shader part epilog
+  %sub = sub i64 %reg, 4294967295 ; (1 << 32) - 1
+ ret i64 %sub
+}
+
+define i64 @v_sub_i64_const_high_bits_known0_0(i64 %reg) {
+; GFX9-LABEL: v_sub_i64_const_high_bits_known0_0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 1, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, -1, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+  %sub = sub i64 %reg, 4294967295 ; (1 << 32) - 1
+ ret i64 %sub
+}
+
+define <2 x i64> @v_sub_v2i64_splat_const_low_bits_known0_0(<2 x i64> %reg) {
+; GFX9-LABEL: v_sub_v2i64_splat_const_low_bits_known0_0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, -1, v1
+; GFX9-NEXT: v_add_u32_e32 v3, -1, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %sub = sub <2 x i64> %reg, <i64 4294967296, i64 4294967296> ; (1 << 32)
+ ret <2 x i64> %sub
+}
+
+define <2 x i64> @v_sub_v2i64_nonsplat_const_low_bits_known0_0(<2 x i64> %reg) {
+; GFX9-LABEL: v_sub_v2i64_nonsplat_const_low_bits_known0_0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, -1, v1
+; GFX9-NEXT: v_add_u32_e32 v3, -2, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %sub = sub <2 x i64> %reg, <i64 4294967296, i64 8589934592> ; (1 << 32), (1 << 33)
+ ret <2 x i64> %sub
+}
+
+define amdgpu_ps <2 x i64> @s_sub_v2i64_splat_const_low_bits_known0_0(<2 x i64> inreg %reg) {
+; GFX9-LABEL: s_sub_v2i64_splat_const_low_bits_known0_0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_i32 s1, s1, -1
+; GFX9-NEXT: s_add_i32 s3, s3, -1
+; GFX9-NEXT: ; return to shader part epilog
+ %sub = sub <2 x i64> %reg, <i64 4294967296, i64 4294967296> ; (1 << 32)
+ ret <2 x i64> %sub
+}
+
+define amdgpu_ps <2 x i64> @s_sub_v2i64_nonsplat_const_low_bits_known0_0(<2 x i64> inreg %reg) {
+; GFX9-LABEL: s_sub_v2i64_nonsplat_const_low_bits_known0_0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_i32 s1, s1, -1
+; GFX9-NEXT: s_add_i32 s3, s3, -2
+; GFX9-NEXT: ; return to shader part epilog
+ %sub = sub <2 x i64> %reg, <i64 4294967296, i64 8589934592> ; (1 << 32), (1 << 33)
+ ret <2 x i64> %sub
+}
+
+; We could reduce this to use a 32-bit sub if we use computeKnownBits
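+; A minimal sketch of the missed fold, assuming known bits were consulted:
+; %in.high.bits = (zext i32 %offset.hi32 to i64) << 32 has a known-zero low
+; half, so the sub could be a single 32-bit sub on the high half rather than
+; the full carry chain emitted below.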
+define i64 @v_sub_i64_variable_high_bits_known0_0(i64 %reg, i32 %offset.hi32) {
+; GFX9-LABEL: v_sub_i64_variable_high_bits_known0_0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_subrev_co_u32_e32 v0, vcc, 0, v0
+; GFX9-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v2, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+ %zext.offset.hi32 = zext i32 %offset.hi32 to i64
+ %in.high.bits = shl i64 %zext.offset.hi32, 32
+ %sub = sub i64 %reg, %in.high.bits
+ ret i64 %sub
+}
+
+; We could reduce this to use a 32-bit sub if we use computeKnownBits
+define amdgpu_ps i64 @s_sub_i64_variable_high_bits_known0_0(i64 inreg %reg, i32 inreg %offset.hi32) {
+; GFX9-LABEL: s_sub_i64_variable_high_bits_known0_0:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_sub_u32 s0, s0, 0
+; GFX9-NEXT: s_subb_u32 s1, s1, s2
+; GFX9-NEXT: ; return to shader part epilog
+ %zext.offset.hi32 = zext i32 %offset.hi32 to i64
+ %in.high.bits = shl i64 %zext.offset.hi32, 32
+ %sub = sub i64 %reg, %in.high.bits
+ ret i64 %sub
+}
diff --git a/llvm/test/CodeGen/AMDGPU/swdev502267-use-after-free-last-chance-recoloring-alloc-succeeds.mir b/llvm/test/CodeGen/AMDGPU/swdev502267-use-after-free-last-chance-recoloring-alloc-succeeds.mir
index 3135e3c..8315708 100644
--- a/llvm/test/CodeGen/AMDGPU/swdev502267-use-after-free-last-chance-recoloring-alloc-succeeds.mir
+++ b/llvm/test/CodeGen/AMDGPU/swdev502267-use-after-free-last-chance-recoloring-alloc-succeeds.mir
@@ -1,5 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
-# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -stress-regalloc=4 -start-before=greedy,2 -stop-after=virtregrewriter,2 -o - %s | FileCheck %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx942 -stress-regalloc=4 -verify-regalloc -start-before=greedy,2 -stop-after=virtregrewriter,2 -o - %s | FileCheck %s
# This testcase hit a situation where greedy would hit a use after
# free during last chance recoloring. This case successfully allocates
@@ -26,11 +26,12 @@ body: |
; CHECK-NEXT: renamable $vgpr5 = V_FMA_F32_e64 0, $vgpr7, 0, $vgpr7, 0, $vgpr2, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: SI_SPILL_AV128_SAVE $vgpr6_vgpr7_vgpr8_vgpr9, %stack.2, $sgpr32, 0, implicit $exec :: (store (s128) into %stack.2, align 4, addrspace 5)
; CHECK-NEXT: renamable $vgpr6 = V_FMA_F32_e64 0, killed $vgpr8, 0, $vgpr8, 0, $vgpr1, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr7 = IMPLICIT_DEF
; CHECK-NEXT: S_CBRANCH_EXECZ %bb.2, implicit $exec
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x80000000)
- ; CHECK-NEXT: liveins: $vgpr0_vgpr1_vgpr2_vgpr3:0x00000000000000FF, $vgpr4_vgpr5_vgpr6_vgpr7:0x000000000000003F
+ ; CHECK-NEXT: liveins: $vgpr0_vgpr1_vgpr2_vgpr3:0x00000000000000FF, $vgpr4_vgpr5_vgpr6_vgpr7:0x00000000000000FF
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: SI_SPILL_AV128_SAVE killed $vgpr0_vgpr1_vgpr2_vgpr3, %stack.1, $sgpr32, 0, implicit $exec :: (store (s128) into %stack.1, align 4, addrspace 5)
; CHECK-NEXT: renamable $vgpr0_vgpr1_vgpr2_vgpr3 = SI_SPILL_V128_RESTORE %stack.2, $sgpr32, 0, implicit $exec :: (load (s128) from %stack.2, align 4, addrspace 5)
@@ -40,7 +41,7 @@ body: |
; CHECK-NEXT: SI_SPILL_V32_SAVE killed $vgpr0, %stack.3, $sgpr32, 0, implicit $exec :: (store (s32) into %stack.3, addrspace 5)
; CHECK-NEXT: renamable $vgpr0 = IMPLICIT_DEF
; CHECK-NEXT: renamable $vgpr2_vgpr3_vgpr4_vgpr5 = SI_SPILL_V128_RESTORE %stack.2, $sgpr32, 0, implicit $exec :: (load (s128) from %stack.2, align 4, addrspace 5)
- ; CHECK-NEXT: renamable $vgpr5 = nofpexcept V_DIV_FIXUP_F32_e64 0, killed $vgpr0, 0, undef $vgpr7, 0, killed $vgpr5, 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: renamable $vgpr5 = nofpexcept V_DIV_FIXUP_F32_e64 0, killed $vgpr0, 0, killed $vgpr7, 0, killed $vgpr5, 0, 0, implicit $mode, implicit $exec
; CHECK-NEXT: renamable $vgpr0 = SI_SPILL_V32_RESTORE %stack.3, $sgpr32, 0, implicit $exec :: (load (s32) from %stack.3, addrspace 5)
; CHECK-NEXT: renamable $vgpr9 = COPY killed renamable $vgpr5
; CHECK-NEXT: renamable $vgpr4_vgpr5_vgpr6_vgpr7 = SI_SPILL_V128_RESTORE %stack.4, $sgpr32, 0, implicit $exec :: (load (s128) from %stack.4, align 4, addrspace 5)
@@ -73,6 +74,7 @@ body: |
undef %4.sub0:vreg_128_align2 = V_FMA_F32_e64 0, %3.sub0, 0, %3.sub0, 0, %0.sub3, 0, 0, implicit $mode, implicit $exec
%4.sub1:vreg_128_align2 = V_FMA_F32_e64 0, %3.sub1, 0, %3.sub1, 0, %0.sub2, 0, 0, implicit $mode, implicit $exec
%4.sub2:vreg_128_align2 = V_FMA_F32_e64 0, %3.sub2, 0, %3.sub2, 0, %0.sub1, 0, 0, implicit $mode, implicit $exec
+ %4.sub3:vreg_128_align2 = IMPLICIT_DEF
S_CBRANCH_EXECZ %bb.2, implicit $exec
bb.1:
diff --git a/llvm/test/CodeGen/AMDGPU/swdev503538-move-to-valu-stack-srd-physreg.ll b/llvm/test/CodeGen/AMDGPU/swdev503538-move-to-valu-stack-srd-physreg.ll
new file mode 100644
index 0000000..f0b3d33
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/swdev503538-move-to-valu-stack-srd-physreg.ll
@@ -0,0 +1,23 @@
+; RUN: not llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -verify-machineinstrs=0 -O0 2> %t.err < %s | FileCheck %s
+; RUN: FileCheck -check-prefix=ERR %s < %t.err
+
+; FIXME: This error will be fixed by supporting arbitrary divergent
+; dynamic allocas by performing a wave umax of the size.
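+; The size operand %zero is computed from a per-lane (divergent) load; a
+; wave umax of that size would give the uniform value the stack setup needs.
+; Until then the divergent operand surfaces as the "illegal VGPR to SGPR
+; copy" error checked below.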
+
+; ERR: error: <unknown>:0:0: in function move_to_valu_assert_srd_is_physreg_swdev503538 i32 (ptr addrspace(1)): illegal VGPR to SGPR copy
+
+; CHECK: ; illegal copy v0 to s32
+
+define i32 @move_to_valu_assert_srd_is_physreg_swdev503538(ptr addrspace(1) %ptr) {
+entry:
+ %idx = load i32, ptr addrspace(1) %ptr, align 4
+ %zero = extractelement <4 x i32> zeroinitializer, i32 %idx
+ %alloca = alloca [2048 x i8], i32 %zero, align 8, addrspace(5)
+ %ld = load i32, ptr addrspace(5) %alloca, align 8
+ call void @llvm.memset.p5.i32(ptr addrspace(5) %alloca, i8 0, i32 2048, i1 false)
+ ret i32 %ld
+}
+
+declare void @llvm.memset.p5.i32(ptr addrspace(5) nocapture writeonly, i8, i32, i1 immarg) #0
+
+attributes #0 = { nocallback nofree nounwind willreturn memory(argmem: write) }
diff --git a/llvm/test/CodeGen/AMDGPU/umed3.ll b/llvm/test/CodeGen/AMDGPU/umed3.ll
index 557d023..4726e81 100644
--- a/llvm/test/CodeGen/AMDGPU/umed3.ll
+++ b/llvm/test/CodeGen/AMDGPU/umed3.ll
@@ -1,6 +1,8 @@
; RUN: llc -mtriple=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global,-real-true16 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX11-FAKE16 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global,+real-true16 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX11-TRUE16 %s
declare i32 @llvm.amdgcn.workitem.id.x() #0
@@ -84,6 +86,8 @@ define amdgpu_kernel void @v_test_umed3_r_i_i_i64(ptr addrspace(1) %out, ptr add
; VI: v_max_u16_e32 [[MAX:v[0-9]]], 12, {{v[0-9]}}
; VI: v_min_u16_e32 {{v[0-9]}}, 17, [[MAX]]
; GFX9: v_med3_u16 v{{[0-9]+}}, v{{[0-9]+}}, 12, 17
+; GFX11-TRUE16: v_med3_u16 v{{[0-9]+}}.l, v{{[0-9]+}}.l, 12, 17
+; GFX11-FAKE16: v_med3_u16 v{{[0-9]+}}, v{{[0-9]+}}, 12, 17
define amdgpu_kernel void @v_test_umed3_r_i_i_i16(ptr addrspace(1) %out, ptr addrspace(1) %aptr) #1 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%gep0 = getelementptr i16, ptr addrspace(1) %aptr, i32 %tid
@@ -707,6 +711,8 @@ bb:
; VI: v_max_u16
; GFX9: v_med3_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GFX11-TRUE16: v_med3_u16 v{{[0-9]+}}.l, v{{[0-9]+}}.l, v{{[0-9]+}}.h, v{{[0-9]+}}.l
+; GFX11-FAKE16: v_med3_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_test_umed3_i16_pat_0(ptr addrspace(1) %arg, ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #1 {
bb:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -728,6 +734,8 @@ bb:
; GCN-LABEL: {{^}}v_test_umed3_i16_pat_1:
; GFX9: v_med3_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GFX11-TRUE16: v_med3_u16 v{{[0-9]+}}.l, v{{[0-9]+}}.l, v{{[0-9]+}}.h, v{{[0-9]+}}.l
+; GFX11-FAKE16: v_med3_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_test_umed3_i16_pat_1(ptr addrspace(1) %arg, ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #1 {
bb:
%tid = call i32 @llvm.amdgcn.workitem.id.x()
diff --git a/llvm/test/CodeGen/ARM/sink-store-pre-load-dependency.mir b/llvm/test/CodeGen/ARM/sink-store-pre-load-dependency.mir
new file mode 100644
index 0000000..92c983e
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/sink-store-pre-load-dependency.mir
@@ -0,0 +1,41 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -o - %s -mtriple=armv7-- -run-pass=machine-sink | FileCheck %s
+
+name: sink-store-load-dep
+tracksRegLiveness: true
+stack:
+ - { id: 0, type: default, size: 8, alignment: 8 }
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: sink-store-load-dep
+ ; CHECK: bb.0:
+ ; CHECK: [[LDRi12_:%[0-9]+]]:gpr = LDRi12 %stack.0, 0, 14 /* CC::al */, $noreg :: (load (s32))
+ ; CHECK-NEXT: [[MOVi:%[0-9]+]]:gpr = MOVi 55296, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: [[ADDri1:%[0-9]+]]:gpr = ADDri [[LDRi12_:%[0-9]+]], 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: [[LDRH:%[0-9]+]]:gpr = LDRH killed [[ADDri1:%[0-9]+]], $noreg, 0, 14 /* CC::al */, $noreg :: (load (s16))
+ ; CHECK-NEXT: [[MOVi1:%[0-9]+]]:gpr = MOVi 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK-NEXT: early-clobber %5:gpr = STRH_PRE [[MOVi:%[0-9]+]], [[LDRi12_:%[0-9]+]], [[MOVi1:%[0-9]+]], 0, 14 /* CC::al */, $noreg
+ ; CHECK-NEXT: [[SUBri:%.*]]:gpr = SUBri killed [[LDRi12_:%[0-9]+]], 0, 14 /* CC::al */, $noreg, $noreg
+ ; CHECK: bb.2:
+ ; CHECK-NEXT: [[MOVi2:%[0-9]+]]:gpr = MOVi [[LDRH:%[0-9]+]], 14 /* CC::al */, $noreg, $noreg
+ %0:gpr = LDRi12 %stack.0, 0, 14, $noreg :: (load (s32))
+ %1:gpr = MOVi 55296, 14, $noreg, $noreg
+ %2:gpr = ADDri %0:gpr, 0, 14, $noreg, $noreg
+ %3:gpr = LDRH killed %2:gpr, $noreg, 0, 14, $noreg :: (load (s16))
+ %4:gpr = MOVi 0, 14, $noreg, $noreg
+ early-clobber %5:gpr = STRH_PRE %1:gpr, %0:gpr, %4:gpr, 0, 14, $noreg
+ %6:gpr = SUBri killed %0:gpr, 0, 14, $noreg, $noreg
+ CMPri %6:gpr, 0, 14, $noreg, implicit-def $cpsr
+ Bcc %bb.2, 3, $cpsr
+ B %bb.1
+
+ bb.1:
+ %8:gpr = MOVi 0, 14, $noreg, $noreg
+ $r0 = COPY %8:gpr
+ BX_RET 14, $noreg, implicit $r0
+
+ bb.2:
+ %9:gpr = MOVi %3:gpr, 14, $noreg, $noreg
+ $r0 = COPY %9:gpr
+ BX_RET 14, $noreg, implicit $r0
+...
diff --git a/llvm/test/CodeGen/DirectX/BufferLoad-sm61.ll b/llvm/test/CodeGen/DirectX/BufferLoad-sm61.ll
new file mode 100644
index 0000000..501f151
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/BufferLoad-sm61.ll
@@ -0,0 +1,60 @@
+; RUN: opt -S -dxil-op-lower %s | FileCheck %s
+; Before SM6.2, ByteAddressBuffer and StructuredBuffer loads lower to bufferLoad.
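+; On SM6.2 and later the same intrinsics lower to dx.op.rawBufferLoad
+; (opcode 139) rather than dx.op.bufferLoad (opcode 68); RawBufferLoad.ll
+; covers that path, while this file pins the pre-6.2 behaviour.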
+
+target triple = "dxil-pc-shadermodel6.1-compute"
+
+; CHECK-LABEL: define void @loadf32_struct
+define void @loadf32_struct(i32 %index) {
+ %buffer = call target("dx.RawBuffer", float, 0, 0, 0)
+ @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_f32_0_0_0(
+ i32 0, i32 0, i32 1, i32 0, i1 false)
+
+ ; CHECK: [[DATA:%.*]] = call %dx.types.ResRet.f32 @dx.op.bufferLoad.f32(i32 68, %dx.types.Handle %{{.*}}, i32 %index, i32 0)
+ %load = call {float, i1}
+ @llvm.dx.resource.load.rawbuffer.f32.tdx.RawBuffer_f32_0_0_0t(
+ target("dx.RawBuffer", float, 0, 0, 0) %buffer,
+ i32 %index,
+ i32 0)
+
+ ret void
+}
+
+; CHECK-LABEL: define void @loadv4f32_byte
+define void @loadv4f32_byte(i32 %offset) {
+ %buffer = call target("dx.RawBuffer", i8, 0, 0, 0)
+ @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i8_0_0_0(
+ i32 0, i32 0, i32 1, i32 0, i1 false)
+
+ ; CHECK: [[DATA:%.*]] = call %dx.types.ResRet.f32 @dx.op.bufferLoad.f32(i32 68, %dx.types.Handle %{{.*}}, i32 %offset, i32 0)
+ %load = call {<4 x float>, i1}
+ @llvm.dx.resource.load.rawbuffer.f32.tdx.RawBuffer_i8_0_0_0t(
+ target("dx.RawBuffer", i8, 0, 0, 0) %buffer,
+ i32 %offset,
+ i32 0)
+
+ ret void
+}
+
+; CHECK-LABEL: define void @loadnested
+define void @loadnested(i32 %index) {
+ %buffer = call
+ target("dx.RawBuffer", {i32, {<4 x float>, <3 x half>}}, 0, 0, 0)
+ @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, i1 false)
+
+ ; CHECK: [[DATAI32:%.*]] = call %dx.types.ResRet.i32 @dx.op.bufferLoad.i32(i32 68, %dx.types.Handle %{{.*}}, i32 %index, i32 0)
+ %loadi32 = call {i32, i1} @llvm.dx.resource.load.rawbuffer.i32(
+ target("dx.RawBuffer", {i32, {<4 x float>, <3 x half>}}, 0, 0, 0) %buffer,
+ i32 %index, i32 0)
+
+ ; CHECK: [[DATAF32:%.*]] = call %dx.types.ResRet.f32 @dx.op.bufferLoad.f32(i32 68, %dx.types.Handle %{{.*}}, i32 %index, i32 4)
+ %loadf32 = call {<4 x float>, i1} @llvm.dx.resource.load.rawbuffer.v4f32(
+ target("dx.RawBuffer", {i32, {<4 x float>, <3 x half>}}, 0, 0, 0) %buffer,
+ i32 %index, i32 4)
+
+ ; CHECK: [[DATAF16:%.*]] = call %dx.types.ResRet.f16 @dx.op.bufferLoad.f16(i32 68, %dx.types.Handle %{{.*}}, i32 %index, i32 20)
+ %loadf16 = call {<3 x half>, i1} @llvm.dx.resource.load.rawbuffer.v3f16(
+ target("dx.RawBuffer", {i32, {<4 x float>, <3 x half>}}, 0, 0, 0) %buffer,
+ i32 %index, i32 20)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/DirectX/BufferLoad.ll b/llvm/test/CodeGen/DirectX/BufferLoad.ll
index 7f1291b..86e2217 100644
--- a/llvm/test/CodeGen/DirectX/BufferLoad.ll
+++ b/llvm/test/CodeGen/DirectX/BufferLoad.ll
@@ -17,8 +17,9 @@ define void @loadv4f32() {
; CHECK-NOT: %dx.resource.casthandle
; CHECK: [[DATA0:%.*]] = call %dx.types.ResRet.f32 @dx.op.bufferLoad.f32(i32 68, %dx.types.Handle [[HANDLE]], i32 0, i32 undef)
- %data0 = call <4 x float> @llvm.dx.resource.load.typedbuffer(
+ %load0 = call {<4 x float>, i1} @llvm.dx.resource.load.typedbuffer(
target("dx.TypedBuffer", <4 x float>, 0, 0, 0) %buffer, i32 0)
+ %data0 = extractvalue {<4 x float>, i1} %load0, 0
; The extract order depends on the users, so don't enforce that here.
; CHECK-DAG: [[VAL0_0:%.*]] = extractvalue %dx.types.ResRet.f32 [[DATA0]], 0
@@ -34,8 +35,9 @@ define void @loadv4f32() {
call void @scalar_user(float %data0_2)
; CHECK: [[DATA4:%.*]] = call %dx.types.ResRet.f32 @dx.op.bufferLoad.f32(i32 68, %dx.types.Handle [[HANDLE]], i32 4, i32 undef)
- %data4 = call <4 x float> @llvm.dx.resource.load.typedbuffer(
+ %load4 = call {<4 x float>, i1} @llvm.dx.resource.load.typedbuffer(
target("dx.TypedBuffer", <4 x float>, 0, 0, 0) %buffer, i32 4)
+ %data4 = extractvalue {<4 x float>, i1} %load4, 0
; CHECK: extractvalue %dx.types.ResRet.f32 [[DATA4]], 0
; CHECK: extractvalue %dx.types.ResRet.f32 [[DATA4]], 1
@@ -48,8 +50,9 @@ define void @loadv4f32() {
call void @vector_user(<4 x float> %data4)
; CHECK: [[DATA12:%.*]] = call %dx.types.ResRet.f32 @dx.op.bufferLoad.f32(i32 68, %dx.types.Handle [[HANDLE]], i32 12, i32 undef)
- %data12 = call <4 x float> @llvm.dx.resource.load.typedbuffer(
+ %load12 = call {<4 x float>, i1} @llvm.dx.resource.load.typedbuffer(
target("dx.TypedBuffer", <4 x float>, 0, 0, 0) %buffer, i32 12)
+ %data12 = extractvalue {<4 x float>, i1} %load12, 0
; CHECK: [[DATA12_3:%.*]] = extractvalue %dx.types.ResRet.f32 [[DATA12]], 3
%data12_3 = extractelement <4 x float> %data12, i32 3
@@ -70,8 +73,9 @@ define void @index_dynamic(i32 %bufindex, i32 %elemindex) {
i32 0, i32 0, i32 1, i32 0, i1 false)
; CHECK: [[LOAD:%.*]] = call %dx.types.ResRet.f32 @dx.op.bufferLoad.f32(i32 68, %dx.types.Handle [[HANDLE]], i32 %bufindex, i32 undef)
- %load = call <4 x float> @llvm.dx.resource.load.typedbuffer(
+ %load = call {<4 x float>, i1} @llvm.dx.resource.load.typedbuffer(
target("dx.TypedBuffer", <4 x float>, 0, 0, 0) %buffer, i32 %bufindex)
+ %data = extractvalue {<4 x float>, i1} %load, 0
; CHECK: [[ALLOCA:%.*]] = alloca [4 x float]
; CHECK: [[V0:%.*]] = extractvalue %dx.types.ResRet.f32 [[LOAD]], 0
@@ -89,10 +93,10 @@ define void @index_dynamic(i32 %bufindex, i32 %elemindex) {
;
; CHECK: [[PTR:%.*]] = getelementptr inbounds [4 x float], ptr [[ALLOCA]], i32 0, i32 %elemindex
; CHECK: [[X:%.*]] = load float, ptr [[PTR]]
- %data = extractelement <4 x float> %load, i32 %elemindex
+ %x = extractelement <4 x float> %data, i32 %elemindex
; CHECK: call void @scalar_user(float [[X]])
- call void @scalar_user(float %data)
+ call void @scalar_user(float %x)
ret void
}
@@ -105,8 +109,9 @@ define void @loadf32() {
i32 0, i32 0, i32 1, i32 0, i1 false)
; CHECK: [[DATA0:%.*]] = call %dx.types.ResRet.f32 @dx.op.bufferLoad.f32(i32 68, %dx.types.Handle [[HANDLE]], i32 0, i32 undef)
- %data0 = call float @llvm.dx.resource.load.typedbuffer(
+ %load0 = call {float, i1} @llvm.dx.resource.load.typedbuffer(
target("dx.TypedBuffer", float, 0, 0, 0) %buffer, i32 0)
+ %data0 = extractvalue {float, i1} %load0, 0
; CHECK: [[VAL0:%.*]] = extractvalue %dx.types.ResRet.f32 [[DATA0]], 0
; CHECK: call void @scalar_user(float [[VAL0]])
@@ -123,7 +128,7 @@ define void @loadv2f32() {
i32 0, i32 0, i32 1, i32 0, i1 false)
; CHECK: [[DATA0:%.*]] = call %dx.types.ResRet.f32 @dx.op.bufferLoad.f32(i32 68, %dx.types.Handle [[HANDLE]], i32 0, i32 undef)
- %data0 = call <2 x float> @llvm.dx.resource.load.typedbuffer(
+ %data0 = call {<2 x float>, i1} @llvm.dx.resource.load.typedbuffer(
target("dx.TypedBuffer", <2 x float>, 0, 0, 0) %buffer, i32 0)
ret void
@@ -137,7 +142,7 @@ define void @loadv4f32_checkbit() {
i32 0, i32 0, i32 1, i32 0, i1 false)
; CHECK: [[DATA0:%.*]] = call %dx.types.ResRet.f32 @dx.op.bufferLoad.f32(i32 68, %dx.types.Handle [[HANDLE]], i32 0, i32 undef)
- %data0 = call {<4 x float>, i1} @llvm.dx.resource.loadchecked.typedbuffer.f32(
+ %data0 = call {<4 x float>, i1} @llvm.dx.resource.load.typedbuffer.f32(
target("dx.TypedBuffer", <4 x float>, 0, 0, 0) %buffer, i32 0)
; CHECK: [[STATUS:%.*]] = extractvalue %dx.types.ResRet.f32 [[DATA0]], 4
@@ -158,7 +163,7 @@ define void @loadv4i32() {
i32 0, i32 0, i32 1, i32 0, i1 false)
; CHECK: [[DATA0:%.*]] = call %dx.types.ResRet.i32 @dx.op.bufferLoad.i32(i32 68, %dx.types.Handle [[HANDLE]], i32 0, i32 undef)
- %data0 = call <4 x i32> @llvm.dx.resource.load.typedbuffer(
+ %data0 = call {<4 x i32>, i1} @llvm.dx.resource.load.typedbuffer(
target("dx.TypedBuffer", <4 x i32>, 0, 0, 0) %buffer, i32 0)
ret void
@@ -172,7 +177,7 @@ define void @loadv4f16() {
i32 0, i32 0, i32 1, i32 0, i1 false)
; CHECK: [[DATA0:%.*]] = call %dx.types.ResRet.f16 @dx.op.bufferLoad.f16(i32 68, %dx.types.Handle [[HANDLE]], i32 0, i32 undef)
- %data0 = call <4 x half> @llvm.dx.resource.load.typedbuffer(
+ %data0 = call {<4 x half>, i1} @llvm.dx.resource.load.typedbuffer(
target("dx.TypedBuffer", <4 x half>, 0, 0, 0) %buffer, i32 0)
ret void
@@ -186,7 +191,7 @@ define void @loadv4i16() {
i32 0, i32 0, i32 1, i32 0, i1 false)
; CHECK: [[DATA0:%.*]] = call %dx.types.ResRet.i16 @dx.op.bufferLoad.i16(i32 68, %dx.types.Handle [[HANDLE]], i32 0, i32 undef)
- %data0 = call <4 x i16> @llvm.dx.resource.load.typedbuffer(
+ %data0 = call {<4 x i16>, i1} @llvm.dx.resource.load.typedbuffer(
target("dx.TypedBuffer", <4 x i16>, 0, 0, 0) %buffer, i32 0)
ret void
diff --git a/llvm/test/CodeGen/DirectX/HLSLControlFlowHint.ll b/llvm/test/CodeGen/DirectX/HLSLControlFlowHint.ll
deleted file mode 100644
index fe66e48..0000000
--- a/llvm/test/CodeGen/DirectX/HLSLControlFlowHint.ll
+++ /dev/null
@@ -1,98 +0,0 @@
-; RUN: opt -S -dxil-op-lower -dxil-translate-metadata -mtriple=dxil-pc-shadermodel6.3-library %s | FileCheck %s
-
-; This test make sure LLVM metadata is being translated into DXIL.
-
-
-; CHECK: define i32 @test_branch(i32 %X)
-; CHECK-NO: hlsl.controlflow.hint
-; CHECK: br i1 %cmp, label %if.then, label %if.else, !dx.controlflow.hints [[HINT_BRANCH:![0-9]+]]
-define i32 @test_branch(i32 %X) {
-entry:
- %X.addr = alloca i32, align 4
- %resp = alloca i32, align 4
- store i32 %X, ptr %X.addr, align 4
- %0 = load i32, ptr %X.addr, align 4
- %cmp = icmp sgt i32 %0, 0
- br i1 %cmp, label %if.then, label %if.else, !hlsl.controlflow.hint !0
-
-if.then: ; preds = %entry
- %1 = load i32, ptr %X.addr, align 4
- %sub = sub nsw i32 0, %1
- store i32 %sub, ptr %resp, align 4
- br label %if.end
-
-if.else: ; preds = %entry
- %2 = load i32, ptr %X.addr, align 4
- %mul = mul nsw i32 %2, 2
- store i32 %mul, ptr %resp, align 4
- br label %if.end
-
-if.end: ; preds = %if.else, %if.then
- %3 = load i32, ptr %resp, align 4
- ret i32 %3
-}
-
-
-; CHECK: define i32 @test_flatten(i32 %X)
-; CHECK-NO: hlsl.controlflow.hint
-; CHECK: br i1 %cmp, label %if.then, label %if.else, !dx.controlflow.hints [[HINT_FLATTEN:![0-9]+]]
-define i32 @test_flatten(i32 %X) {
-entry:
- %X.addr = alloca i32, align 4
- %resp = alloca i32, align 4
- store i32 %X, ptr %X.addr, align 4
- %0 = load i32, ptr %X.addr, align 4
- %cmp = icmp sgt i32 %0, 0
- br i1 %cmp, label %if.then, label %if.else, !hlsl.controlflow.hint !1
-
-if.then: ; preds = %entry
- %1 = load i32, ptr %X.addr, align 4
- %sub = sub nsw i32 0, %1
- store i32 %sub, ptr %resp, align 4
- br label %if.end
-
-if.else: ; preds = %entry
- %2 = load i32, ptr %X.addr, align 4
- %mul = mul nsw i32 %2, 2
- store i32 %mul, ptr %resp, align 4
- br label %if.end
-
-if.end: ; preds = %if.else, %if.then
- %3 = load i32, ptr %resp, align 4
- ret i32 %3
-}
-
-
-; CHECK: define i32 @test_no_attr(i32 %X)
-; CHECK-NO: hlsl.controlflow.hint
-; CHECK-NO: !dx.controlflow.hints
-define i32 @test_no_attr(i32 %X) {
-entry:
- %X.addr = alloca i32, align 4
- %resp = alloca i32, align 4
- store i32 %X, ptr %X.addr, align 4
- %0 = load i32, ptr %X.addr, align 4
- %cmp = icmp sgt i32 %0, 0
- br i1 %cmp, label %if.then, label %if.else
-
-if.then: ; preds = %entry
- %1 = load i32, ptr %X.addr, align 4
- %sub = sub nsw i32 0, %1
- store i32 %sub, ptr %resp, align 4
- br label %if.end
-
-if.else: ; preds = %entry
- %2 = load i32, ptr %X.addr, align 4
- %mul = mul nsw i32 %2, 2
- store i32 %mul, ptr %resp, align 4
- br label %if.end
-
-if.end: ; preds = %if.else, %if.then
- %3 = load i32, ptr %resp, align 4
- ret i32 %3
-}
-; CHECK-NO: hlsl.controlflow.hint
-; CHECK: [[HINT_BRANCH]] = !{!"dx.controlflow.hints", i32 1}
-; CHECK: [[HINT_FLATTEN]] = !{!"dx.controlflow.hints", i32 2}
-!0 = !{!"hlsl.controlflow.hint", i32 1}
-!1 = !{!"hlsl.controlflow.hint", i32 2}
diff --git a/llvm/test/CodeGen/DirectX/RawBufferLoad-error64.ll b/llvm/test/CodeGen/DirectX/RawBufferLoad-error64.ll
new file mode 100644
index 0000000..b8a6649
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/RawBufferLoad-error64.ll
@@ -0,0 +1,24 @@
+; We use llc for this test so that we don't abort after the first error.
+; RUN: not llc %s -o /dev/null 2>&1 | FileCheck %s
+
+target triple = "dxil-pc-shadermodel6.2-compute"
+
+declare void @v4f64_user(<4 x double>)
+
+; Can't load 64-bit types directly until SM6.3 (byteaddressbuf.Load<int64_t4>)
+; CHECK: error:
+; CHECK-SAME: in function loadv4f64_byte
+; CHECK-SAME: Cannot create RawBufferLoad operation: Invalid overload type
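+; On newer shader models the same intrinsic lowers to dx.op.rawBufferLoad.f64
+; (see loadv4f64_byte in RawBufferLoad.ll, which targets SM6.6); this test
+; only exercises the pre-6.3 diagnostic.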
+define void @loadv4f64_byte(i32 %offset) "hlsl.export" {
+ %buffer = call target("dx.RawBuffer", i8, 0, 0, 0)
+ @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i8_0_0_0(
+ i32 0, i32 0, i32 1, i32 0, i1 false)
+
+ %load = call {<4 x double>, i1} @llvm.dx.resource.load.rawbuffer.v4i64(
+ target("dx.RawBuffer", i8, 0, 0, 0) %buffer, i32 %offset, i32 0)
+ %data = extractvalue {<4 x double>, i1} %load, 0
+
+ call void @v4f64_user(<4 x double> %data)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/DirectX/RawBufferLoad.ll b/llvm/test/CodeGen/DirectX/RawBufferLoad.ll
new file mode 100644
index 0000000..586b9c4
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/RawBufferLoad.ll
@@ -0,0 +1,232 @@
+; RUN: opt -S -dxil-op-lower %s | FileCheck %s
+
+target triple = "dxil-pc-shadermodel6.6-compute"
+
+declare void @f32_user(float)
+declare void @v4f32_user(<4 x float>)
+declare void @i32_user(i32)
+declare void @v4i32_user(<4 x i32>)
+declare void @v3f16_user(<3 x half>)
+declare void @v4f64_user(<4 x double>)
+
+; CHECK-LABEL: define void @loadf32_struct
+define void @loadf32_struct(i32 %index) {
+ %buffer = call target("dx.RawBuffer", float, 0, 0, 0)
+ @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_f32_0_0_0(
+ i32 0, i32 0, i32 1, i32 0, i1 false)
+
+ ; CHECK: [[DATA:%.*]] = call %dx.types.ResRet.f32 @dx.op.rawBufferLoad.f32(i32 139, %dx.types.Handle %{{.*}}, i32 %index, i32 0, i8 1, i32 4)
+ %load = call {float, i1}
+ @llvm.dx.resource.load.rawbuffer.f32.tdx.RawBuffer_f32_0_0_0t(
+ target("dx.RawBuffer", float, 0, 0, 0) %buffer,
+ i32 %index,
+ i32 0)
+ %data = extractvalue {float, i1} %load, 0
+
+ ; CHECK: [[VAL:%.*]] = extractvalue %dx.types.ResRet.f32 [[DATA]], 0
+ ; CHECK: call void @f32_user(float [[VAL]])
+ call void @f32_user(float %data)
+
+ ret void
+}
+
+; CHECK-LABEL: define void @loadf32_byte
+define void @loadf32_byte(i32 %offset) {
+ %buffer = call target("dx.RawBuffer", i8, 0, 0, 0)
+ @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i8_0_0_0(
+ i32 0, i32 0, i32 1, i32 0, i1 false)
+
+ ; CHECK: [[DATA:%.*]] = call %dx.types.ResRet.f32 @dx.op.rawBufferLoad.f32(i32 139, %dx.types.Handle %{{.*}}, i32 %offset, i32 0, i8 1, i32 4)
+ %load = call {float, i1}
+ @llvm.dx.resource.load.rawbuffer.f32.tdx.RawBuffer_i8_0_0_0t(
+ target("dx.RawBuffer", i8, 0, 0, 0) %buffer,
+ i32 %offset,
+ i32 0)
+ %data = extractvalue {float, i1} %load, 0
+
+ ; CHECK: [[VAL:%.*]] = extractvalue %dx.types.ResRet.f32 [[DATA]], 0
+ ; CHECK: call void @f32_user(float [[VAL]])
+ call void @f32_user(float %data)
+
+ ret void
+}
+
+; CHECK-LABEL: define void @loadv4f32_struct
+define void @loadv4f32_struct(i32 %index) {
+ %buffer = call target("dx.RawBuffer", <4 x float>, 0, 0, 0)
+ @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_v4f32_0_0_0(
+ i32 0, i32 0, i32 1, i32 0, i1 false)
+
+ ; CHECK: [[DATA:%.*]] = call %dx.types.ResRet.f32 @dx.op.rawBufferLoad.f32(i32 139, %dx.types.Handle %{{.*}}, i32 %index, i32 0, i8 15, i32 4)
+ %load = call {<4 x float>, i1}
+ @llvm.dx.resource.load.rawbuffer.f32.tdx.RawBuffer_v4f32_0_0_0t(
+ target("dx.RawBuffer", <4 x float>, 0, 0, 0) %buffer,
+ i32 %index,
+ i32 0)
+ %data = extractvalue {<4 x float>, i1} %load, 0
+
+ ; CHECK: extractvalue %dx.types.ResRet.f32 [[DATA]], 0
+ ; CHECK: extractvalue %dx.types.ResRet.f32 [[DATA]], 1
+ ; CHECK: extractvalue %dx.types.ResRet.f32 [[DATA]], 2
+ ; CHECK: extractvalue %dx.types.ResRet.f32 [[DATA]], 3
+ ; CHECK: insertelement <4 x float> undef
+ ; CHECK: insertelement <4 x float>
+ ; CHECK: insertelement <4 x float>
+ ; CHECK: insertelement <4 x float>
+ ; CHECK: call void @v4f32_user(<4 x float>
+ call void @v4f32_user(<4 x float> %data)
+
+ ret void
+}
+
+; CHECK-LABEL: define void @loadv4f32_byte
+define void @loadv4f32_byte(i32 %offset) {
+ %buffer = call target("dx.RawBuffer", i8, 0, 0, 0)
+ @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i8_0_0_0(
+ i32 0, i32 0, i32 1, i32 0, i1 false)
+
+ ; CHECK: [[DATA:%.*]] = call %dx.types.ResRet.f32 @dx.op.rawBufferLoad.f32(i32 139, %dx.types.Handle %{{.*}}, i32 %offset, i32 0, i8 15, i32 4)
+ %load = call {<4 x float>, i1}
+ @llvm.dx.resource.load.rawbuffer.f32.tdx.RawBuffer_i8_0_0_0t(
+ target("dx.RawBuffer", i8, 0, 0, 0) %buffer,
+ i32 %offset,
+ i32 0)
+ %data = extractvalue {<4 x float>, i1} %load, 0
+
+ ; CHECK: extractvalue %dx.types.ResRet.f32 [[DATA]], 0
+ ; CHECK: extractvalue %dx.types.ResRet.f32 [[DATA]], 1
+ ; CHECK: extractvalue %dx.types.ResRet.f32 [[DATA]], 2
+ ; CHECK: extractvalue %dx.types.ResRet.f32 [[DATA]], 3
+ ; CHECK: insertelement <4 x float> undef
+ ; CHECK: insertelement <4 x float>
+ ; CHECK: insertelement <4 x float>
+ ; CHECK: insertelement <4 x float>
+ ; CHECK: call void @v4f32_user(<4 x float>
+ call void @v4f32_user(<4 x float> %data)
+
+ ret void
+}
+
+; CHECK-LABEL: define void @loadelements
+define void @loadelements(i32 %index) {
+ %buffer = call target("dx.RawBuffer", {<4 x float>, <4 x i32>}, 0, 0, 0)
+ @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_sl_v4f32v4i32s_0_0_0(
+ i32 0, i32 0, i32 1, i32 0, i1 false)
+
+ ; CHECK: [[DATAF32:%.*]] = call %dx.types.ResRet.f32 @dx.op.rawBufferLoad.f32(i32 139, %dx.types.Handle %{{.*}}, i32 %index, i32 0, i8 15, i32 4)
+ %loadf32 = call {<4 x float>, i1}
+ @llvm.dx.resource.load.rawbuffer.v4f32(
+ target("dx.RawBuffer", {<4 x float>, <4 x i32>}, 0, 0, 0) %buffer,
+ i32 %index,
+ i32 0)
+ %dataf32 = extractvalue {<4 x float>, i1} %loadf32, 0
+
+ ; CHECK: extractvalue %dx.types.ResRet.f32 [[DATAF32]], 0
+ ; CHECK: extractvalue %dx.types.ResRet.f32 [[DATAF32]], 1
+ ; CHECK: extractvalue %dx.types.ResRet.f32 [[DATAF32]], 2
+ ; CHECK: extractvalue %dx.types.ResRet.f32 [[DATAF32]], 3
+ ; CHECK: insertelement <4 x float> undef
+ ; CHECK: insertelement <4 x float>
+ ; CHECK: insertelement <4 x float>
+ ; CHECK: insertelement <4 x float>
+ ; CHECK: call void @v4f32_user(<4 x float>
+ call void @v4f32_user(<4 x float> %dataf32)
+
+ ; CHECK: [[DATAI32:%.*]] = call %dx.types.ResRet.i32 @dx.op.rawBufferLoad.i32(i32 139, %dx.types.Handle %{{.*}}, i32 %index, i32 1, i8 15, i32 4)
+ %loadi32 = call {<4 x i32>, i1}
+ @llvm.dx.resource.load.rawbuffer.v4i32(
+ target("dx.RawBuffer", {<4 x float>, <4 x i32>}, 0, 0, 0) %buffer,
+ i32 %index,
+ i32 1)
+ %datai32 = extractvalue {<4 x i32>, i1} %loadi32, 0
+
+ ; CHECK: extractvalue %dx.types.ResRet.i32 [[DATAI32]], 0
+ ; CHECK: extractvalue %dx.types.ResRet.i32 [[DATAI32]], 1
+ ; CHECK: extractvalue %dx.types.ResRet.i32 [[DATAI32]], 2
+ ; CHECK: extractvalue %dx.types.ResRet.i32 [[DATAI32]], 3
+ ; CHECK: insertelement <4 x i32> undef
+ ; CHECK: insertelement <4 x i32>
+ ; CHECK: insertelement <4 x i32>
+ ; CHECK: insertelement <4 x i32>
+ ; CHECK: call void @v4i32_user(<4 x i32>
+ call void @v4i32_user(<4 x i32> %datai32)
+
+ ret void
+}
+
+; CHECK-LABEL: define void @loadnested
+define void @loadnested(i32 %index) {
+ %buffer = call
+ target("dx.RawBuffer", {i32, {<4 x float>, <3 x half>}}, 0, 0, 0)
+ @llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, i1 false)
+
+ ; CHECK: [[DATAI32:%.*]] = call %dx.types.ResRet.i32 @dx.op.rawBufferLoad.i32(i32 139, %dx.types.Handle %{{.*}}, i32 %index, i32 0, i8 1, i32 4)
+ %loadi32 = call {i32, i1} @llvm.dx.resource.load.rawbuffer.i32(
+ target("dx.RawBuffer", {i32, {<4 x float>, <3 x half>}}, 0, 0, 0) %buffer,
+ i32 %index, i32 0)
+ %datai32 = extractvalue {i32, i1} %loadi32, 0
+
+ ; CHECK: [[VALI32:%.*]] = extractvalue %dx.types.ResRet.i32 [[DATAI32]], 0
+ ; CHECK: call void @i32_user(i32 [[VALI32]])
+ call void @i32_user(i32 %datai32)
+
+ ; CHECK: [[DATAF32:%.*]] = call %dx.types.ResRet.f32 @dx.op.rawBufferLoad.f32(i32 139, %dx.types.Handle %{{.*}}, i32 %index, i32 4, i8 15, i32 4)
+ %loadf32 = call {<4 x float>, i1} @llvm.dx.resource.load.rawbuffer.v4f32(
+ target("dx.RawBuffer", {i32, {<4 x float>, <3 x half>}}, 0, 0, 0) %buffer,
+ i32 %index, i32 4)
+ %dataf32 = extractvalue {<4 x float>, i1} %loadf32, 0
+
+ ; CHECK: extractvalue %dx.types.ResRet.f32 [[DATAF32]], 0
+ ; CHECK: extractvalue %dx.types.ResRet.f32 [[DATAF32]], 1
+ ; CHECK: extractvalue %dx.types.ResRet.f32 [[DATAF32]], 2
+ ; CHECK: extractvalue %dx.types.ResRet.f32 [[DATAF32]], 3
+ ; CHECK: insertelement <4 x float> undef
+ ; CHECK: insertelement <4 x float>
+ ; CHECK: insertelement <4 x float>
+ ; CHECK: insertelement <4 x float>
+ ; CHECK: call void @v4f32_user(<4 x float>
+ call void @v4f32_user(<4 x float> %dataf32)
+
+ ; CHECK: [[DATAF16:%.*]] = call %dx.types.ResRet.f16 @dx.op.rawBufferLoad.f16(i32 139, %dx.types.Handle %{{.*}}, i32 %index, i32 20, i8 7, i32 2)
+ %loadf16 = call {<3 x half>, i1} @llvm.dx.resource.load.rawbuffer.v3f16(
+ target("dx.RawBuffer", {i32, {<4 x float>, <3 x half>}}, 0, 0, 0) %buffer,
+ i32 %index, i32 20)
+ %dataf16 = extractvalue {<3 x half>, i1} %loadf16, 0
+
+ ; CHECK: extractvalue %dx.types.ResRet.f16 [[DATAF16]], 0
+ ; CHECK: extractvalue %dx.types.ResRet.f16 [[DATAF16]], 1
+ ; CHECK: extractvalue %dx.types.ResRet.f16 [[DATAF16]], 2
+ ; CHECK: insertelement <3 x half> undef
+ ; CHECK: insertelement <3 x half>
+ ; CHECK: insertelement <3 x half>
+ ; CHECK: call void @v3f16_user(<3 x half>
+ call void @v3f16_user(<3 x half> %dataf16)
+
+ ret void
+}
+
+; byteaddressbuf.Load<int64_t4>
+; CHECK-LABEL: define void @loadv4f64_byte
+define void @loadv4f64_byte(i32 %offset) {
+ %buffer = call target("dx.RawBuffer", i8, 0, 0, 0)
+ @llvm.dx.resource.handlefrombinding.tdx.RawBuffer_i8_0_0_0(
+ i32 0, i32 0, i32 1, i32 0, i1 false)
+
+ ; CHECK: [[DATA:%.*]] = call %dx.types.ResRet.f64 @dx.op.rawBufferLoad.f64(i32 139, %dx.types.Handle %{{.*}}, i32 %offset, i32 0, i8 15, i32 8)
+ %load = call {<4 x double>, i1} @llvm.dx.resource.load.rawbuffer.v4i64(
+ target("dx.RawBuffer", i8, 0, 0, 0) %buffer, i32 %offset, i32 0)
+ %data = extractvalue {<4 x double>, i1} %load, 0
+
+ ; CHECK: extractvalue %dx.types.ResRet.f64 [[DATA]], 0
+ ; CHECK: extractvalue %dx.types.ResRet.f64 [[DATA]], 1
+ ; CHECK: extractvalue %dx.types.ResRet.f64 [[DATA]], 2
+ ; CHECK: extractvalue %dx.types.ResRet.f64 [[DATA]], 3
+ ; CHECK: insertelement <4 x double> undef
+ ; CHECK: insertelement <4 x double>
+ ; CHECK: insertelement <4 x double>
+ ; CHECK: insertelement <4 x double>
+ ; CHECK: call void @v4f64_user(<4 x double>
+ call void @v4f64_user(<4 x double> %data)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/DirectX/ResourceAccess/load_typedbuffer.ll b/llvm/test/CodeGen/DirectX/ResourceAccess/load_typedbuffer.ll
index 9b7e7fd..8769e6e 100644
--- a/llvm/test/CodeGen/DirectX/ResourceAccess/load_typedbuffer.ll
+++ b/llvm/test/CodeGen/DirectX/ResourceAccess/load_typedbuffer.ll
@@ -15,17 +15,19 @@ define void @load_float4(i32 %index, i32 %elemindex) {
%ptr = call ptr @llvm.dx.resource.getpointer(
target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %buffer, i32 %index)
- ; CHECK: %[[VALUE:.*]] = call <4 x float> @llvm.dx.resource.load.typedbuffer.v4f32.tdx.TypedBuffer_v4f32_1_0_0t(target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %buffer, i32 %index)
+ ; CHECK: %[[VALUE:.*]] = call { <4 x float>, i1 } @llvm.dx.resource.load.typedbuffer.v4f32.tdx.TypedBuffer_v4f32_1_0_0t(target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %buffer, i32 %index)
%vec_data = load <4 x float>, ptr %ptr
call void @use_float4(<4 x float> %vec_data)
- ; CHECK: %[[VALUE:.*]] = call <4 x float> @llvm.dx.resource.load.typedbuffer.v4f32.tdx.TypedBuffer_v4f32_1_0_0t(target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %buffer, i32 %index)
+ ; CHECK: %[[LOAD:.*]] = call { <4 x float>, i1 } @llvm.dx.resource.load.typedbuffer.v4f32.tdx.TypedBuffer_v4f32_1_0_0t(target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %buffer, i32 %index)
+ ; CHECK: %[[VALUE:.*]] = extractvalue { <4 x float>, i1 } %[[LOAD]], 0
; CHECK: extractelement <4 x float> %[[VALUE]], i32 1
%y_ptr = getelementptr inbounds <4 x float>, ptr %ptr, i32 0, i32 1
%y_data = load float, ptr %y_ptr
call void @use_float(float %y_data)
- ; CHECK: %[[VALUE:.*]] = call <4 x float> @llvm.dx.resource.load.typedbuffer.v4f32.tdx.TypedBuffer_v4f32_1_0_0t(target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %buffer, i32 %index)
+ ; CHECK: %[[LOAD:.*]] = call { <4 x float>, i1 } @llvm.dx.resource.load.typedbuffer.v4f32.tdx.TypedBuffer_v4f32_1_0_0t(target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %buffer, i32 %index)
+ ; CHECK: %[[VALUE:.*]] = extractvalue { <4 x float>, i1 } %[[LOAD]], 0
; CHECK: extractelement <4 x float> %[[VALUE]], i32 %elemindex
%dynamic = getelementptr inbounds <4 x float>, ptr %ptr, i32 0, i32 %elemindex
%dyndata = load float, ptr %dynamic
diff --git a/llvm/test/CodeGen/DirectX/ResourceAccess/store_typedbuffer.ll b/llvm/test/CodeGen/DirectX/ResourceAccess/store_typedbuffer.ll
index 1760640..0b7882a 100644
--- a/llvm/test/CodeGen/DirectX/ResourceAccess/store_typedbuffer.ll
+++ b/llvm/test/CodeGen/DirectX/ResourceAccess/store_typedbuffer.ll
@@ -18,21 +18,24 @@ define void @store_float4(<4 x float> %data, i32 %index, i32 %elemindex) {
; Store just the .x component
%scalar = extractelement <4 x float> %data, i32 0
- ; CHECK: %[[LOAD:.*]] = call <4 x float> @llvm.dx.resource.load.typedbuffer.v4f32.tdx.TypedBuffer_v4f32_1_0_0t(target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %buffer, i32 %index)
- ; CHECK: %[[INSERT:.*]] = insertelement <4 x float> %[[LOAD]], float %scalar, i32 0
+ ; CHECK: %[[LOAD:.*]] = call { <4 x float>, i1 } @llvm.dx.resource.load.typedbuffer.v4f32.tdx.TypedBuffer_v4f32_1_0_0t(target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %buffer, i32 %index)
+ ; CHECK: %[[VEC:.*]] = extractvalue { <4 x float>, i1 } %[[LOAD]], 0
+ ; CHECK: %[[INSERT:.*]] = insertelement <4 x float> %[[VEC]], float %scalar, i32 0
; CHECK: call void @llvm.dx.resource.store.typedbuffer.tdx.TypedBuffer_v4f32_1_0_0t.v4f32(target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %buffer, i32 %index, <4 x float> %[[INSERT]])
store float %scalar, ptr %ptr
; Store just the .y component
- ; CHECK: %[[LOAD:.*]] = call <4 x float> @llvm.dx.resource.load.typedbuffer.v4f32.tdx.TypedBuffer_v4f32_1_0_0t(target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %buffer, i32 %index)
- ; CHECK: %[[INSERT:.*]] = insertelement <4 x float> %[[LOAD]], float %scalar, i32 1
+ ; CHECK: %[[LOAD:.*]] = call { <4 x float>, i1 } @llvm.dx.resource.load.typedbuffer.v4f32.tdx.TypedBuffer_v4f32_1_0_0t(target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %buffer, i32 %index)
+ ; CHECK: %[[VEC:.*]] = extractvalue { <4 x float>, i1 } %[[LOAD]], 0
+ ; CHECK: %[[INSERT:.*]] = insertelement <4 x float> %[[VEC]], float %scalar, i32 1
; CHECK: call void @llvm.dx.resource.store.typedbuffer.tdx.TypedBuffer_v4f32_1_0_0t.v4f32(target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %buffer, i32 %index, <4 x float> %[[INSERT]])
%y_ptr = getelementptr inbounds i8, ptr %ptr, i32 4
store float %scalar, ptr %y_ptr
; Store to one of the elements dynamically
- ; CHECK: %[[LOAD:.*]] = call <4 x float> @llvm.dx.resource.load.typedbuffer.v4f32.tdx.TypedBuffer_v4f32_1_0_0t(target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %buffer, i32 %index)
- ; CHECK: %[[INSERT:.*]] = insertelement <4 x float> %[[LOAD]], float %scalar, i32 %elemindex
+ ; CHECK: %[[LOAD:.*]] = call { <4 x float>, i1 } @llvm.dx.resource.load.typedbuffer.v4f32.tdx.TypedBuffer_v4f32_1_0_0t(target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %buffer, i32 %index)
+ ; CHECK: %[[VEC:.*]] = extractvalue { <4 x float>, i1 } %[[LOAD]], 0
+ ; CHECK: %[[INSERT:.*]] = insertelement <4 x float> %[[VEC]], float %scalar, i32 %elemindex
; CHECK: call void @llvm.dx.resource.store.typedbuffer.tdx.TypedBuffer_v4f32_1_0_0t.v4f32(target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %buffer, i32 %index, <4 x float> %[[INSERT]])
%dynamic = getelementptr inbounds <4 x float>, ptr %ptr, i32 0, i32 %elemindex
store float %scalar, ptr %dynamic
@@ -56,14 +59,16 @@ define void @store_half4(<4 x half> %data, i32 %index) {
; Store just the .x component
%scalar = extractelement <4 x half> %data, i32 0
- ; CHECK: %[[LOAD:.*]] = call <4 x half> @llvm.dx.resource.load.typedbuffer.v4f16.tdx.TypedBuffer_v4f16_1_0_0t(target("dx.TypedBuffer", <4 x half>, 1, 0, 0) %buffer, i32 %index)
- ; CHECK: %[[INSERT:.*]] = insertelement <4 x half> %[[LOAD]], half %scalar, i32 0
+ ; CHECK: %[[LOAD:.*]] = call { <4 x half>, i1 } @llvm.dx.resource.load.typedbuffer.v4f16.tdx.TypedBuffer_v4f16_1_0_0t(target("dx.TypedBuffer", <4 x half>, 1, 0, 0) %buffer, i32 %index)
+ ; CHECK: %[[VEC:.*]] = extractvalue { <4 x half>, i1 } %[[LOAD]], 0
+ ; CHECK: %[[INSERT:.*]] = insertelement <4 x half> %[[VEC]], half %scalar, i32 0
; CHECK: call void @llvm.dx.resource.store.typedbuffer.tdx.TypedBuffer_v4f16_1_0_0t.v4f16(target("dx.TypedBuffer", <4 x half>, 1, 0, 0) %buffer, i32 %index, <4 x half> %[[INSERT]])
store half %scalar, ptr %ptr
; Store just the .y component
- ; CHECK: %[[LOAD:.*]] = call <4 x half> @llvm.dx.resource.load.typedbuffer.v4f16.tdx.TypedBuffer_v4f16_1_0_0t(target("dx.TypedBuffer", <4 x half>, 1, 0, 0) %buffer, i32 %index)
- ; CHECK: %[[INSERT:.*]] = insertelement <4 x half> %[[LOAD]], half %scalar, i32 1
+ ; CHECK: %[[LOAD:.*]] = call { <4 x half>, i1 } @llvm.dx.resource.load.typedbuffer.v4f16.tdx.TypedBuffer_v4f16_1_0_0t(target("dx.TypedBuffer", <4 x half>, 1, 0, 0) %buffer, i32 %index)
+ ; CHECK: %[[VEC:.*]] = extractvalue { <4 x half>, i1 } %[[LOAD]], 0
+ ; CHECK: %[[INSERT:.*]] = insertelement <4 x half> %[[VEC]], half %scalar, i32 1
; CHECK: call void @llvm.dx.resource.store.typedbuffer.tdx.TypedBuffer_v4f16_1_0_0t.v4f16(target("dx.TypedBuffer", <4 x half>, 1, 0, 0) %buffer, i32 %index, <4 x half> %[[INSERT]])
%y_ptr = getelementptr inbounds i8, ptr %ptr, i32 2
store half %scalar, ptr %y_ptr
@@ -87,14 +92,16 @@ define void @store_double2(<2 x double> %data, i32 %index) {
; Store just the .x component
%scalar = extractelement <2 x double> %data, i32 0
- ; CHECK: %[[LOAD:.*]] = call <2 x double> @llvm.dx.resource.load.typedbuffer.v2f64.tdx.TypedBuffer_v2f64_1_0_0t(target("dx.TypedBuffer", <2 x double>, 1, 0, 0) %buffer, i32 %index)
- ; CHECK: %[[INSERT:.*]] = insertelement <2 x double> %[[LOAD]], double %scalar, i32 0
+ ; CHECK: %[[LOAD:.*]] = call { <2 x double>, i1 } @llvm.dx.resource.load.typedbuffer.v2f64.tdx.TypedBuffer_v2f64_1_0_0t(target("dx.TypedBuffer", <2 x double>, 1, 0, 0) %buffer, i32 %index)
+ ; CHECK: %[[VEC:.*]] = extractvalue { <2 x double>, i1 } %[[LOAD]], 0
+ ; CHECK: %[[INSERT:.*]] = insertelement <2 x double> %[[VEC]], double %scalar, i32 0
; CHECK: call void @llvm.dx.resource.store.typedbuffer.tdx.TypedBuffer_v2f64_1_0_0t.v2f64(target("dx.TypedBuffer", <2 x double>, 1, 0, 0) %buffer, i32 %index, <2 x double> %[[INSERT]])
store double %scalar, ptr %ptr
; Store just the .y component
- ; CHECK: %[[LOAD:.*]] = call <2 x double> @llvm.dx.resource.load.typedbuffer.v2f64.tdx.TypedBuffer_v2f64_1_0_0t(target("dx.TypedBuffer", <2 x double>, 1, 0, 0) %buffer, i32 %index)
- ; CHECK: %[[INSERT:.*]] = insertelement <2 x double> %[[LOAD]], double %scalar, i32 1
+ ; CHECK: %[[LOAD:.*]] = call { <2 x double>, i1 } @llvm.dx.resource.load.typedbuffer.v2f64.tdx.TypedBuffer_v2f64_1_0_0t(target("dx.TypedBuffer", <2 x double>, 1, 0, 0) %buffer, i32 %index)
+ ; CHECK: %[[VEC:.*]] = extractvalue { <2 x double>, i1 } %[[LOAD]], 0
+ ; CHECK: %[[INSERT:.*]] = insertelement <2 x double> %[[VEC]], double %scalar, i32 1
; CHECK: call void @llvm.dx.resource.store.typedbuffer.tdx.TypedBuffer_v2f64_1_0_0t.v2f64(target("dx.TypedBuffer", <2 x double>, 1, 0, 0) %buffer, i32 %index, <2 x double> %[[INSERT]])
%y_ptr = getelementptr inbounds i8, ptr %ptr, i32 8
store double %scalar, ptr %y_ptr
diff --git a/llvm/test/CodeGen/DirectX/ResourceGlobalElimination.ll b/llvm/test/CodeGen/DirectX/ResourceGlobalElimination.ll
index c837b36..cd21adc 100644
--- a/llvm/test/CodeGen/DirectX/ResourceGlobalElimination.ll
+++ b/llvm/test/CodeGen/DirectX/ResourceGlobalElimination.ll
@@ -29,18 +29,20 @@ entry:
%0 = call i32 @llvm.dx.flattened.thread.id.in.group()
; CHECK-NOT: load {{.*}} ptr @In
%1 = load target("dx.TypedBuffer", <4 x float>, 1, 0, 0), ptr @In, align 4
- ; CSE: call noundef <4 x float> @llvm.dx.resource.load.typedbuffer.v4f32.tdx.TypedBuffer_v4f32_1_0_0t
- %2 = call noundef <4 x float> @llvm.dx.resource.load.typedbuffer.v4f32.tdx.TypedBuffer_v4f32_1_0_0t(target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %1, i32 %0)
+ ; CSE: call noundef { <4 x float>, i1 } @llvm.dx.resource.load.typedbuffer.v4f32.tdx.TypedBuffer_v4f32_1_0_0t
+ %load = call noundef {<4 x float>, i1} @llvm.dx.resource.load.typedbuffer.v4f32.tdx.TypedBuffer_v4f32_1_0_0t(target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %1, i32 %0)
+ %2 = extractvalue {<4 x float>, i1} %load, 0
; CHECK-NOT: load {{.*}} ptr @In
%3 = load target("dx.TypedBuffer", <4 x float>, 1, 0, 0), ptr @In, align 4
- %4 = call noundef <4 x float> @llvm.dx.resource.load.typedbuffer.v4f32.tdx.TypedBuffer_v4f32_1_0_0t(target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %3, i32 %0)
+ %load2 = call noundef {<4 x float>, i1} @llvm.dx.resource.load.typedbuffer.v4f32.tdx.TypedBuffer_v4f32_1_0_0t(target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %3, i32 %0)
+ %4 = extractvalue {<4 x float>, i1} %load2, 0
%add.i = fadd <4 x float> %2, %4
call void @llvm.dx.resource.store.typedbuffer.tdx.TypedBuffer_v4f32_1_0_0t.v4f32(target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %Out_h.i, i32 %0, <4 x float> %add.i)
; CHECK: ret void
ret void
}
-; CSE-DAG: declare <4 x float> @llvm.dx.resource.load.typedbuffer.v4f32.tdx.TypedBuffer_v4f32_1_0_0t(target("dx.TypedBuffer", <4 x float>, 1, 0, 0), i32) [[ROAttr:#[0-9]+]]
+; CSE-DAG: declare { <4 x float>, i1 } @llvm.dx.resource.load.typedbuffer.v4f32.tdx.TypedBuffer_v4f32_1_0_0t(target("dx.TypedBuffer", <4 x float>, 1, 0, 0), i32) [[ROAttr:#[0-9]+]]
; CSE-DAG: declare void @llvm.dx.resource.store.typedbuffer.tdx.TypedBuffer_v4f32_1_0_0t.v4f32(target("dx.TypedBuffer", <4 x float>, 1, 0, 0), i32, <4 x float>) [[WOAttr:#[0-9]+]]
attributes #0 = { convergent noinline norecurse "frame-pointer"="all" "hlsl.numthreads"="8,1,1" "hlsl.shader"="compute" "no-trapping-math"="true" "stack-protector-buffer-size"="8" }
diff --git a/llvm/test/CodeGen/DirectX/ShaderFlags/typed-uav-load-additional-formats.ll b/llvm/test/CodeGen/DirectX/ShaderFlags/typed-uav-load-additional-formats.ll
index 2622335..060d54f 100644
--- a/llvm/test/CodeGen/DirectX/ShaderFlags/typed-uav-load-additional-formats.ll
+++ b/llvm/test/CodeGen/DirectX/ShaderFlags/typed-uav-load-additional-formats.ll
@@ -17,8 +17,9 @@ target triple = "dxil-pc-shadermodel6.7-library"
define <4 x float> @multicomponent() #0 {
%res = call target("dx.TypedBuffer", <4 x float>, 1, 0, 0)
@llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, i1 false)
- %val = call <4 x float> @llvm.dx.resource.load.typedbuffer(
+ %load = call {<4 x float>, i1} @llvm.dx.resource.load.typedbuffer(
target("dx.TypedBuffer", <4 x float>, 1, 0, 0) %res, i32 0)
+ %val = extractvalue {<4 x float>, i1} %load, 0
ret <4 x float> %val
}
@@ -26,8 +27,9 @@ define <4 x float> @multicomponent() #0 {
define float @onecomponent() #0 {
%res = call target("dx.TypedBuffer", float, 1, 0, 0)
@llvm.dx.resource.handlefrombinding(i32 0, i32 0, i32 1, i32 0, i1 false)
- %val = call float @llvm.dx.resource.load.typedbuffer(
+ %load = call {float, i1} @llvm.dx.resource.load.typedbuffer(
target("dx.TypedBuffer", float, 1, 0, 0) %res, i32 0)
+ %val = extractvalue {float, i1} %load, 0
ret float %val
}
diff --git a/llvm/test/CodeGen/Hexagon/loopIdiom.ll b/llvm/test/CodeGen/Hexagon/loopIdiom.ll
new file mode 100644
index 0000000..9c3df67
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/loopIdiom.ll
@@ -0,0 +1,75 @@
+; RUN: opt -debug -S -march=hexagon -O2 < %s | FileCheck %s
+; REQUIRES: asserts
+; CHECK: define dso_local void @complexMultAccum
+target triple = "hexagon"
+
+; Function Attrs: noinline nounwind
+define dso_local void @complexMultAccum(i32 noundef %n) #0 {
+entry:
+ %n.addr = alloca i32, align 4
+ %run_c_code = alloca i8, align 1
+ %run_asm_code = alloca i8, align 1
+ %iOutter = alloca i32, align 4
+ %iOutter1 = alloca i32, align 4
+ store i32 %n, ptr %n.addr, align 4
+ store i8 1, ptr %run_c_code, align 1
+ store i8 0, ptr %run_asm_code, align 1
+ %0 = load i8, ptr %run_c_code, align 1
+ %tobool = icmp ne i8 %0, 0
+ br i1 %tobool, label %if.then, label %if.end
+
+if.then: ; preds = %entry
+ store i32 0, ptr %iOutter, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %if.then
+ %1 = load i32, ptr %iOutter, align 4
+ %cmp = icmp slt i32 %1, 2
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %2 = load i32, ptr %iOutter, align 4
+ %inc = add nsw i32 %2, 1
+ store i32 %inc, ptr %iOutter, align 4
+ br label %for.cond, !llvm.loop !3
+
+for.end: ; preds = %for.cond
+ store i32 0, ptr %iOutter1, align 4
+ br label %for.cond2
+
+for.cond2: ; preds = %for.inc5, %for.end
+ %3 = load i32, ptr %iOutter1, align 4
+ %cmp3 = icmp slt i32 %3, 2
+ br i1 %cmp3, label %for.body4, label %for.end7
+
+for.body4: ; preds = %for.cond2
+ br label %for.inc5
+
+for.inc5: ; preds = %for.body4
+ %4 = load i32, ptr %iOutter1, align 4
+ %inc6 = add nsw i32 %4, 1
+ store i32 %inc6, ptr %iOutter1, align 4
+ br label %for.cond2, !llvm.loop !5
+
+for.end7: ; preds = %for.cond2
+ br label %if.end
+
+if.end: ; preds = %for.end7, %entry
+ ret void
+}
+
+attributes #0 = { noinline nounwind "approx-func-fp-math"="true" "frame-pointer"="all" "no-infs-fp-math"="true" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="hexagonv79" "target-features"="+v79,-long-calls" "unsafe-fp-math"="true" }
+
+!llvm.module.flags = !{!0, !1}
+!llvm.ident = !{!2}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{i32 7, !"frame-pointer", i32 2}
+!2 = !{!"LLVM Clang"}
+!3 = distinct !{!3, !4}
+!4 = !{!"llvm.loop.mustprogress"}
+!5 = distinct !{!5, !4}
+
diff --git a/llvm/test/CodeGen/NVPTX/b52037.ll b/llvm/test/CodeGen/NVPTX/b52037.ll
index 5d1c390..b6317df 100644
--- a/llvm/test/CodeGen/NVPTX/b52037.ll
+++ b/llvm/test/CodeGen/NVPTX/b52037.ll
@@ -39,7 +39,7 @@ declare %int3 @hoge(i32, i32, i32) local_unnamed_addr
declare i64 @foo() local_unnamed_addr
-define void @barney(ptr nocapture readonly %arg) local_unnamed_addr {
+define ptx_kernel void @barney(ptr nocapture readonly %arg) local_unnamed_addr {
bb:
tail call void asm sideeffect "// KEEP", ""() #1
%tmp = alloca %struct.zot, align 16
@@ -210,9 +210,6 @@ bb14: ; preds = %bb49.i.lr.ph, %bb49
attributes #0 = { argmemonly mustprogress nofree nounwind willreturn }
attributes #1 = { nounwind }
-!nvvm.annotations = !{!0}
-
-!0 = !{ptr @barney, !"kernel", i32 1}
!1 = !{!2, !11, i64 64}
!2 = !{!"_ZTSN7cuneibs22neiblist_iterator_coreE", !3, i64 0, !3, i64 8, !6, i64 16, !8, i64 32, !9, i64 44, !10, i64 48, !11, i64 64, !9, i64 72, !4, i64 76, !9, i64 80}
!3 = !{!"any pointer", !4, i64 0}
diff --git a/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll b/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll
index 03cdeb9..8be3a66 100644
--- a/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll
@@ -182,8 +182,8 @@ define <2 x bfloat> @test_fneg(<2 x bfloat> %a) #0 {
; CHECK-NEXT: .reg .b32 %r<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.u32 %r1, [test_fneg_param_0];
-; CHECK-NEXT: xor.b32 %r2, %r1, -2147450880;
+; CHECK-NEXT: ld.param.b32 %r1, [test_fneg_param_0];
+; CHECK-NEXT: neg.bf16x2 %r2, %r1;
; CHECK-NEXT: st.param.b32 [func_retval0], %r2;
; CHECK-NEXT: ret;
%r = fneg <2 x bfloat> %a
@@ -532,8 +532,8 @@ define <2 x bfloat> @test_fabs(<2 x bfloat> %a) #0 {
; CHECK-NEXT: .reg .b32 %r<3>;
; CHECK-EMPTY:
; CHECK-NEXT: // %bb.0:
-; CHECK-NEXT: ld.param.u32 %r1, [test_fabs_param_0];
-; CHECK-NEXT: and.b32 %r2, %r1, 2147450879;
+; CHECK-NEXT: ld.param.b32 %r1, [test_fabs_param_0];
+; CHECK-NEXT: abs.bf16x2 %r2, %r1;
; CHECK-NEXT: st.param.b32 [func_retval0], %r2;
; CHECK-NEXT: ret;
%r = call <2 x bfloat> @llvm.fabs.f16(<2 x bfloat> %a)
diff --git a/llvm/test/CodeGen/NVPTX/bug21465.ll b/llvm/test/CodeGen/NVPTX/bug21465.ll
index 9b1f104..76300e3 100644
--- a/llvm/test/CodeGen/NVPTX/bug21465.ll
+++ b/llvm/test/CodeGen/NVPTX/bug21465.ll
@@ -8,7 +8,7 @@ target triple = "nvptx64-unknown-unknown"
%struct.S = type { i32, i32 }
; Function Attrs: nounwind
-define void @_Z11TakesStruct1SPi(ptr byval(%struct.S) nocapture readonly %input, ptr nocapture %output) #0 {
+define ptx_kernel void @_Z11TakesStruct1SPi(ptr byval(%struct.S) nocapture readonly %input, ptr nocapture %output) #0 {
entry:
; CHECK-LABEL: @_Z11TakesStruct1SPi
; PTX-LABEL: .visible .entry _Z11TakesStruct1SPi(
@@ -23,7 +23,3 @@ entry:
}
attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="none" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
-!nvvm.annotations = !{!0}
-
-!0 = !{ptr @_Z11TakesStruct1SPi, !"kernel", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/bug22322.ll b/llvm/test/CodeGen/NVPTX/bug22322.ll
index e3656fd..ace3166 100644
--- a/llvm/test/CodeGen/NVPTX/bug22322.ll
+++ b/llvm/test/CodeGen/NVPTX/bug22322.ll
@@ -8,7 +8,7 @@ target triple = "nvptx64-nvidia-cuda"
; Function Attrs: nounwind
; CHECK-LABEL: some_kernel
-define void @some_kernel(ptr nocapture %dst) #0 {
+define ptx_kernel void @some_kernel(ptr nocapture %dst) #0 {
_ZL11compute_vecRK6float3jb.exit:
%ret_vec.sroa.8.i = alloca float, align 4
%0 = tail call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
@@ -55,8 +55,5 @@ attributes #0 = { nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "n
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind }
-!nvvm.annotations = !{!0}
!llvm.ident = !{!1}
-
-!0 = !{ptr @some_kernel, !"kernel", i32 1}
!1 = !{!"clang version 3.5.1 (tags/RELEASE_351/final)"}
diff --git a/llvm/test/CodeGen/NVPTX/bug26185.ll b/llvm/test/CodeGen/NVPTX/bug26185.ll
index 00c97fb..193df7f 100644
--- a/llvm/test/CodeGen/NVPTX/bug26185.ll
+++ b/llvm/test/CodeGen/NVPTX/bug26185.ll
@@ -8,7 +8,7 @@ target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
target triple = "nvptx64-unknown-unknown"
; CHECK-LABEL: ex_zext
-define void @ex_zext(ptr noalias readonly %data, ptr %res) {
+define ptx_kernel void @ex_zext(ptr noalias readonly %data, ptr %res) {
entry:
; CHECK: ld.global.nc.u8
%val = load i8, ptr %data
@@ -19,7 +19,7 @@ entry:
}
; CHECK-LABEL: ex_sext
-define void @ex_sext(ptr noalias readonly %data, ptr %res) {
+define ptx_kernel void @ex_sext(ptr noalias readonly %data, ptr %res) {
entry:
; CHECK: ld.global.nc.u8
%val = load i8, ptr %data
@@ -30,7 +30,7 @@ entry:
}
; CHECK-LABEL: ex_zext_v2
-define void @ex_zext_v2(ptr noalias readonly %data, ptr %res) {
+define ptx_kernel void @ex_zext_v2(ptr noalias readonly %data, ptr %res) {
entry:
; CHECK: ld.global.nc.v2.u8
%val = load <2 x i8>, ptr %data
@@ -41,7 +41,7 @@ entry:
}
; CHECK-LABEL: ex_sext_v2
-define void @ex_sext_v2(ptr noalias readonly %data, ptr %res) {
+define ptx_kernel void @ex_sext_v2(ptr noalias readonly %data, ptr %res) {
entry:
; CHECK: ld.global.nc.v2.u8
%val = load <2 x i8>, ptr %data
@@ -51,8 +51,3 @@ entry:
ret void
}
-!nvvm.annotations = !{!0,!1,!2,!3}
-!0 = !{ptr @ex_zext, !"kernel", i32 1}
-!1 = !{ptr @ex_sext, !"kernel", i32 1}
-!2 = !{ptr @ex_zext_v2, !"kernel", i32 1}
-!3 = !{ptr @ex_sext_v2, !"kernel", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/call-with-alloca-buffer.ll b/llvm/test/CodeGen/NVPTX/call-with-alloca-buffer.ll
index 19f4ef8..1c9d271 100644
--- a/llvm/test/CodeGen/NVPTX/call-with-alloca-buffer.ll
+++ b/llvm/test/CodeGen/NVPTX/call-with-alloca-buffer.ll
@@ -16,7 +16,7 @@
; }
; CHECK: .visible .entry kernel_func
-define void @kernel_func(ptr %a) {
+define ptx_kernel void @kernel_func(ptr %a) {
entry:
%buf = alloca [16 x i8], align 4
@@ -56,7 +56,3 @@ entry:
}
declare void @callee(ptr, ptr)
-
-!nvvm.annotations = !{!0}
-
-!0 = !{ptr @kernel_func, !"kernel", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/cluster-dim.ll b/llvm/test/CodeGen/NVPTX/cluster-dim.ll
index c9258ad..9275c89 100644
--- a/llvm/test/CodeGen/NVPTX/cluster-dim.ll
+++ b/llvm/test/CodeGen/NVPTX/cluster-dim.ll
@@ -3,7 +3,7 @@
; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_90 | FileCheck -check-prefixes=CHECK90 %s
; RUN: %if ptxas-12.0 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_90 | %ptxas-verify -arch=sm_90 %}
-define void @kernel_func_clusterxyz() {
+define ptx_kernel void @kernel_func_clusterxyz() {
; CHECK80-LABEL: kernel_func_clusterxyz(
; CHECK80: {
; CHECK80-EMPTY:
@@ -23,7 +23,6 @@ define void @kernel_func_clusterxyz() {
}
-!nvvm.annotations = !{!1, !2}
+!nvvm.annotations = !{!1}
-!1 = !{ptr @kernel_func_clusterxyz, !"kernel", i32 1}
-!2 = !{ptr @kernel_func_clusterxyz, !"cluster_dim_x", i32 3, !"cluster_dim_y", i32 5, !"cluster_dim_z", i32 7}
+!1 = !{ptr @kernel_func_clusterxyz, !"cluster_dim_x", i32 3, !"cluster_dim_y", i32 5, !"cluster_dim_z", i32 7}
diff --git a/llvm/test/CodeGen/NVPTX/disjoint-or-addr.ll b/llvm/test/CodeGen/NVPTX/disjoint-or-addr.ll
new file mode 100644
index 0000000..1b1bb91
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/disjoint-or-addr.ll
@@ -0,0 +1,25 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_35 -verify-machineinstrs | FileCheck %s
+; RUN: %if ptxas %{ llc < %s -mtriple=nvptx64 -mcpu=sm_35 | %ptxas-verify %}
+target triple = "nvptx64-nvidia-cuda"
+
+@a = external global ptr align 16
+
+define i32 @test_disjoint_or_addr(i16 %a) {
+; CHECK-LABEL: test_disjoint_or_addr(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: mov.u64 %rd1, a;
+; CHECK-NEXT: cvta.global.u64 %rd2, %rd1;
+; CHECK-NEXT: ld.u32 %r1, [%rd2+8];
+; CHECK-NEXT: st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT: ret;
+ %a1 = ptrtoint ptr @a to i64
+ %a2 = or disjoint i64 %a1, 8
+ %a3 = inttoptr i64 %a2 to ptr
+ %v = load i32, ptr %a3
+ ret i32 %v
+}
diff --git a/llvm/test/CodeGen/NVPTX/fabs-fneg-free.ll b/llvm/test/CodeGen/NVPTX/fabs-fneg-free.ll
new file mode 100644
index 0000000..9031f33
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/fabs-fneg-free.ll
@@ -0,0 +1,34 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_35 -verify-machineinstrs | FileCheck %s
+; RUN: %if ptxas %{ llc < %s -mtriple=nvptx64 -mcpu=sm_35 | %ptxas-verify %}
+target triple = "nvptx64-nvidia-cuda"
+
+define float @fabs_free(i32 %in) {
+; CHECK-LABEL: fabs_free(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.f32 %f1, [fabs_free_param_0];
+; CHECK-NEXT: abs.f32 %f2, %f1;
+; CHECK-NEXT: st.param.f32 [func_retval0], %f2;
+; CHECK-NEXT: ret;
+ %b = bitcast i32 %in to float
+ %f = call float @llvm.fabs.f32(float %b)
+ ret float %f
+}
+
+define float @fneg_free(i32 %in) {
+; CHECK-LABEL: fneg_free(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.f32 %f1, [fneg_free_param_0];
+; CHECK-NEXT: neg.f32 %f2, %f1;
+; CHECK-NEXT: st.param.f32 [func_retval0], %f2;
+; CHECK-NEXT: ret;
+ %b = bitcast i32 %in to float
+ %f = fneg float %b
+ ret float %f
+}
diff --git a/llvm/test/CodeGen/NVPTX/generic-to-nvvm.ll b/llvm/test/CodeGen/NVPTX/generic-to-nvvm.ll
index 43e4dfc..2b66311 100644
--- a/llvm/test/CodeGen/NVPTX/generic-to-nvvm.ll
+++ b/llvm/test/CodeGen/NVPTX/generic-to-nvvm.ll
@@ -12,7 +12,7 @@ target triple = "nvptx-nvidia-cuda"
@myconst = internal constant i32 420, align 4
-define void @foo(ptr %a, ptr %b) {
+define ptx_kernel void @foo(ptr %a, ptr %b) {
; Expect one load -- @myconst isn't loaded from, because we know its value
; statically.
; CHECK: ld.global.u32
@@ -24,7 +24,3 @@ define void @foo(ptr %a, ptr %b) {
store i32 %ld2, ptr %b
ret void
}
-
-
-!nvvm.annotations = !{!0}
-!0 = !{ptr @foo, !"kernel", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/i1-array-global.ll b/llvm/test/CodeGen/NVPTX/i1-array-global.ll
index ff3848b..20b376f 100644
--- a/llvm/test/CodeGen/NVPTX/i1-array-global.ll
+++ b/llvm/test/CodeGen/NVPTX/i1-array-global.ll
@@ -7,13 +7,9 @@ target triple = "nvptx-nvidia-cuda"
@global_cst = private constant [6 x i1] [i1 true, i1 false, i1 true, i1 false, i1 true, i1 false]
; CHECK: .global .align 1 .b8 global_cst[6] = {1, 0, 1, 0, 1}
-define void @kernel(i32 %i, ptr %out) {
+define ptx_kernel void @kernel(i32 %i, ptr %out) {
%5 = getelementptr inbounds i1, ptr @global_cst, i32 %i
%6 = load i1, ptr %5, align 1
store i1 %6, ptr %out, align 1
ret void
}
-
-!nvvm.annotations = !{!0}
-!0 = !{ptr @kernel, !"kernel", i32 1}
-
diff --git a/llvm/test/CodeGen/NVPTX/i1-ext-load.ll b/llvm/test/CodeGen/NVPTX/i1-ext-load.ll
index 83f8f80..f5f1dd9 100644
--- a/llvm/test/CodeGen/NVPTX/i1-ext-load.ll
+++ b/llvm/test/CodeGen/NVPTX/i1-ext-load.ll
@@ -5,7 +5,7 @@
target triple = "nvptx-nvidia-cuda"
-define void @foo(ptr noalias readonly %ptr, ptr noalias %retval) {
+define ptx_kernel void @foo(ptr noalias readonly %ptr, ptr noalias %retval) {
; CHECK-LABEL: foo(
; CHECK: .reg .b16 %rs<2>;
; CHECK: .reg .b32 %r<4>;
@@ -28,7 +28,3 @@ define void @foo(ptr noalias readonly %ptr, ptr noalias %retval) {
store i32 %and, ptr %retval
ret void
}
-
-!nvvm.annotations = !{!0}
-
-!0 = !{ptr @foo, !"kernel", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/i1-global.ll b/llvm/test/CodeGen/NVPTX/i1-global.ll
index 17af1fa..60d2ccd 100644
--- a/llvm/test/CodeGen/NVPTX/i1-global.ll
+++ b/llvm/test/CodeGen/NVPTX/i1-global.ll
@@ -8,13 +8,9 @@ target triple = "nvptx-nvidia-cuda"
@mypred = addrspace(1) global i1 true, align 1
-define void @foo(i1 %p, ptr %out) {
+define ptx_kernel void @foo(i1 %p, ptr %out) {
%ld = load i1, ptr addrspace(1) @mypred
%val = zext i1 %ld to i32
store i32 %val, ptr %out
ret void
}
-
-
-!nvvm.annotations = !{!0}
-!0 = !{ptr @foo, !"kernel", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/i1-param.ll b/llvm/test/CodeGen/NVPTX/i1-param.ll
index 3c74ee6..14d417b 100644
--- a/llvm/test/CodeGen/NVPTX/i1-param.ll
+++ b/llvm/test/CodeGen/NVPTX/i1-param.ll
@@ -9,12 +9,8 @@ target triple = "nvptx-nvidia-cuda"
; CHECK: .entry foo
; CHECK: .param .u8 foo_param_0
; CHECK: .param .u64 .ptr .align 1 foo_param_1
-define void @foo(i1 %p, ptr %out) {
+define ptx_kernel void @foo(i1 %p, ptr %out) {
%val = zext i1 %p to i32
store i32 %val, ptr %out
ret void
}
-
-
-!nvvm.annotations = !{!0}
-!0 = !{ptr @foo, !"kernel", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/intr-range.ll b/llvm/test/CodeGen/NVPTX/intr-range.ll
index 2f3e08a..86776ab 100644
--- a/llvm/test/CodeGen/NVPTX/intr-range.ll
+++ b/llvm/test/CodeGen/NVPTX/intr-range.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes --version 5
; RUN: opt < %s -S -mtriple=nvptx-nvidia-cuda -mcpu=sm_20 -passes=nvvm-intr-range | FileCheck %s
-define i32 @test_maxntid() {
-; CHECK-LABEL: define i32 @test_maxntid(
+define ptx_kernel i32 @test_maxntid() {
+; CHECK-LABEL: define ptx_kernel i32 @test_maxntid(
; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[TMP1:%.*]] = call range(i32 0, 96) i32 @llvm.nvvm.read.ptx.sreg.tid.x()
; CHECK-NEXT: [[TMP3:%.*]] = call range(i32 0, 96) i32 @llvm.nvvm.read.ptx.sreg.tid.y()
@@ -31,8 +31,8 @@ define i32 @test_maxntid() {
ret i32 %11
}
-define i32 @test_reqntid() {
-; CHECK-LABEL: define i32 @test_reqntid(
+define ptx_kernel i32 @test_reqntid() {
+; CHECK-LABEL: define ptx_kernel i32 @test_reqntid(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = call range(i32 0, 20) i32 @llvm.nvvm.read.ptx.sreg.tid.x()
; CHECK-NEXT: [[TMP5:%.*]] = call range(i32 0, 20) i32 @llvm.nvvm.read.ptx.sreg.tid.y()
@@ -64,8 +64,8 @@ define i32 @test_reqntid() {
;; A case like this could occur if a function with the sreg intrinsic was
;; inlined into a kernel where the tid metadata is present, ensure the range is
;; updated.
-define i32 @test_inlined() {
-; CHECK-LABEL: define i32 @test_inlined(
+define ptx_kernel i32 @test_inlined() {
+; CHECK-LABEL: define ptx_kernel i32 @test_inlined(
; CHECK-SAME: ) #[[ATTR0]] {
; CHECK-NEXT: [[TMP1:%.*]] = call range(i32 0, 4) i32 @llvm.nvvm.read.ptx.sreg.tid.x()
; CHECK-NEXT: ret i32 [[TMP1]]
@@ -83,6 +83,6 @@ declare i32 @llvm.nvvm.read.ptx.sreg.ntid.y()
declare i32 @llvm.nvvm.read.ptx.sreg.ntid.z()
!nvvm.annotations = !{!0, !1, !2}
-!0 = !{ptr @test_maxntid, !"kernel", i32 1, !"maxntidx", i32 32, !"maxntidz", i32 3}
-!1 = !{ptr @test_reqntid, !"kernel", i32 1, !"reqntidx", i32 20}
-!2 = !{ptr @test_inlined, !"kernel", i32 1, !"maxntidx", i32 4}
+!0 = !{ptr @test_maxntid, !"maxntidx", i32 32, !"maxntidz", i32 3}
+!1 = !{ptr @test_reqntid, !"reqntidx", i32 20}
+!2 = !{ptr @test_inlined, !"maxntidx", i32 4}
diff --git a/llvm/test/CodeGen/NVPTX/kernel-param-align.ll b/llvm/test/CodeGen/NVPTX/kernel-param-align.ll
index 93d428d..2889d2d 100644
--- a/llvm/test/CodeGen/NVPTX/kernel-param-align.ll
+++ b/llvm/test/CodeGen/NVPTX/kernel-param-align.ll
@@ -10,7 +10,7 @@
; CHECK: .param .u64 .ptr .shared .align 8 func_align_param_3
; CHECK: .param .u64 .ptr .const .align 16 func_align_param_4
; CHECK: .param .u64 .ptr .local .align 32 func_align_param_5
-define void @func_align(ptr nocapture readonly align 1 %input,
+define ptx_kernel void @func_align(ptr nocapture readonly align 1 %input,
ptr nocapture align 2 %out,
ptr addrspace(1) align 4 %global,
ptr addrspace(3) align 8 %shared,
@@ -27,7 +27,7 @@ entry:
; CHECK: .param .u64 .ptr .shared .align 1 func_noalign_param_3
; CHECK: .param .u64 .ptr .const .align 1 func_noalign_param_4
; CHECK: .param .u64 .ptr .local .align 1 func_noalign_param_5
-define void @func_noalign(ptr nocapture readonly %input,
+define ptx_kernel void @func_noalign(ptr nocapture readonly %input,
ptr nocapture %out,
ptr addrspace(1) %global,
ptr addrspace(3) %shared,
@@ -36,7 +36,3 @@ define void @func_noalign(ptr nocapture readonly %input,
entry:
ret void
}
-
-!nvvm.annotations = !{!0, !1}
-!0 = !{ptr @func_align, !"kernel", i32 1}
-!1 = !{ptr @func_noalign, !"kernel", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/load-with-non-coherent-cache.ll b/llvm/test/CodeGen/NVPTX/load-with-non-coherent-cache.ll
index bdaeccd5..dc1917f 100644
--- a/llvm/test/CodeGen/NVPTX/load-with-non-coherent-cache.ll
+++ b/llvm/test/CodeGen/NVPTX/load-with-non-coherent-cache.ll
@@ -10,7 +10,7 @@ target triple = "nvptx64-unknown-unknown"
; SM20: ld.global.f32
; SM35-LABEL: .visible .entry foo1(
; SM35: ld.global.nc.f32
-define void @foo1(ptr noalias readonly %from, ptr %to) {
+define ptx_kernel void @foo1(ptr noalias readonly %from, ptr %to) {
%1 = load float, ptr %from
store float %1, ptr %to
ret void
@@ -20,7 +20,7 @@ define void @foo1(ptr noalias readonly %from, ptr %to) {
; SM20: ld.global.f64
; SM35-LABEL: .visible .entry foo2(
; SM35: ld.global.nc.f64
-define void @foo2(ptr noalias readonly %from, ptr %to) {
+define ptx_kernel void @foo2(ptr noalias readonly %from, ptr %to) {
%1 = load double, ptr %from
store double %1, ptr %to
ret void
@@ -30,7 +30,7 @@ define void @foo2(ptr noalias readonly %from, ptr %to) {
; SM20: ld.global.u16
; SM35-LABEL: .visible .entry foo3(
; SM35: ld.global.nc.u16
-define void @foo3(ptr noalias readonly %from, ptr %to) {
+define ptx_kernel void @foo3(ptr noalias readonly %from, ptr %to) {
%1 = load i16, ptr %from
store i16 %1, ptr %to
ret void
@@ -40,7 +40,7 @@ define void @foo3(ptr noalias readonly %from, ptr %to) {
; SM20: ld.global.u32
; SM35-LABEL: .visible .entry foo4(
; SM35: ld.global.nc.u32
-define void @foo4(ptr noalias readonly %from, ptr %to) {
+define ptx_kernel void @foo4(ptr noalias readonly %from, ptr %to) {
%1 = load i32, ptr %from
store i32 %1, ptr %to
ret void
@@ -50,7 +50,7 @@ define void @foo4(ptr noalias readonly %from, ptr %to) {
; SM20: ld.global.u64
; SM35-LABEL: .visible .entry foo5(
; SM35: ld.global.nc.u64
-define void @foo5(ptr noalias readonly %from, ptr %to) {
+define ptx_kernel void @foo5(ptr noalias readonly %from, ptr %to) {
%1 = load i64, ptr %from
store i64 %1, ptr %to
ret void
@@ -63,7 +63,7 @@ define void @foo5(ptr noalias readonly %from, ptr %to) {
; SM35-LABEL: .visible .entry foo6(
; SM35: ld.global.nc.u64
; SM35: ld.global.nc.u64
-define void @foo6(ptr noalias readonly %from, ptr %to) {
+define ptx_kernel void @foo6(ptr noalias readonly %from, ptr %to) {
%1 = load i128, ptr %from
store i128 %1, ptr %to
ret void
@@ -73,7 +73,7 @@ define void @foo6(ptr noalias readonly %from, ptr %to) {
; SM20: ld.global.v2.u8
; SM35-LABEL: .visible .entry foo7(
; SM35: ld.global.nc.v2.u8
-define void @foo7(ptr noalias readonly %from, ptr %to) {
+define ptx_kernel void @foo7(ptr noalias readonly %from, ptr %to) {
%1 = load <2 x i8>, ptr %from
store <2 x i8> %1, ptr %to
ret void
@@ -83,7 +83,7 @@ define void @foo7(ptr noalias readonly %from, ptr %to) {
; SM20: ld.global.u32
; SM35-LABEL: .visible .entry foo8(
; SM35: ld.global.nc.u32
-define void @foo8(ptr noalias readonly %from, ptr %to) {
+define ptx_kernel void @foo8(ptr noalias readonly %from, ptr %to) {
%1 = load <2 x i16>, ptr %from
store <2 x i16> %1, ptr %to
ret void
@@ -93,7 +93,7 @@ define void @foo8(ptr noalias readonly %from, ptr %to) {
; SM20: ld.global.v2.u32
; SM35-LABEL: .visible .entry foo9(
; SM35: ld.global.nc.v2.u32
-define void @foo9(ptr noalias readonly %from, ptr %to) {
+define ptx_kernel void @foo9(ptr noalias readonly %from, ptr %to) {
%1 = load <2 x i32>, ptr %from
store <2 x i32> %1, ptr %to
ret void
@@ -103,7 +103,7 @@ define void @foo9(ptr noalias readonly %from, ptr %to) {
; SM20: ld.global.v2.u64
; SM35-LABEL: .visible .entry foo10(
; SM35: ld.global.nc.v2.u64
-define void @foo10(ptr noalias readonly %from, ptr %to) {
+define ptx_kernel void @foo10(ptr noalias readonly %from, ptr %to) {
%1 = load <2 x i64>, ptr %from
store <2 x i64> %1, ptr %to
ret void
@@ -113,7 +113,7 @@ define void @foo10(ptr noalias readonly %from, ptr %to) {
; SM20: ld.global.v2.f32
; SM35-LABEL: .visible .entry foo11(
; SM35: ld.global.nc.v2.f32
-define void @foo11(ptr noalias readonly %from, ptr %to) {
+define ptx_kernel void @foo11(ptr noalias readonly %from, ptr %to) {
%1 = load <2 x float>, ptr %from
store <2 x float> %1, ptr %to
ret void
@@ -123,7 +123,7 @@ define void @foo11(ptr noalias readonly %from, ptr %to) {
; SM20: ld.global.v2.f64
; SM35-LABEL: .visible .entry foo12(
; SM35: ld.global.nc.v2.f64
-define void @foo12(ptr noalias readonly %from, ptr %to) {
+define ptx_kernel void @foo12(ptr noalias readonly %from, ptr %to) {
%1 = load <2 x double>, ptr %from
store <2 x double> %1, ptr %to
ret void
@@ -133,7 +133,7 @@ define void @foo12(ptr noalias readonly %from, ptr %to) {
; SM20: ld.global.u32
; SM35-LABEL: .visible .entry foo13(
; SM35: ld.global.nc.u32
-define void @foo13(ptr noalias readonly %from, ptr %to) {
+define ptx_kernel void @foo13(ptr noalias readonly %from, ptr %to) {
%1 = load <4 x i8>, ptr %from
store <4 x i8> %1, ptr %to
ret void
@@ -143,7 +143,7 @@ define void @foo13(ptr noalias readonly %from, ptr %to) {
; SM20: ld.global.v4.u16
; SM35-LABEL: .visible .entry foo14(
; SM35: ld.global.nc.v4.u16
-define void @foo14(ptr noalias readonly %from, ptr %to) {
+define ptx_kernel void @foo14(ptr noalias readonly %from, ptr %to) {
%1 = load <4 x i16>, ptr %from
store <4 x i16> %1, ptr %to
ret void
@@ -153,7 +153,7 @@ define void @foo14(ptr noalias readonly %from, ptr %to) {
; SM20: ld.global.v4.u32
; SM35-LABEL: .visible .entry foo15(
; SM35: ld.global.nc.v4.u32
-define void @foo15(ptr noalias readonly %from, ptr %to) {
+define ptx_kernel void @foo15(ptr noalias readonly %from, ptr %to) {
%1 = load <4 x i32>, ptr %from
store <4 x i32> %1, ptr %to
ret void
@@ -163,7 +163,7 @@ define void @foo15(ptr noalias readonly %from, ptr %to) {
; SM20: ld.global.v4.f32
; SM35-LABEL: .visible .entry foo16(
; SM35: ld.global.nc.v4.f32
-define void @foo16(ptr noalias readonly %from, ptr %to) {
+define ptx_kernel void @foo16(ptr noalias readonly %from, ptr %to) {
%1 = load <4 x float>, ptr %from
store <4 x float> %1, ptr %to
ret void
@@ -175,7 +175,7 @@ define void @foo16(ptr noalias readonly %from, ptr %to) {
; SM35-LABEL: .visible .entry foo17(
; SM35: ld.global.nc.v2.f64
; SM35: ld.global.nc.v2.f64
-define void @foo17(ptr noalias readonly %from, ptr %to) {
+define ptx_kernel void @foo17(ptr noalias readonly %from, ptr %to) {
%1 = load <4 x double>, ptr %from
store <4 x double> %1, ptr %to
ret void
@@ -185,7 +185,7 @@ define void @foo17(ptr noalias readonly %from, ptr %to) {
; SM20: ld.global.u64
; SM35-LABEL: .visible .entry foo18(
; SM35: ld.global.nc.u64
-define void @foo18(ptr noalias readonly %from, ptr %to) {
+define ptx_kernel void @foo18(ptr noalias readonly %from, ptr %to) {
%1 = load ptr, ptr %from
store ptr %1, ptr %to
ret void
@@ -196,7 +196,7 @@ define void @foo18(ptr noalias readonly %from, ptr %to) {
; SM20: ld.global.f32
; SM35-LABEL: .visible .entry foo19(
; SM35: ld.global.nc.f32
-define void @foo19(ptr noalias readonly %from, ptr %to, i32 %n) {
+define ptx_kernel void @foo19(ptr noalias readonly %from, ptr %to, i32 %n) {
entry:
br label %loop
@@ -243,24 +243,3 @@ define void @notkernel2(ptr addrspace(1) noalias readonly %from, ptr %to) {
store float %1, ptr %to
ret void
}
-
-!nvvm.annotations = !{!1 ,!2 ,!3 ,!4 ,!5 ,!6, !7 ,!8 ,!9 ,!10 ,!11 ,!12, !13, !14, !15, !16, !17, !18, !19}
-!1 = !{ptr @foo1, !"kernel", i32 1}
-!2 = !{ptr @foo2, !"kernel", i32 1}
-!3 = !{ptr @foo3, !"kernel", i32 1}
-!4 = !{ptr @foo4, !"kernel", i32 1}
-!5 = !{ptr @foo5, !"kernel", i32 1}
-!6 = !{ptr @foo6, !"kernel", i32 1}
-!7 = !{ptr @foo7, !"kernel", i32 1}
-!8 = !{ptr @foo8, !"kernel", i32 1}
-!9 = !{ptr @foo9, !"kernel", i32 1}
-!10 = !{ptr @foo10, !"kernel", i32 1}
-!11 = !{ptr @foo11, !"kernel", i32 1}
-!12 = !{ptr @foo12, !"kernel", i32 1}
-!13 = !{ptr @foo13, !"kernel", i32 1}
-!14 = !{ptr @foo14, !"kernel", i32 1}
-!15 = !{ptr @foo15, !"kernel", i32 1}
-!16 = !{ptr @foo16, !"kernel", i32 1}
-!17 = !{ptr @foo17, !"kernel", i32 1}
-!18 = !{ptr @foo18, !"kernel", i32 1}
-!19 = !{ptr @foo19, !"kernel", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/local-stack-frame.ll b/llvm/test/CodeGen/NVPTX/local-stack-frame.ll
index e42f230..f21ff97 100644
--- a/llvm/test/CodeGen/NVPTX/local-stack-frame.ll
+++ b/llvm/test/CodeGen/NVPTX/local-stack-frame.ll
@@ -29,7 +29,7 @@ define void @foo(i32 %a) {
; PTX64: ld.param.u32 %r{{[0-9]+}}, [foo2_param_0];
; PTX64: add.u64 %rd[[SP_REG:[0-9]+]], %SPL, 0;
; PTX64: st.local.u32 [%rd[[SP_REG]]], %r{{[0-9]+}};
-define void @foo2(i32 %a) {
+define ptx_kernel void @foo2(i32 %a) {
%local = alloca i32, align 4
store i32 %a, ptr %local
call void @bar(ptr %local)
@@ -38,8 +38,6 @@ define void @foo2(i32 %a) {
declare void @bar(ptr %a)
-!nvvm.annotations = !{!0}
-!0 = !{ptr @foo2, !"kernel", i32 1}
; PTX32: mov.u32 %SPL, __local_depot{{[0-9]+}};
; PTX32-NOT: cvta.local.u32 %SP, %SPL;
diff --git a/llvm/test/CodeGen/NVPTX/lower-alloca.ll b/llvm/test/CodeGen/NVPTX/lower-alloca.ll
index 8f2d551..530b48b 100644
--- a/llvm/test/CodeGen/NVPTX/lower-alloca.ll
+++ b/llvm/test/CodeGen/NVPTX/lower-alloca.ll
@@ -6,7 +6,7 @@
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:64-v128:128:128-n16:32:64"
target triple = "nvptx64-unknown-unknown"
-define void @kernel() {
+define ptx_kernel void @kernel() {
; LABEL: @lower_alloca
; PTX-LABEL: .visible .entry kernel(
%A = alloca i32
@@ -37,7 +37,5 @@ define void @alloca_in_explicit_local_as() {
declare void @callee(ptr)
declare void @callee_addrspace5(ptr addrspace(5))
-!nvvm.annotations = !{!0}
!nvvm.annotations = !{!1}
-!0 = !{ptr @kernel, !"kernel", i32 1}
!1 = !{ptr @alloca_in_explicit_local_as, !"alloca_in_explicit_local_as", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/lower-args-gridconstant.ll b/llvm/test/CodeGen/NVPTX/lower-args-gridconstant.ll
index 9cfe919..208d4f0 100644
--- a/llvm/test/CodeGen/NVPTX/lower-args-gridconstant.ll
+++ b/llvm/test/CodeGen/NVPTX/lower-args-gridconstant.ll
@@ -29,7 +29,7 @@ define dso_local noundef i32 @non_kernel_function(ptr nocapture noundef readonly
; PTX-NEXT: .reg .pred %p<2>;
; PTX-NEXT: .reg .b16 %rs<3>;
; PTX-NEXT: .reg .b32 %r<11>;
-; PTX-NEXT: .reg .b64 %rd<10>;
+; PTX-NEXT: .reg .b64 %rd<9>;
; PTX-EMPTY:
; PTX-NEXT: // %bb.0: // %entry
; PTX-NEXT: mov.u64 %SPL, __local_depot0;
@@ -38,23 +38,22 @@ define dso_local noundef i32 @non_kernel_function(ptr nocapture noundef readonly
; PTX-NEXT: and.b16 %rs2, %rs1, 1;
; PTX-NEXT: setp.eq.b16 %p1, %rs2, 1;
; PTX-NEXT: ld.param.s32 %rd1, [non_kernel_function_param_2];
-; PTX-NEXT: add.u64 %rd2, %SP, 0;
-; PTX-NEXT: or.b64 %rd3, %rd2, 8;
-; PTX-NEXT: ld.param.u64 %rd4, [non_kernel_function_param_0+8];
-; PTX-NEXT: st.u64 [%rd3], %rd4;
-; PTX-NEXT: ld.param.u64 %rd5, [non_kernel_function_param_0];
-; PTX-NEXT: st.u64 [%SP], %rd5;
-; PTX-NEXT: mov.u64 %rd6, gi;
-; PTX-NEXT: cvta.global.u64 %rd7, %rd6;
-; PTX-NEXT: selp.b64 %rd8, %rd2, %rd7, %p1;
-; PTX-NEXT: add.s64 %rd9, %rd8, %rd1;
-; PTX-NEXT: ld.u8 %r1, [%rd9];
-; PTX-NEXT: ld.u8 %r2, [%rd9+1];
+; PTX-NEXT: ld.param.u64 %rd2, [non_kernel_function_param_0+8];
+; PTX-NEXT: st.u64 [%SP+8], %rd2;
+; PTX-NEXT: ld.param.u64 %rd3, [non_kernel_function_param_0];
+; PTX-NEXT: st.u64 [%SP], %rd3;
+; PTX-NEXT: mov.u64 %rd4, gi;
+; PTX-NEXT: cvta.global.u64 %rd5, %rd4;
+; PTX-NEXT: add.u64 %rd6, %SP, 0;
+; PTX-NEXT: selp.b64 %rd7, %rd6, %rd5, %p1;
+; PTX-NEXT: add.s64 %rd8, %rd7, %rd1;
+; PTX-NEXT: ld.u8 %r1, [%rd8];
+; PTX-NEXT: ld.u8 %r2, [%rd8+1];
; PTX-NEXT: shl.b32 %r3, %r2, 8;
; PTX-NEXT: or.b32 %r4, %r3, %r1;
-; PTX-NEXT: ld.u8 %r5, [%rd9+2];
+; PTX-NEXT: ld.u8 %r5, [%rd8+2];
; PTX-NEXT: shl.b32 %r6, %r5, 16;
-; PTX-NEXT: ld.u8 %r7, [%rd9+3];
+; PTX-NEXT: ld.u8 %r7, [%rd8+3];
; PTX-NEXT: shl.b32 %r8, %r7, 24;
; PTX-NEXT: or.b32 %r9, %r8, %r6;
; PTX-NEXT: or.b32 %r10, %r9, %r4;
@@ -68,7 +67,7 @@ entry:
ret i32 %0, !dbg !23
}
-define void @grid_const_int(ptr byval(i32) align 4 %input1, i32 %input2, ptr %out, i32 %n) {
+define ptx_kernel void @grid_const_int(ptr byval(i32) align 4 %input1, i32 %input2, ptr %out, i32 %n) {
; PTX-LABEL: grid_const_int(
; PTX: {
; PTX-NEXT: .reg .b32 %r<4>;
@@ -82,7 +81,7 @@ define void @grid_const_int(ptr byval(i32) align 4 %input1, i32 %input2, ptr %ou
; PTX-NEXT: add.s32 %r3, %r2, %r1;
; PTX-NEXT: st.global.u32 [%rd2], %r3;
; PTX-NEXT: ret;
-; OPT-LABEL: define void @grid_const_int(
+; OPT-LABEL: define ptx_kernel void @grid_const_int(
; OPT-SAME: ptr byval(i32) align 4 [[INPUT1:%.*]], i32 [[INPUT2:%.*]], ptr [[OUT:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
; OPT-NEXT: [[OUT2:%.*]] = addrspacecast ptr [[OUT]] to ptr addrspace(1)
; OPT-NEXT: [[OUT3:%.*]] = addrspacecast ptr addrspace(1) [[OUT2]] to ptr
@@ -91,6 +90,7 @@ define void @grid_const_int(ptr byval(i32) align 4 %input1, i32 %input2, ptr %ou
; OPT-NEXT: [[ADD:%.*]] = add i32 [[TMP]], [[INPUT2]]
; OPT-NEXT: store i32 [[ADD]], ptr [[OUT3]], align 4
; OPT-NEXT: ret void
+;
%tmp = load i32, ptr %input1, align 4
%add = add i32 %tmp, %input2
store i32 %add, ptr %out
@@ -99,7 +99,7 @@ define void @grid_const_int(ptr byval(i32) align 4 %input1, i32 %input2, ptr %ou
%struct.s = type { i32, i32 }
-define void @grid_const_struct(ptr byval(%struct.s) align 4 %input, ptr %out){
+define ptx_kernel void @grid_const_struct(ptr byval(%struct.s) align 4 %input, ptr %out){
; PTX-LABEL: grid_const_struct(
; PTX: {
; PTX-NEXT: .reg .b32 %r<4>;
@@ -113,7 +113,7 @@ define void @grid_const_struct(ptr byval(%struct.s) align 4 %input, ptr %out){
; PTX-NEXT: add.s32 %r3, %r1, %r2;
; PTX-NEXT: st.global.u32 [%rd2], %r3;
; PTX-NEXT: ret;
-; OPT-LABEL: define void @grid_const_struct(
+; OPT-LABEL: define ptx_kernel void @grid_const_struct(
; OPT-SAME: ptr byval([[STRUCT_S:%.*]]) align 4 [[INPUT:%.*]], ptr [[OUT:%.*]]) #[[ATTR0]] {
; OPT-NEXT: [[OUT4:%.*]] = addrspacecast ptr [[OUT]] to ptr addrspace(1)
; OPT-NEXT: [[OUT5:%.*]] = addrspacecast ptr addrspace(1) [[OUT4]] to ptr
@@ -125,6 +125,7 @@ define void @grid_const_struct(ptr byval(%struct.s) align 4 %input, ptr %out){
; OPT-NEXT: [[ADD:%.*]] = add i32 [[TMP1]], [[TMP2]]
; OPT-NEXT: store i32 [[ADD]], ptr [[OUT5]], align 4
; OPT-NEXT: ret void
+;
%gep1 = getelementptr inbounds %struct.s, ptr %input, i32 0, i32 0
%gep2 = getelementptr inbounds %struct.s, ptr %input, i32 0, i32 1
%int1 = load i32, ptr %gep1
@@ -134,7 +135,7 @@ define void @grid_const_struct(ptr byval(%struct.s) align 4 %input, ptr %out){
ret void
}
-define void @grid_const_escape(ptr byval(%struct.s) align 4 %input) {
+define ptx_kernel void @grid_const_escape(ptr byval(%struct.s) align 4 %input) {
; PTX-LABEL: grid_const_escape(
; PTX: {
; PTX-NEXT: .reg .b32 %r<3>;
@@ -159,17 +160,18 @@ define void @grid_const_escape(ptr byval(%struct.s) align 4 %input) {
; PTX-NEXT: ld.param.b32 %r1, [retval0];
; PTX-NEXT: } // callseq 0
; PTX-NEXT: ret;
-; OPT-LABEL: define void @grid_const_escape(
+; OPT-LABEL: define ptx_kernel void @grid_const_escape(
; OPT-SAME: ptr byval([[STRUCT_S:%.*]]) align 4 [[INPUT:%.*]]) #[[ATTR0]] {
; OPT-NEXT: [[INPUT_PARAM:%.*]] = addrspacecast ptr [[INPUT]] to ptr addrspace(101)
; OPT-NEXT: [[INPUT_PARAM_GEN:%.*]] = call ptr @llvm.nvvm.ptr.param.to.gen.p0.p101(ptr addrspace(101) [[INPUT_PARAM]])
; OPT-NEXT: [[CALL:%.*]] = call i32 @escape(ptr [[INPUT_PARAM_GEN]])
; OPT-NEXT: ret void
+;
%call = call i32 @escape(ptr %input)
ret void
}
-define void @multiple_grid_const_escape(ptr byval(%struct.s) align 4 %input, i32 %a, ptr byval(i32) align 4 %b) {
+define ptx_kernel void @multiple_grid_const_escape(ptr byval(%struct.s) align 4 %input, i32 %a, ptr byval(i32) align 4 %b) {
; PTX-LABEL: multiple_grid_const_escape(
; PTX: {
; PTX-NEXT: .local .align 4 .b8 __local_depot4[4];
@@ -212,7 +214,7 @@ define void @multiple_grid_const_escape(ptr byval(%struct.s) align 4 %input, i32
; PTX-NEXT: ld.param.b32 %r2, [retval0];
; PTX-NEXT: } // callseq 1
; PTX-NEXT: ret;
-; OPT-LABEL: define void @multiple_grid_const_escape(
+; OPT-LABEL: define ptx_kernel void @multiple_grid_const_escape(
; OPT-SAME: ptr byval([[STRUCT_S:%.*]]) align 4 [[INPUT:%.*]], i32 [[A:%.*]], ptr byval(i32) align 4 [[B:%.*]]) #[[ATTR0]] {
; OPT-NEXT: [[B_PARAM:%.*]] = addrspacecast ptr [[B]] to ptr addrspace(101)
; OPT-NEXT: [[B_PARAM_GEN:%.*]] = call ptr @llvm.nvvm.ptr.param.to.gen.p0.p101(ptr addrspace(101) [[B_PARAM]])
@@ -222,13 +224,14 @@ define void @multiple_grid_const_escape(ptr byval(%struct.s) align 4 %input, i32
; OPT-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
; OPT-NEXT: [[CALL:%.*]] = call i32 @escape3(ptr [[INPUT_PARAM_GEN]], ptr [[A_ADDR]], ptr [[B_PARAM_GEN]])
; OPT-NEXT: ret void
+;
%a.addr = alloca i32, align 4
store i32 %a, ptr %a.addr, align 4
%call = call i32 @escape3(ptr %input, ptr %a.addr, ptr %b)
ret void
}
-define void @grid_const_memory_escape(ptr byval(%struct.s) align 4 %input, ptr %addr) {
+define ptx_kernel void @grid_const_memory_escape(ptr byval(%struct.s) align 4 %input, ptr %addr) {
; PTX-LABEL: grid_const_memory_escape(
; PTX: {
; PTX-NEXT: .reg .b64 %rd<6>;
@@ -241,7 +244,7 @@ define void @grid_const_memory_escape(ptr byval(%struct.s) align 4 %input, ptr %
; PTX-NEXT: cvta.param.u64 %rd5, %rd4;
; PTX-NEXT: st.global.u64 [%rd3], %rd5;
; PTX-NEXT: ret;
-; OPT-LABEL: define void @grid_const_memory_escape(
+; OPT-LABEL: define ptx_kernel void @grid_const_memory_escape(
; OPT-SAME: ptr byval([[STRUCT_S:%.*]]) align 4 [[INPUT:%.*]], ptr [[ADDR:%.*]]) #[[ATTR0]] {
; OPT-NEXT: [[ADDR4:%.*]] = addrspacecast ptr [[ADDR]] to ptr addrspace(1)
; OPT-NEXT: [[ADDR5:%.*]] = addrspacecast ptr addrspace(1) [[ADDR4]] to ptr
@@ -249,11 +252,12 @@ define void @grid_const_memory_escape(ptr byval(%struct.s) align 4 %input, ptr %
; OPT-NEXT: [[INPUT1:%.*]] = call ptr @llvm.nvvm.ptr.param.to.gen.p0.p101(ptr addrspace(101) [[INPUT_PARAM]])
; OPT-NEXT: store ptr [[INPUT1]], ptr [[ADDR5]], align 8
; OPT-NEXT: ret void
+;
store ptr %input, ptr %addr, align 8
ret void
}
-define void @grid_const_inlineasm_escape(ptr byval(%struct.s) align 4 %input, ptr %result) {
+define ptx_kernel void @grid_const_inlineasm_escape(ptr byval(%struct.s) align 4 %input, ptr %result) {
; PTX-LABEL: grid_const_inlineasm_escape(
; PTX: {
; PTX-NEXT: .reg .b64 %rd<8>;
@@ -271,7 +275,7 @@ define void @grid_const_inlineasm_escape(ptr byval(%struct.s) align 4 %input, pt
; PTX-NEXT: st.global.u64 [%rd6], %rd1;
; PTX-NEXT: ret;
; PTX-NOT .local
-; OPT-LABEL: define void @grid_const_inlineasm_escape(
+; OPT-LABEL: define ptx_kernel void @grid_const_inlineasm_escape(
; OPT-SAME: ptr byval([[STRUCT_S:%.*]]) align 4 [[INPUT:%.*]], ptr [[RESULT:%.*]]) #[[ATTR0]] {
; OPT-NEXT: [[RESULT4:%.*]] = addrspacecast ptr [[RESULT]] to ptr addrspace(1)
; OPT-NEXT: [[RESULT5:%.*]] = addrspacecast ptr addrspace(1) [[RESULT4]] to ptr
@@ -282,6 +286,7 @@ define void @grid_const_inlineasm_escape(ptr byval(%struct.s) align 4 %input, pt
; OPT-NEXT: [[TMP2:%.*]] = call i64 asm "add.s64 $0, $1, $2
; OPT-NEXT: store i64 [[TMP2]], ptr [[RESULT5]], align 8
; OPT-NEXT: ret void
+;
%tmpptr1 = getelementptr inbounds %struct.s, ptr %input, i32 0, i32 0
%tmpptr2 = getelementptr inbounds %struct.s, ptr %input, i32 0, i32 1
%1 = call i64 asm "add.s64 $0, $1, $2;", "=l,l,l"(ptr %tmpptr1, ptr %tmpptr2) #1
@@ -289,7 +294,7 @@ define void @grid_const_inlineasm_escape(ptr byval(%struct.s) align 4 %input, pt
ret void
}
-define void @grid_const_partial_escape(ptr byval(i32) %input, ptr %output) {
+define ptx_kernel void @grid_const_partial_escape(ptr byval(i32) %input, ptr %output) {
; PTX-LABEL: grid_const_partial_escape(
; PTX: {
; PTX-NEXT: .reg .b32 %r<5>;
@@ -319,7 +324,7 @@ define void @grid_const_partial_escape(ptr byval(i32) %input, ptr %output) {
; PTX-NEXT: ld.param.b32 %r3, [retval0];
; PTX-NEXT: } // callseq 2
; PTX-NEXT: ret;
-; OPT-LABEL: define void @grid_const_partial_escape(
+; OPT-LABEL: define ptx_kernel void @grid_const_partial_escape(
; OPT-SAME: ptr byval(i32) [[INPUT:%.*]], ptr [[OUTPUT:%.*]]) #[[ATTR0]] {
; OPT-NEXT: [[OUTPUT4:%.*]] = addrspacecast ptr [[OUTPUT]] to ptr addrspace(1)
; OPT-NEXT: [[OUTPUT5:%.*]] = addrspacecast ptr addrspace(1) [[OUTPUT4]] to ptr
@@ -330,6 +335,7 @@ define void @grid_const_partial_escape(ptr byval(i32) %input, ptr %output) {
; OPT-NEXT: store i32 [[TWICE]], ptr [[OUTPUT5]], align 4
; OPT-NEXT: [[CALL:%.*]] = call i32 @escape(ptr [[INPUT1_GEN]])
; OPT-NEXT: ret void
+;
%val = load i32, ptr %input
%twice = add i32 %val, %val
store i32 %twice, ptr %output
@@ -337,7 +343,7 @@ define void @grid_const_partial_escape(ptr byval(i32) %input, ptr %output) {
ret void
}
-define i32 @grid_const_partial_escapemem(ptr byval(%struct.s) %input, ptr %output) {
+define ptx_kernel i32 @grid_const_partial_escapemem(ptr byval(%struct.s) %input, ptr %output) {
; PTX-LABEL: grid_const_partial_escapemem(
; PTX: {
; PTX-NEXT: .reg .b32 %r<6>;
@@ -369,7 +375,7 @@ define i32 @grid_const_partial_escapemem(ptr byval(%struct.s) %input, ptr %outpu
; PTX-NEXT: } // callseq 3
; PTX-NEXT: st.param.b32 [func_retval0], %r3;
; PTX-NEXT: ret;
-; OPT-LABEL: define i32 @grid_const_partial_escapemem(
+; OPT-LABEL: define ptx_kernel i32 @grid_const_partial_escapemem(
; OPT-SAME: ptr byval([[STRUCT_S:%.*]]) [[INPUT:%.*]], ptr [[OUTPUT:%.*]]) #[[ATTR0]] {
; OPT-NEXT: [[OUTPUT4:%.*]] = addrspacecast ptr [[OUTPUT]] to ptr addrspace(1)
; OPT-NEXT: [[OUTPUT5:%.*]] = addrspacecast ptr addrspace(1) [[OUTPUT4]] to ptr
@@ -383,6 +389,7 @@ define i32 @grid_const_partial_escapemem(ptr byval(%struct.s) %input, ptr %outpu
; OPT-NEXT: [[ADD:%.*]] = add i32 [[VAL1]], [[VAL2]]
; OPT-NEXT: [[CALL2:%.*]] = call i32 @escape(ptr [[PTR1]])
; OPT-NEXT: ret i32 [[ADD]]
+;
%ptr1 = getelementptr inbounds %struct.s, ptr %input, i32 0, i32 0
%val1 = load i32, ptr %ptr1
%ptr2 = getelementptr inbounds %struct.s, ptr %input, i32 0, i32 1
@@ -393,7 +400,7 @@ define i32 @grid_const_partial_escapemem(ptr byval(%struct.s) %input, ptr %outpu
ret i32 %add
}
-define void @grid_const_phi(ptr byval(%struct.s) align 4 %input1, ptr %inout) {
+define ptx_kernel void @grid_const_phi(ptr byval(%struct.s) align 4 %input1, ptr %inout) {
; PTX-LABEL: grid_const_phi(
; PTX: {
; PTX-NEXT: .reg .pred %p<2>;
@@ -415,7 +422,7 @@ define void @grid_const_phi(ptr byval(%struct.s) align 4 %input1, ptr %inout) {
; PTX-NEXT: ld.u32 %r2, [%rd8];
; PTX-NEXT: st.global.u32 [%rd1], %r2;
; PTX-NEXT: ret;
-; OPT-LABEL: define void @grid_const_phi(
+; OPT-LABEL: define ptx_kernel void @grid_const_phi(
; OPT-SAME: ptr byval([[STRUCT_S:%.*]]) align 4 [[INPUT1:%.*]], ptr [[INOUT:%.*]]) #[[ATTR0]] {
; OPT-NEXT: [[INOUT1:%.*]] = addrspacecast ptr [[INOUT]] to ptr addrspace(1)
; OPT-NEXT: [[INOUT2:%.*]] = addrspacecast ptr addrspace(1) [[INOUT1]] to ptr
@@ -435,6 +442,7 @@ define void @grid_const_phi(ptr byval(%struct.s) align 4 %input1, ptr %inout) {
; OPT-NEXT: [[VALLOADED:%.*]] = load i32, ptr [[PTRNEW]], align 4
; OPT-NEXT: store i32 [[VALLOADED]], ptr [[INOUT2]], align 4
; OPT-NEXT: ret void
+;
%val = load i32, ptr %inout
%less = icmp slt i32 %val, 0
@@ -453,7 +461,7 @@ merge:
}
; NOTE: %input2 is *not* grid_constant
-define void @grid_const_phi_ngc(ptr byval(%struct.s) align 4 %input1, ptr byval(%struct.s) %input2, ptr %inout) {
+define ptx_kernel void @grid_const_phi_ngc(ptr byval(%struct.s) align 4 %input1, ptr byval(%struct.s) %input2, ptr %inout) {
; PTX-LABEL: grid_const_phi_ngc(
; PTX: {
; PTX-NEXT: .reg .pred %p<2>;
@@ -478,7 +486,7 @@ define void @grid_const_phi_ngc(ptr byval(%struct.s) align 4 %input1, ptr byval(
; PTX-NEXT: ld.u32 %r2, [%rd11];
; PTX-NEXT: st.global.u32 [%rd1], %r2;
; PTX-NEXT: ret;
-; OPT-LABEL: define void @grid_const_phi_ngc(
+; OPT-LABEL: define ptx_kernel void @grid_const_phi_ngc(
; OPT-SAME: ptr byval([[STRUCT_S:%.*]]) align 4 [[INPUT1:%.*]], ptr byval([[STRUCT_S]]) [[INPUT2:%.*]], ptr [[INOUT:%.*]]) #[[ATTR0]] {
; OPT-NEXT: [[INOUT1:%.*]] = addrspacecast ptr [[INOUT]] to ptr addrspace(1)
; OPT-NEXT: [[INOUT2:%.*]] = addrspacecast ptr addrspace(1) [[INOUT1]] to ptr
@@ -500,6 +508,7 @@ define void @grid_const_phi_ngc(ptr byval(%struct.s) align 4 %input1, ptr byval(
; OPT-NEXT: [[VALLOADED:%.*]] = load i32, ptr [[PTRNEW]], align 4
; OPT-NEXT: store i32 [[VALLOADED]], ptr [[INOUT2]], align 4
; OPT-NEXT: ret void
+;
%val = load i32, ptr %inout
%less = icmp slt i32 %val, 0
br i1 %less, label %first, label %second
@@ -517,7 +526,7 @@ merge:
}
; NOTE: %input2 is *not* grid_constant
-define void @grid_const_select(ptr byval(i32) align 4 %input1, ptr byval(i32) %input2, ptr %inout) {
+define ptx_kernel void @grid_const_select(ptr byval(i32) align 4 %input1, ptr byval(i32) %input2, ptr %inout) {
; PTX-LABEL: grid_const_select(
; PTX: {
; PTX-NEXT: .reg .pred %p<2>;
@@ -539,7 +548,7 @@ define void @grid_const_select(ptr byval(i32) align 4 %input1, ptr byval(i32) %i
; PTX-NEXT: ld.u32 %r2, [%rd9];
; PTX-NEXT: st.global.u32 [%rd3], %r2;
; PTX-NEXT: ret;
-; OPT-LABEL: define void @grid_const_select(
+; OPT-LABEL: define ptx_kernel void @grid_const_select(
; OPT-SAME: ptr byval(i32) align 4 [[INPUT1:%.*]], ptr byval(i32) [[INPUT2:%.*]], ptr [[INOUT:%.*]]) #[[ATTR0]] {
; OPT-NEXT: [[INOUT1:%.*]] = addrspacecast ptr [[INOUT]] to ptr addrspace(1)
; OPT-NEXT: [[INOUT2:%.*]] = addrspacecast ptr addrspace(1) [[INOUT1]] to ptr
@@ -553,6 +562,7 @@ define void @grid_const_select(ptr byval(i32) align 4 %input1, ptr byval(i32) %i
; OPT-NEXT: [[VALLOADED:%.*]] = load i32, ptr [[PTRNEW]], align 4
; OPT-NEXT: store i32 [[VALLOADED]], ptr [[INOUT2]], align 4
; OPT-NEXT: ret void
+;
%val = load i32, ptr %inout
%less = icmp slt i32 %val, 0
%ptrnew = select i1 %less, ptr %input1, ptr %input2
@@ -561,7 +571,7 @@ define void @grid_const_select(ptr byval(i32) align 4 %input1, ptr byval(i32) %i
ret void
}
-define i32 @grid_const_ptrtoint(ptr byval(i32) %input) {
+define ptx_kernel i32 @grid_const_ptrtoint(ptr byval(i32) %input) {
; PTX-LABEL: grid_const_ptrtoint(
; PTX: {
; PTX-NEXT: .reg .b32 %r<4>;
@@ -576,7 +586,7 @@ define i32 @grid_const_ptrtoint(ptr byval(i32) %input) {
; PTX-NEXT: add.s32 %r3, %r1, %r2;
; PTX-NEXT: st.param.b32 [func_retval0], %r3;
; PTX-NEXT: ret;
-; OPT-LABEL: define i32 @grid_const_ptrtoint(
+; OPT-LABEL: define ptx_kernel i32 @grid_const_ptrtoint(
; OPT-SAME: ptr byval(i32) align 4 [[INPUT:%.*]]) #[[ATTR0]] {
; OPT-NEXT: [[INPUT2:%.*]] = addrspacecast ptr [[INPUT]] to ptr addrspace(101)
; OPT-NEXT: [[INPUT3:%.*]] = load i32, ptr addrspace(101) [[INPUT2]], align 4
@@ -584,6 +594,7 @@ define i32 @grid_const_ptrtoint(ptr byval(i32) %input) {
; OPT-NEXT: [[PTRVAL:%.*]] = ptrtoint ptr [[INPUT1]] to i32
; OPT-NEXT: [[KEEPALIVE:%.*]] = add i32 [[INPUT3]], [[PTRVAL]]
; OPT-NEXT: ret i32 [[KEEPALIVE]]
+;
%val = load i32, ptr %input
%ptrval = ptrtoint ptr %input to i32
%keepalive = add i32 %val, %ptrval
@@ -598,40 +609,40 @@ declare dso_local ptr @escape3(ptr, ptr, ptr) local_unnamed_addr
!nvvm.annotations = !{!0, !1, !2, !3, !4, !5, !6, !7, !8, !9, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !20, !21, !22, !23}
-!0 = !{ptr @grid_const_int, !"kernel", i32 1, !"grid_constant", !1}
+!0 = !{ptr @grid_const_int, !"grid_constant", !1}
!1 = !{i32 1}
-!2 = !{ptr @grid_const_struct, !"kernel", i32 1, !"grid_constant", !3}
+!2 = !{ptr @grid_const_struct, !"grid_constant", !3}
!3 = !{i32 1}
-!4 = !{ptr @grid_const_escape, !"kernel", i32 1, !"grid_constant", !5}
+!4 = !{ptr @grid_const_escape, !"grid_constant", !5}
!5 = !{i32 1}
-!6 = !{ptr @multiple_grid_const_escape, !"kernel", i32 1, !"grid_constant", !7}
+!6 = !{ptr @multiple_grid_const_escape, !"grid_constant", !7}
!7 = !{i32 1, i32 3}
-!8 = !{ptr @grid_const_memory_escape, !"kernel", i32 1, !"grid_constant", !9}
+!8 = !{ptr @grid_const_memory_escape, !"grid_constant", !9}
!9 = !{i32 1}
-!10 = !{ptr @grid_const_inlineasm_escape, !"kernel", i32 1, !"grid_constant", !11}
+!10 = !{ptr @grid_const_inlineasm_escape, !"grid_constant", !11}
!11 = !{i32 1}
-!12 = !{ptr @grid_const_partial_escape, !"kernel", i32 1, !"grid_constant", !13}
+!12 = !{ptr @grid_const_partial_escape, !"grid_constant", !13}
!13 = !{i32 1}
-!14 = !{ptr @grid_const_partial_escapemem, !"kernel", i32 1, !"grid_constant", !15}
+!14 = !{ptr @grid_const_partial_escapemem, !"grid_constant", !15}
!15 = !{i32 1}
-!16 = !{ptr @grid_const_phi, !"kernel", i32 1, !"grid_constant", !17}
+!16 = !{ptr @grid_const_phi, !"grid_constant", !17}
!17 = !{i32 1}
-!18 = !{ptr @grid_const_phi_ngc, !"kernel", i32 1, !"grid_constant", !19}
+!18 = !{ptr @grid_const_phi_ngc, !"grid_constant", !19}
!19 = !{i32 1}
-!20 = !{ptr @grid_const_select, !"kernel", i32 1, !"grid_constant", !21}
+!20 = !{ptr @grid_const_select, !"grid_constant", !21}
!21 = !{i32 1}
-!22 = !{ptr @grid_const_ptrtoint, !"kernel", i32 1, !"grid_constant", !23}
+!22 = !{ptr @grid_const_ptrtoint, !"grid_constant", !23}
!23 = !{i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/lower-args.ll b/llvm/test/CodeGen/NVPTX/lower-args.ll
index eba4f27..269bba7 100644
--- a/llvm/test/CodeGen/NVPTX/lower-args.ll
+++ b/llvm/test/CodeGen/NVPTX/lower-args.ll
@@ -65,7 +65,7 @@ define void @load_padding(ptr nocapture readonly byval(%class.padded) %arg) {
}
; COMMON-LABEL: ptr_generic
-define void @ptr_generic(ptr %out, ptr %in) {
+define ptx_kernel void @ptr_generic(ptr %out, ptr %in) {
; IRC: %in3 = addrspacecast ptr %in to ptr addrspace(1)
; IRC: %in4 = addrspacecast ptr addrspace(1) %in3 to ptr
; IRC: %out1 = addrspacecast ptr %out to ptr addrspace(1)
@@ -87,7 +87,7 @@ define void @ptr_generic(ptr %out, ptr %in) {
}
; COMMON-LABEL: ptr_nongeneric
-define void @ptr_nongeneric(ptr addrspace(1) %out, ptr addrspace(4) %in) {
+define ptx_kernel void @ptr_nongeneric(ptr addrspace(1) %out, ptr addrspace(4) %in) {
; IR-NOT: addrspacecast
; PTX-NOT: cvta.to.global
; PTX: ld.const.u32
@@ -98,7 +98,7 @@ define void @ptr_nongeneric(ptr addrspace(1) %out, ptr addrspace(4) %in) {
}
; COMMON-LABEL: ptr_as_int
- define void @ptr_as_int(i64 noundef %i, i32 noundef %v) {
+ define ptx_kernel void @ptr_as_int(i64 noundef %i, i32 noundef %v) {
; IR: [[P:%.*]] = inttoptr i64 %i to ptr
; IRC: [[P1:%.*]] = addrspacecast ptr [[P]] to ptr addrspace(1)
; IRC: addrspacecast ptr addrspace(1) [[P1]] to ptr
@@ -121,7 +121,7 @@ define void @ptr_nongeneric(ptr addrspace(1) %out, ptr addrspace(4) %in) {
%struct.S = type { i64 }
; COMMON-LABEL: ptr_as_int_aggr
-define void @ptr_as_int_aggr(ptr nocapture noundef readonly byval(%struct.S) align 8 %s, i32 noundef %v) {
+define ptx_kernel void @ptr_as_int_aggr(ptr nocapture noundef readonly byval(%struct.S) align 8 %s, i32 noundef %v) {
; IR: [[S:%.*]] = addrspacecast ptr %s to ptr addrspace(101)
; IR: [[I:%.*]] = load i64, ptr addrspace(101) [[S]], align 8
; IR: [[P0:%.*]] = inttoptr i64 [[I]] to ptr
@@ -146,8 +146,3 @@ define void @ptr_as_int_aggr(ptr nocapture noundef readonly byval(%struct.S) ali
; Function Attrs: convergent nounwind
declare dso_local ptr @escape(ptr) local_unnamed_addr
-!nvvm.annotations = !{!0, !1, !2, !3}
-!0 = !{ptr @ptr_generic, !"kernel", i32 1}
-!1 = !{ptr @ptr_nongeneric, !"kernel", i32 1}
-!2 = !{ptr @ptr_as_int, !"kernel", i32 1}
-!3 = !{ptr @ptr_as_int_aggr, !"kernel", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/lower-byval-args.ll b/llvm/test/CodeGen/NVPTX/lower-byval-args.ll
index 5c52626..26102722 100644
--- a/llvm/test/CodeGen/NVPTX/lower-byval-args.ll
+++ b/llvm/test/CodeGen/NVPTX/lower-byval-args.ll
@@ -24,8 +24,8 @@ declare void @llvm.memmove.p0.p0.i64(ptr nocapture writeonly, ptr nocapture read
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #2
; Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
-define dso_local void @read_only(ptr nocapture noundef writeonly %out, ptr nocapture noundef readonly byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
-; SM_60-LABEL: define dso_local void @read_only(
+define dso_local ptx_kernel void @read_only(ptr nocapture noundef writeonly %out, ptr nocapture noundef readonly byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
+; SM_60-LABEL: define dso_local ptx_kernel void @read_only(
; SM_60-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr nocapture noundef readonly byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
; SM_60-NEXT: [[ENTRY:.*:]]
; SM_60-NEXT: [[S3:%.*]] = addrspacecast ptr [[S]] to ptr addrspace(101)
@@ -35,7 +35,7 @@ define dso_local void @read_only(ptr nocapture noundef writeonly %out, ptr nocap
; SM_60-NEXT: store i32 [[I]], ptr [[OUT2]], align 4
; SM_60-NEXT: ret void
;
-; SM_70-LABEL: define dso_local void @read_only(
+; SM_70-LABEL: define dso_local ptx_kernel void @read_only(
; SM_70-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr nocapture noundef readonly byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
; SM_70-NEXT: [[ENTRY:.*:]]
; SM_70-NEXT: [[S3:%.*]] = addrspacecast ptr [[S]] to ptr addrspace(101)
@@ -45,7 +45,7 @@ define dso_local void @read_only(ptr nocapture noundef writeonly %out, ptr nocap
; SM_70-NEXT: store i32 [[I]], ptr [[OUT2]], align 4
; SM_70-NEXT: ret void
;
-; COPY-LABEL: define dso_local void @read_only(
+; COPY-LABEL: define dso_local ptx_kernel void @read_only(
; COPY-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr nocapture noundef readonly byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
; COPY-NEXT: [[ENTRY:.*:]]
; COPY-NEXT: [[S1:%.*]] = alloca [[STRUCT_S]], align 4
@@ -62,8 +62,8 @@ entry:
}
; Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
-define dso_local void @read_only_gep(ptr nocapture noundef writeonly %out, ptr nocapture noundef readonly byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
-; SM_60-LABEL: define dso_local void @read_only_gep(
+define dso_local ptx_kernel void @read_only_gep(ptr nocapture noundef writeonly %out, ptr nocapture noundef readonly byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
+; SM_60-LABEL: define dso_local ptx_kernel void @read_only_gep(
; SM_60-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr nocapture noundef readonly byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_60-NEXT: [[ENTRY:.*:]]
; SM_60-NEXT: [[S3:%.*]] = addrspacecast ptr [[S]] to ptr addrspace(101)
@@ -74,7 +74,7 @@ define dso_local void @read_only_gep(ptr nocapture noundef writeonly %out, ptr n
; SM_60-NEXT: store i32 [[I]], ptr [[OUT2]], align 4
; SM_60-NEXT: ret void
;
-; SM_70-LABEL: define dso_local void @read_only_gep(
+; SM_70-LABEL: define dso_local ptx_kernel void @read_only_gep(
; SM_70-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr nocapture noundef readonly byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_70-NEXT: [[ENTRY:.*:]]
; SM_70-NEXT: [[S3:%.*]] = addrspacecast ptr [[S]] to ptr addrspace(101)
@@ -85,7 +85,7 @@ define dso_local void @read_only_gep(ptr nocapture noundef writeonly %out, ptr n
; SM_70-NEXT: store i32 [[I]], ptr [[OUT2]], align 4
; SM_70-NEXT: ret void
;
-; COPY-LABEL: define dso_local void @read_only_gep(
+; COPY-LABEL: define dso_local ptx_kernel void @read_only_gep(
; COPY-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr nocapture noundef readonly byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; COPY-NEXT: [[ENTRY:.*:]]
; COPY-NEXT: [[S1:%.*]] = alloca [[STRUCT_S]], align 4
@@ -104,8 +104,8 @@ entry:
}
; Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
-define dso_local void @read_only_gep_asc(ptr nocapture noundef writeonly %out, ptr nocapture noundef readonly byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
-; SM_60-LABEL: define dso_local void @read_only_gep_asc(
+define dso_local ptx_kernel void @read_only_gep_asc(ptr nocapture noundef writeonly %out, ptr nocapture noundef readonly byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
+; SM_60-LABEL: define dso_local ptx_kernel void @read_only_gep_asc(
; SM_60-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr nocapture noundef readonly byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_60-NEXT: [[ENTRY:.*:]]
; SM_60-NEXT: [[S3:%.*]] = addrspacecast ptr [[S]] to ptr addrspace(101)
@@ -116,7 +116,7 @@ define dso_local void @read_only_gep_asc(ptr nocapture noundef writeonly %out, p
; SM_60-NEXT: store i32 [[I]], ptr [[OUT2]], align 4
; SM_60-NEXT: ret void
;
-; SM_70-LABEL: define dso_local void @read_only_gep_asc(
+; SM_70-LABEL: define dso_local ptx_kernel void @read_only_gep_asc(
; SM_70-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr nocapture noundef readonly byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_70-NEXT: [[ENTRY:.*:]]
; SM_70-NEXT: [[S3:%.*]] = addrspacecast ptr [[S]] to ptr addrspace(101)
@@ -127,7 +127,7 @@ define dso_local void @read_only_gep_asc(ptr nocapture noundef writeonly %out, p
; SM_70-NEXT: store i32 [[I]], ptr [[OUT2]], align 4
; SM_70-NEXT: ret void
;
-; COPY-LABEL: define dso_local void @read_only_gep_asc(
+; COPY-LABEL: define dso_local ptx_kernel void @read_only_gep_asc(
; COPY-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr nocapture noundef readonly byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; COPY-NEXT: [[ENTRY:.*:]]
; COPY-NEXT: [[S1:%.*]] = alloca [[STRUCT_S]], align 4
@@ -148,8 +148,8 @@ entry:
}
; Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
-define dso_local void @read_only_gep_asc0(ptr nocapture noundef writeonly %out, ptr nocapture noundef readonly byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
-; SM_60-LABEL: define dso_local void @read_only_gep_asc0(
+define dso_local ptx_kernel void @read_only_gep_asc0(ptr nocapture noundef writeonly %out, ptr nocapture noundef readonly byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
+; SM_60-LABEL: define dso_local ptx_kernel void @read_only_gep_asc0(
; SM_60-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr nocapture noundef readonly byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_60-NEXT: [[ENTRY:.*:]]
; SM_60-NEXT: [[S3:%.*]] = alloca [[STRUCT_S]], align 4
@@ -164,7 +164,7 @@ define dso_local void @read_only_gep_asc0(ptr nocapture noundef writeonly %out,
; SM_60-NEXT: store i32 [[I]], ptr [[OUT2]], align 4
; SM_60-NEXT: ret void
;
-; SM_70-LABEL: define dso_local void @read_only_gep_asc0(
+; SM_70-LABEL: define dso_local ptx_kernel void @read_only_gep_asc0(
; SM_70-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr nocapture noundef readonly byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_70-NEXT: [[ENTRY:.*:]]
; SM_70-NEXT: [[S3:%.*]] = alloca [[STRUCT_S]], align 4
@@ -179,7 +179,7 @@ define dso_local void @read_only_gep_asc0(ptr nocapture noundef writeonly %out,
; SM_70-NEXT: store i32 [[I]], ptr [[OUT2]], align 4
; SM_70-NEXT: ret void
;
-; COPY-LABEL: define dso_local void @read_only_gep_asc0(
+; COPY-LABEL: define dso_local ptx_kernel void @read_only_gep_asc0(
; COPY-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr nocapture noundef readonly byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; COPY-NEXT: [[ENTRY:.*:]]
; COPY-NEXT: [[S1:%.*]] = alloca [[STRUCT_S]], align 4
@@ -202,8 +202,8 @@ entry:
}
; Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
-define dso_local void @escape_ptr(ptr nocapture noundef readnone %out, ptr noundef byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
-; SM_60-LABEL: define dso_local void @escape_ptr(
+define dso_local ptx_kernel void @escape_ptr(ptr nocapture noundef readnone %out, ptr noundef byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
+; SM_60-LABEL: define dso_local ptx_kernel void @escape_ptr(
; SM_60-SAME: ptr nocapture noundef readnone [[OUT:%.*]], ptr noundef byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_60-NEXT: [[ENTRY:.*:]]
; SM_60-NEXT: [[S3:%.*]] = alloca [[STRUCT_S]], align 4
@@ -214,7 +214,7 @@ define dso_local void @escape_ptr(ptr nocapture noundef readnone %out, ptr nound
; SM_60-NEXT: call void @_Z6escapePv(ptr noundef nonnull [[S3]])
; SM_60-NEXT: ret void
;
-; SM_70-LABEL: define dso_local void @escape_ptr(
+; SM_70-LABEL: define dso_local ptx_kernel void @escape_ptr(
; SM_70-SAME: ptr nocapture noundef readnone [[OUT:%.*]], ptr noundef byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_70-NEXT: [[ENTRY:.*:]]
; SM_70-NEXT: [[S3:%.*]] = alloca [[STRUCT_S]], align 4
@@ -225,7 +225,7 @@ define dso_local void @escape_ptr(ptr nocapture noundef readnone %out, ptr nound
; SM_70-NEXT: call void @_Z6escapePv(ptr noundef nonnull [[S3]])
; SM_70-NEXT: ret void
;
-; COPY-LABEL: define dso_local void @escape_ptr(
+; COPY-LABEL: define dso_local ptx_kernel void @escape_ptr(
; COPY-SAME: ptr nocapture noundef readnone [[OUT:%.*]], ptr noundef byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; COPY-NEXT: [[ENTRY:.*:]]
; COPY-NEXT: [[S1:%.*]] = alloca [[STRUCT_S]], align 4
@@ -240,8 +240,8 @@ entry:
}
; Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
-define dso_local void @escape_ptr_gep(ptr nocapture noundef readnone %out, ptr noundef byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
-; SM_60-LABEL: define dso_local void @escape_ptr_gep(
+define dso_local ptx_kernel void @escape_ptr_gep(ptr nocapture noundef readnone %out, ptr noundef byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
+; SM_60-LABEL: define dso_local ptx_kernel void @escape_ptr_gep(
; SM_60-SAME: ptr nocapture noundef readnone [[OUT:%.*]], ptr noundef byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_60-NEXT: [[ENTRY:.*:]]
; SM_60-NEXT: [[S3:%.*]] = alloca [[STRUCT_S]], align 4
@@ -253,7 +253,7 @@ define dso_local void @escape_ptr_gep(ptr nocapture noundef readnone %out, ptr n
; SM_60-NEXT: call void @_Z6escapePv(ptr noundef nonnull [[B]])
; SM_60-NEXT: ret void
;
-; SM_70-LABEL: define dso_local void @escape_ptr_gep(
+; SM_70-LABEL: define dso_local ptx_kernel void @escape_ptr_gep(
; SM_70-SAME: ptr nocapture noundef readnone [[OUT:%.*]], ptr noundef byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_70-NEXT: [[ENTRY:.*:]]
; SM_70-NEXT: [[S3:%.*]] = alloca [[STRUCT_S]], align 4
@@ -265,7 +265,7 @@ define dso_local void @escape_ptr_gep(ptr nocapture noundef readnone %out, ptr n
; SM_70-NEXT: call void @_Z6escapePv(ptr noundef nonnull [[B]])
; SM_70-NEXT: ret void
;
-; COPY-LABEL: define dso_local void @escape_ptr_gep(
+; COPY-LABEL: define dso_local ptx_kernel void @escape_ptr_gep(
; COPY-SAME: ptr nocapture noundef readnone [[OUT:%.*]], ptr noundef byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; COPY-NEXT: [[ENTRY:.*:]]
; COPY-NEXT: [[S1:%.*]] = alloca [[STRUCT_S]], align 4
@@ -282,8 +282,8 @@ entry:
}
; Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
-define dso_local void @escape_ptr_store(ptr nocapture noundef writeonly %out, ptr noundef byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
-; SM_60-LABEL: define dso_local void @escape_ptr_store(
+define dso_local ptx_kernel void @escape_ptr_store(ptr nocapture noundef writeonly %out, ptr noundef byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
+; SM_60-LABEL: define dso_local ptx_kernel void @escape_ptr_store(
; SM_60-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr noundef byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_60-NEXT: [[ENTRY:.*:]]
; SM_60-NEXT: [[S3:%.*]] = alloca [[STRUCT_S]], align 4
@@ -294,7 +294,7 @@ define dso_local void @escape_ptr_store(ptr nocapture noundef writeonly %out, pt
; SM_60-NEXT: store ptr [[S3]], ptr [[OUT2]], align 8
; SM_60-NEXT: ret void
;
-; SM_70-LABEL: define dso_local void @escape_ptr_store(
+; SM_70-LABEL: define dso_local ptx_kernel void @escape_ptr_store(
; SM_70-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr noundef byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_70-NEXT: [[ENTRY:.*:]]
; SM_70-NEXT: [[S3:%.*]] = alloca [[STRUCT_S]], align 4
@@ -305,7 +305,7 @@ define dso_local void @escape_ptr_store(ptr nocapture noundef writeonly %out, pt
; SM_70-NEXT: store ptr [[S3]], ptr [[OUT2]], align 8
; SM_70-NEXT: ret void
;
-; COPY-LABEL: define dso_local void @escape_ptr_store(
+; COPY-LABEL: define dso_local ptx_kernel void @escape_ptr_store(
; COPY-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr noundef byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; COPY-NEXT: [[ENTRY:.*:]]
; COPY-NEXT: [[S1:%.*]] = alloca [[STRUCT_S]], align 4
@@ -320,8 +320,8 @@ entry:
}
; Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
-define dso_local void @escape_ptr_gep_store(ptr nocapture noundef writeonly %out, ptr noundef byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
-; SM_60-LABEL: define dso_local void @escape_ptr_gep_store(
+define dso_local ptx_kernel void @escape_ptr_gep_store(ptr nocapture noundef writeonly %out, ptr noundef byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
+; SM_60-LABEL: define dso_local ptx_kernel void @escape_ptr_gep_store(
; SM_60-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr noundef byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_60-NEXT: [[ENTRY:.*:]]
; SM_60-NEXT: [[S3:%.*]] = alloca [[STRUCT_S]], align 4
@@ -333,7 +333,7 @@ define dso_local void @escape_ptr_gep_store(ptr nocapture noundef writeonly %out
; SM_60-NEXT: store ptr [[B]], ptr [[OUT2]], align 8
; SM_60-NEXT: ret void
;
-; SM_70-LABEL: define dso_local void @escape_ptr_gep_store(
+; SM_70-LABEL: define dso_local ptx_kernel void @escape_ptr_gep_store(
; SM_70-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr noundef byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_70-NEXT: [[ENTRY:.*:]]
; SM_70-NEXT: [[S3:%.*]] = alloca [[STRUCT_S]], align 4
@@ -345,7 +345,7 @@ define dso_local void @escape_ptr_gep_store(ptr nocapture noundef writeonly %out
; SM_70-NEXT: store ptr [[B]], ptr [[OUT2]], align 8
; SM_70-NEXT: ret void
;
-; COPY-LABEL: define dso_local void @escape_ptr_gep_store(
+; COPY-LABEL: define dso_local ptx_kernel void @escape_ptr_gep_store(
; COPY-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr noundef byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; COPY-NEXT: [[ENTRY:.*:]]
; COPY-NEXT: [[S1:%.*]] = alloca [[STRUCT_S]], align 4
@@ -362,8 +362,8 @@ entry:
}
; Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
-define dso_local void @escape_ptrtoint(ptr nocapture noundef writeonly %out, ptr noundef byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
-; SM_60-LABEL: define dso_local void @escape_ptrtoint(
+define dso_local ptx_kernel void @escape_ptrtoint(ptr nocapture noundef writeonly %out, ptr noundef byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
+; SM_60-LABEL: define dso_local ptx_kernel void @escape_ptrtoint(
; SM_60-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr noundef byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_60-NEXT: [[ENTRY:.*:]]
; SM_60-NEXT: [[S3:%.*]] = alloca [[STRUCT_S]], align 4
@@ -375,7 +375,7 @@ define dso_local void @escape_ptrtoint(ptr nocapture noundef writeonly %out, ptr
; SM_60-NEXT: store i64 [[I]], ptr [[OUT2]], align 8
; SM_60-NEXT: ret void
;
-; SM_70-LABEL: define dso_local void @escape_ptrtoint(
+; SM_70-LABEL: define dso_local ptx_kernel void @escape_ptrtoint(
; SM_70-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr noundef byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_70-NEXT: [[ENTRY:.*:]]
; SM_70-NEXT: [[S3:%.*]] = alloca [[STRUCT_S]], align 4
@@ -387,7 +387,7 @@ define dso_local void @escape_ptrtoint(ptr nocapture noundef writeonly %out, ptr
; SM_70-NEXT: store i64 [[I]], ptr [[OUT2]], align 8
; SM_70-NEXT: ret void
;
-; COPY-LABEL: define dso_local void @escape_ptrtoint(
+; COPY-LABEL: define dso_local ptx_kernel void @escape_ptrtoint(
; COPY-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr noundef byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; COPY-NEXT: [[ENTRY:.*:]]
; COPY-NEXT: [[S1:%.*]] = alloca [[STRUCT_S]], align 4
@@ -404,8 +404,8 @@ entry:
}
; Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
-define dso_local void @memcpy_from_param(ptr nocapture noundef writeonly %out, ptr nocapture noundef readonly byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
-; SM_60-LABEL: define dso_local void @memcpy_from_param(
+define dso_local ptx_kernel void @memcpy_from_param(ptr nocapture noundef writeonly %out, ptr nocapture noundef readonly byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
+; SM_60-LABEL: define dso_local ptx_kernel void @memcpy_from_param(
; SM_60-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr nocapture noundef readonly byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_60-NEXT: [[ENTRY:.*:]]
; SM_60-NEXT: [[S3:%.*]] = addrspacecast ptr [[S]] to ptr addrspace(101)
@@ -414,7 +414,7 @@ define dso_local void @memcpy_from_param(ptr nocapture noundef writeonly %out, p
; SM_60-NEXT: call void @llvm.memcpy.p0.p101.i64(ptr [[OUT2]], ptr addrspace(101) [[S3]], i64 16, i1 true)
; SM_60-NEXT: ret void
;
-; SM_70-LABEL: define dso_local void @memcpy_from_param(
+; SM_70-LABEL: define dso_local ptx_kernel void @memcpy_from_param(
; SM_70-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr nocapture noundef readonly byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_70-NEXT: [[ENTRY:.*:]]
; SM_70-NEXT: [[S3:%.*]] = addrspacecast ptr [[S]] to ptr addrspace(101)
@@ -423,7 +423,7 @@ define dso_local void @memcpy_from_param(ptr nocapture noundef writeonly %out, p
; SM_70-NEXT: call void @llvm.memcpy.p0.p101.i64(ptr [[OUT2]], ptr addrspace(101) [[S3]], i64 16, i1 true)
; SM_70-NEXT: ret void
;
-; COPY-LABEL: define dso_local void @memcpy_from_param(
+; COPY-LABEL: define dso_local ptx_kernel void @memcpy_from_param(
; COPY-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr nocapture noundef readonly byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; COPY-NEXT: [[ENTRY:.*:]]
; COPY-NEXT: [[S1:%.*]] = alloca [[STRUCT_S]], align 4
@@ -438,8 +438,8 @@ entry:
}
; Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
-define dso_local void @memcpy_from_param_noalign (ptr nocapture noundef writeonly %out, ptr nocapture noundef readonly byval(%struct.S) %s) local_unnamed_addr #0 {
-; SM_60-LABEL: define dso_local void @memcpy_from_param_noalign(
+define dso_local ptx_kernel void @memcpy_from_param_noalign (ptr nocapture noundef writeonly %out, ptr nocapture noundef readonly byval(%struct.S) %s) local_unnamed_addr #0 {
+; SM_60-LABEL: define dso_local ptx_kernel void @memcpy_from_param_noalign(
; SM_60-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr nocapture noundef readonly byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_60-NEXT: [[ENTRY:.*:]]
; SM_60-NEXT: [[S3:%.*]] = addrspacecast ptr [[S]] to ptr addrspace(101)
@@ -448,7 +448,7 @@ define dso_local void @memcpy_from_param_noalign (ptr nocapture noundef writeonl
; SM_60-NEXT: call void @llvm.memcpy.p0.p101.i64(ptr [[OUT2]], ptr addrspace(101) [[S3]], i64 16, i1 true)
; SM_60-NEXT: ret void
;
-; SM_70-LABEL: define dso_local void @memcpy_from_param_noalign(
+; SM_70-LABEL: define dso_local ptx_kernel void @memcpy_from_param_noalign(
; SM_70-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr nocapture noundef readonly byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_70-NEXT: [[ENTRY:.*:]]
; SM_70-NEXT: [[S3:%.*]] = addrspacecast ptr [[S]] to ptr addrspace(101)
@@ -457,7 +457,7 @@ define dso_local void @memcpy_from_param_noalign (ptr nocapture noundef writeonl
; SM_70-NEXT: call void @llvm.memcpy.p0.p101.i64(ptr [[OUT2]], ptr addrspace(101) [[S3]], i64 16, i1 true)
; SM_70-NEXT: ret void
;
-; COPY-LABEL: define dso_local void @memcpy_from_param_noalign(
+; COPY-LABEL: define dso_local ptx_kernel void @memcpy_from_param_noalign(
; COPY-SAME: ptr nocapture noundef writeonly [[OUT:%.*]], ptr nocapture noundef readonly byval([[STRUCT_S:%.*]]) [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; COPY-NEXT: [[ENTRY:.*:]]
; COPY-NEXT: [[S1:%.*]] = alloca [[STRUCT_S]], align 8
@@ -472,8 +472,8 @@ entry:
}
; Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
-define dso_local void @memcpy_to_param(ptr nocapture noundef readonly %in, ptr nocapture noundef readnone byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
-; SM_60-LABEL: define dso_local void @memcpy_to_param(
+define dso_local ptx_kernel void @memcpy_to_param(ptr nocapture noundef readonly %in, ptr nocapture noundef readnone byval(%struct.S) align 4 %s) local_unnamed_addr #0 {
+; SM_60-LABEL: define dso_local ptx_kernel void @memcpy_to_param(
; SM_60-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef readnone byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_60-NEXT: [[ENTRY:.*:]]
; SM_60-NEXT: [[S3:%.*]] = alloca [[STRUCT_S]], align 4
@@ -484,7 +484,7 @@ define dso_local void @memcpy_to_param(ptr nocapture noundef readonly %in, ptr n
; SM_60-NEXT: tail call void @llvm.memcpy.p0.p0.i64(ptr [[S3]], ptr [[IN2]], i64 16, i1 true)
; SM_60-NEXT: ret void
;
-; SM_70-LABEL: define dso_local void @memcpy_to_param(
+; SM_70-LABEL: define dso_local ptx_kernel void @memcpy_to_param(
; SM_70-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef readnone byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_70-NEXT: [[ENTRY:.*:]]
; SM_70-NEXT: [[S3:%.*]] = alloca [[STRUCT_S]], align 4
@@ -495,7 +495,7 @@ define dso_local void @memcpy_to_param(ptr nocapture noundef readonly %in, ptr n
; SM_70-NEXT: tail call void @llvm.memcpy.p0.p0.i64(ptr [[S3]], ptr [[IN2]], i64 16, i1 true)
; SM_70-NEXT: ret void
;
-; COPY-LABEL: define dso_local void @memcpy_to_param(
+; COPY-LABEL: define dso_local ptx_kernel void @memcpy_to_param(
; COPY-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef readnone byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]]) local_unnamed_addr #[[ATTR0]] {
; COPY-NEXT: [[ENTRY:.*:]]
; COPY-NEXT: [[S1:%.*]] = alloca [[STRUCT_S]], align 4
@@ -510,8 +510,8 @@ entry:
}
; Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn memory(argmem: readwrite)
-define dso_local void @copy_on_store(ptr nocapture noundef readonly %in, ptr nocapture noundef byval(%struct.S) align 4 %s, i1 noundef zeroext %b) local_unnamed_addr #0 {
-; SM_60-LABEL: define dso_local void @copy_on_store(
+define dso_local ptx_kernel void @copy_on_store(ptr nocapture noundef readonly %in, ptr nocapture noundef byval(%struct.S) align 4 %s, i1 noundef zeroext %b) local_unnamed_addr #0 {
+; SM_60-LABEL: define dso_local ptx_kernel void @copy_on_store(
; SM_60-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]], i1 noundef zeroext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_60-NEXT: [[BB:.*:]]
; SM_60-NEXT: [[S3:%.*]] = alloca [[STRUCT_S]], align 4
@@ -523,7 +523,7 @@ define dso_local void @copy_on_store(ptr nocapture noundef readonly %in, ptr noc
; SM_60-NEXT: store i32 [[I]], ptr [[S3]], align 4
; SM_60-NEXT: ret void
;
-; SM_70-LABEL: define dso_local void @copy_on_store(
+; SM_70-LABEL: define dso_local ptx_kernel void @copy_on_store(
; SM_70-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]], i1 noundef zeroext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
; SM_70-NEXT: [[BB:.*:]]
; SM_70-NEXT: [[S3:%.*]] = alloca [[STRUCT_S]], align 4
@@ -535,7 +535,7 @@ define dso_local void @copy_on_store(ptr nocapture noundef readonly %in, ptr noc
; SM_70-NEXT: store i32 [[I]], ptr [[S3]], align 4
; SM_70-NEXT: ret void
;
-; COPY-LABEL: define dso_local void @copy_on_store(
+; COPY-LABEL: define dso_local ptx_kernel void @copy_on_store(
; COPY-SAME: ptr nocapture noundef readonly [[IN:%.*]], ptr nocapture noundef byval([[STRUCT_S:%.*]]) align 4 [[S:%.*]], i1 noundef zeroext [[B:%.*]]) local_unnamed_addr #[[ATTR0]] {
; COPY-NEXT: [[BB:.*:]]
; COPY-NEXT: [[S1:%.*]] = alloca [[STRUCT_S]], align 4
@@ -551,8 +551,8 @@ bb:
ret void
}
-define void @test_select(ptr byval(i32) align 4 %input1, ptr byval(i32) %input2, ptr %out, i1 %cond) {
-; SM_60-LABEL: define void @test_select(
+define ptx_kernel void @test_select(ptr byval(i32) align 4 %input1, ptr byval(i32) %input2, ptr %out, i1 %cond) {
+; SM_60-LABEL: define ptx_kernel void @test_select(
; SM_60-SAME: ptr byval(i32) align 4 [[INPUT1:%.*]], ptr byval(i32) [[INPUT2:%.*]], ptr [[OUT:%.*]], i1 [[COND:%.*]]) #[[ATTR3:[0-9]+]] {
; SM_60-NEXT: [[BB:.*:]]
; SM_60-NEXT: [[OUT7:%.*]] = addrspacecast ptr [[OUT]] to ptr addrspace(1)
@@ -568,7 +568,7 @@ define void @test_select(ptr byval(i32) align 4 %input1, ptr byval(i32) %input2,
; SM_60-NEXT: store i32 [[VALLOADED]], ptr [[OUT8]], align 4
; SM_60-NEXT: ret void
;
-; SM_70-LABEL: define void @test_select(
+; SM_70-LABEL: define ptx_kernel void @test_select(
; SM_70-SAME: ptr byval(i32) align 4 [[INPUT1:%.*]], ptr byval(i32) [[INPUT2:%.*]], ptr [[OUT:%.*]], i1 [[COND:%.*]]) #[[ATTR3:[0-9]+]] {
; SM_70-NEXT: [[BB:.*:]]
; SM_70-NEXT: [[OUT1:%.*]] = addrspacecast ptr [[OUT]] to ptr addrspace(1)
@@ -582,7 +582,7 @@ define void @test_select(ptr byval(i32) align 4 %input1, ptr byval(i32) %input2,
; SM_70-NEXT: store i32 [[VALLOADED]], ptr [[OUT2]], align 4
; SM_70-NEXT: ret void
;
-; COPY-LABEL: define void @test_select(
+; COPY-LABEL: define ptx_kernel void @test_select(
; COPY-SAME: ptr byval(i32) align 4 [[INPUT1:%.*]], ptr byval(i32) [[INPUT2:%.*]], ptr [[OUT:%.*]], i1 [[COND:%.*]]) #[[ATTR3:[0-9]+]] {
; COPY-NEXT: [[BB:.*:]]
; COPY-NEXT: [[INPUT23:%.*]] = alloca i32, align 4
@@ -603,8 +603,8 @@ bb:
ret void
}
-define void @test_select_write(ptr byval(i32) align 4 %input1, ptr byval(i32) %input2, ptr %out, i1 %cond) {
-; SM_60-LABEL: define void @test_select_write(
+define ptx_kernel void @test_select_write(ptr byval(i32) align 4 %input1, ptr byval(i32) %input2, ptr %out, i1 %cond) {
+; SM_60-LABEL: define ptx_kernel void @test_select_write(
; SM_60-SAME: ptr byval(i32) align 4 [[INPUT1:%.*]], ptr byval(i32) [[INPUT2:%.*]], ptr [[OUT:%.*]], i1 [[COND:%.*]]) #[[ATTR3]] {
; SM_60-NEXT: [[BB:.*:]]
; SM_60-NEXT: [[OUT5:%.*]] = addrspacecast ptr [[OUT]] to ptr addrspace(1)
@@ -619,7 +619,7 @@ define void @test_select_write(ptr byval(i32) align 4 %input1, ptr byval(i32) %i
; SM_60-NEXT: store i32 1, ptr [[PTRNEW]], align 4
; SM_60-NEXT: ret void
;
-; SM_70-LABEL: define void @test_select_write(
+; SM_70-LABEL: define ptx_kernel void @test_select_write(
; SM_70-SAME: ptr byval(i32) align 4 [[INPUT1:%.*]], ptr byval(i32) [[INPUT2:%.*]], ptr [[OUT:%.*]], i1 [[COND:%.*]]) #[[ATTR3]] {
; SM_70-NEXT: [[BB:.*:]]
; SM_70-NEXT: [[OUT5:%.*]] = addrspacecast ptr [[OUT]] to ptr addrspace(1)
@@ -634,7 +634,7 @@ define void @test_select_write(ptr byval(i32) align 4 %input1, ptr byval(i32) %i
; SM_70-NEXT: store i32 1, ptr [[PTRNEW]], align 4
; SM_70-NEXT: ret void
;
-; COPY-LABEL: define void @test_select_write(
+; COPY-LABEL: define ptx_kernel void @test_select_write(
; COPY-SAME: ptr byval(i32) align 4 [[INPUT1:%.*]], ptr byval(i32) [[INPUT2:%.*]], ptr [[OUT:%.*]], i1 [[COND:%.*]]) #[[ATTR3]] {
; COPY-NEXT: [[BB:.*:]]
; COPY-NEXT: [[INPUT23:%.*]] = alloca i32, align 4
@@ -653,8 +653,8 @@ bb:
ret void
}
-define void @test_phi(ptr byval(%struct.S) align 4 %input1, ptr byval(%struct.S) %input2, ptr %inout, i1 %cond) {
-; SM_60-LABEL: define void @test_phi(
+define ptx_kernel void @test_phi(ptr byval(%struct.S) align 4 %input1, ptr byval(%struct.S) %input2, ptr %inout, i1 %cond) {
+; SM_60-LABEL: define ptx_kernel void @test_phi(
; SM_60-SAME: ptr byval([[STRUCT_S:%.*]]) align 4 [[INPUT1:%.*]], ptr byval([[STRUCT_S]]) [[INPUT2:%.*]], ptr [[INOUT:%.*]], i1 [[COND:%.*]]) #[[ATTR3]] {
; SM_60-NEXT: [[BB:.*:]]
; SM_60-NEXT: [[INOUT7:%.*]] = addrspacecast ptr [[INOUT]] to ptr addrspace(1)
@@ -678,7 +678,7 @@ define void @test_phi(ptr byval(%struct.S) align 4 %input1, ptr byval(%struct.S)
; SM_60-NEXT: store i32 [[VALLOADED]], ptr [[INOUT8]], align 4
; SM_60-NEXT: ret void
;
-; SM_70-LABEL: define void @test_phi(
+; SM_70-LABEL: define ptx_kernel void @test_phi(
; SM_70-SAME: ptr byval([[STRUCT_S:%.*]]) align 4 [[INPUT1:%.*]], ptr byval([[STRUCT_S]]) [[INPUT2:%.*]], ptr [[INOUT:%.*]], i1 [[COND:%.*]]) #[[ATTR3]] {
; SM_70-NEXT: [[BB:.*:]]
; SM_70-NEXT: [[INOUT1:%.*]] = addrspacecast ptr [[INOUT]] to ptr addrspace(1)
@@ -700,7 +700,7 @@ define void @test_phi(ptr byval(%struct.S) align 4 %input1, ptr byval(%struct.S)
; SM_70-NEXT: store i32 [[VALLOADED]], ptr [[INOUT2]], align 4
; SM_70-NEXT: ret void
;
-; COPY-LABEL: define void @test_phi(
+; COPY-LABEL: define ptx_kernel void @test_phi(
; COPY-SAME: ptr byval([[STRUCT_S:%.*]]) align 4 [[INPUT1:%.*]], ptr byval([[STRUCT_S]]) [[INPUT2:%.*]], ptr [[INOUT:%.*]], i1 [[COND:%.*]]) #[[ATTR3]] {
; COPY-NEXT: [[BB:.*:]]
; COPY-NEXT: [[INPUT23:%.*]] = alloca [[STRUCT_S]], align 8
@@ -740,8 +740,8 @@ merge: ; preds = %second, %first
ret void
}
-define void @test_phi_write(ptr byval(%struct.S) align 4 %input1, ptr byval(%struct.S) %input2, i1 %cond) {
-; COMMON-LABEL: define void @test_phi_write(
+define ptx_kernel void @test_phi_write(ptr byval(%struct.S) align 4 %input1, ptr byval(%struct.S) %input2, i1 %cond) {
+; COMMON-LABEL: define ptx_kernel void @test_phi_write(
; COMMON-SAME: ptr byval([[STRUCT_S:%.*]]) align 4 [[INPUT1:%.*]], ptr byval([[STRUCT_S]]) [[INPUT2:%.*]], i1 [[COND:%.*]]) #[[ATTR3:[0-9]+]] {
; COMMON-NEXT: [[BB:.*:]]
; COMMON-NEXT: [[INPUT24:%.*]] = alloca [[STRUCT_S]], align 8
@@ -784,29 +784,11 @@ attributes #1 = { nocallback nofree nounwind willreturn memory(argmem: readwrite
attributes #2 = { nocallback nofree nounwind willreturn memory(argmem: write) }
!llvm.module.flags = !{!0, !1, !2, !3}
-!nvvm.annotations = !{!4, !5, !6, !7, !8, !9, !10, !11, !12, !13, !14, !15, !16, !17, !18, !19, !23}
!llvm.ident = !{!20, !21}
!0 = !{i32 2, !"SDK Version", [2 x i32] [i32 11, i32 8]}
!1 = !{i32 1, !"wchar_size", i32 4}
!2 = !{i32 4, !"nvvm-reflect-ftz", i32 0}
!3 = !{i32 7, !"frame-pointer", i32 2}
-!4 = !{ptr @read_only, !"kernel", i32 1}
-!5 = !{ptr @escape_ptr, !"kernel", i32 1}
-!6 = !{ptr @escape_ptr_gep, !"kernel", i32 1}
-!7 = !{ptr @escape_ptr_store, !"kernel", i32 1}
-!8 = !{ptr @escape_ptr_gep_store, !"kernel", i32 1}
-!9 = !{ptr @escape_ptrtoint, !"kernel", i32 1}
-!10 = !{ptr @memcpy_from_param, !"kernel", i32 1}
-!11 = !{ptr @memcpy_to_param, !"kernel", i32 1}
-!12 = !{ptr @copy_on_store, !"kernel", i32 1}
-!13 = !{ptr @read_only_gep, !"kernel", i32 1}
-!14 = !{ptr @read_only_gep_asc, !"kernel", i32 1}
-!15 = !{ptr @read_only_gep_asc0, !"kernel", i32 1}
-!16 = !{ptr @test_select, !"kernel", i32 1}
-!17 = !{ptr @test_phi, !"kernel", i32 1}
-!18 = !{ptr @test_phi_write, !"kernel", i32 1}
-!19 = !{ptr @test_select_write, !"kernel", i32 1}
!20 = !{!"clang version 20.0.0git"}
!21 = !{!"clang version 3.8.0 (tags/RELEASE_380/final)"}
-!23 = !{ptr @memcpy_from_param_noalign, !"kernel", i32 1}
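
The hunks above all follow one migration pattern: the !"kernel" entries in !nvvm.annotations are dropped and each kernel is instead marked with the ptx_kernel calling convention directly on its define. A minimal before/after sketch of that pattern, using a hypothetical kernel @example that does not appear in the tests above:

; Before: kernel-ness carried via module-level metadata
define void @example(ptr %out) {
  ret void
}
!nvvm.annotations = !{!0}
!0 = !{ptr @example, !"kernel", i32 1}

; After: kernel-ness carried by the calling convention
define ptx_kernel void @example(ptr %out) {
  ret void
}
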
diff --git a/llvm/test/CodeGen/NVPTX/lower-ctor-dtor.ll b/llvm/test/CodeGen/NVPTX/lower-ctor-dtor.ll
index f8b3b4b..4ee1ca3 100644
--- a/llvm/test/CodeGen/NVPTX/lower-ctor-dtor.ll
+++ b/llvm/test/CodeGen/NVPTX/lower-ctor-dtor.ll
@@ -43,7 +43,7 @@ define internal void @bar() {
ret void
}
-; CHECK-LABEL: define weak_odr void @"nvptx$device$init"() {
+; CHECK-LABEL: define weak_odr ptx_kernel void @"nvptx$device$init"() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[BEGIN:%.*]] = load ptr addrspace(1), ptr addrspace(1) @__init_array_start, align 8
; CHECK-NEXT: [[STOP:%.*]] = load ptr addrspace(1), ptr addrspace(1) @__init_array_end, align 8
@@ -60,7 +60,7 @@ define internal void @bar() {
; CHECK-NEXT: ret void
;
;
-; CHECK-LABEL: define weak_odr void @"nvptx$device$fini"() {
+; CHECK-LABEL: define weak_odr ptx_kernel void @"nvptx$device$fini"() {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[BEGIN:%.*]] = load ptr addrspace(1), ptr addrspace(1) @__fini_array_start, align 8
; CHECK-NEXT: [[STOP:%.*]] = load ptr addrspace(1), ptr addrspace(1) @__fini_array_end, align 8
@@ -82,12 +82,10 @@ define internal void @bar() {
; CHECK: while.end:
; CHECK-NEXT: ret void
-; CHECK: [[META0:![0-9]+]] = !{ptr @"nvptx$device$init", !"kernel", i32 1}
; CHECK: [[META1:![0-9]+]] = !{ptr @"nvptx$device$init", !"maxntidx", i32 1}
; CHECK: [[META2:![0-9]+]] = !{ptr @"nvptx$device$init", !"maxntidy", i32 1}
; CHECK: [[META3:![0-9]+]] = !{ptr @"nvptx$device$init", !"maxntidz", i32 1}
; CHECK: [[META4:![0-9]+]] = !{ptr @"nvptx$device$init", !"maxclusterrank", i32 1}
-; CHECK: [[META5:![0-9]+]] = !{ptr @"nvptx$device$fini", !"kernel", i32 1}
; CHECK: [[META6:![0-9]+]] = !{ptr @"nvptx$device$fini", !"maxntidx", i32 1}
; CHECK: [[META7:![0-9]+]] = !{ptr @"nvptx$device$fini", !"maxntidy", i32 1}
; CHECK: [[META8:![0-9]+]] = !{ptr @"nvptx$device$fini", !"maxntidz", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/lower-kernel-ptr-arg.ll b/llvm/test/CodeGen/NVPTX/lower-kernel-ptr-arg.ll
index 9ec690a..2e64c25 100644
--- a/llvm/test/CodeGen/NVPTX/lower-kernel-ptr-arg.ll
+++ b/llvm/test/CodeGen/NVPTX/lower-kernel-ptr-arg.ll
@@ -6,7 +6,7 @@ target triple = "nvptx64-nvidia-cuda"
; Verify that both %input and %output are converted to global pointers and then
; addrspacecast'ed back to the original type.
-define void @kernel(ptr %input, ptr %output) {
+define ptx_kernel void @kernel(ptr %input, ptr %output) {
; CHECK-LABEL: .visible .entry kernel(
; CHECK: cvta.to.global.u64
; CHECK: cvta.to.global.u64
@@ -17,7 +17,7 @@ define void @kernel(ptr %input, ptr %output) {
ret void
}
-define void @kernel2(ptr addrspace(1) %input, ptr addrspace(1) %output) {
+define ptx_kernel void @kernel2(ptr addrspace(1) %input, ptr addrspace(1) %output) {
; CHECK-LABEL: .visible .entry kernel2(
; CHECK-NOT: cvta.to.global.u64
%1 = load float, ptr addrspace(1) %input, align 4
@@ -29,7 +29,7 @@ define void @kernel2(ptr addrspace(1) %input, ptr addrspace(1) %output) {
%struct.S = type { ptr, ptr }
-define void @ptr_in_byval_kernel(ptr byval(%struct.S) %input, ptr %output) {
+define ptx_kernel void @ptr_in_byval_kernel(ptr byval(%struct.S) %input, ptr %output) {
; CHECK-LABEL: .visible .entry ptr_in_byval_kernel(
; CHECK: ld.param.u64 %[[optr:rd.*]], [ptr_in_byval_kernel_param_1]
; CHECK: cvta.to.global.u64 %[[optr_g:.*]], %[[optr]];
@@ -60,7 +60,3 @@ define void @ptr_in_byval_func(ptr byval(%struct.S) %input, ptr %output) {
ret void
}
-!nvvm.annotations = !{!0, !1, !2}
-!0 = !{ptr @kernel, !"kernel", i32 1}
-!1 = !{ptr @kernel2, !"kernel", i32 1}
-!2 = !{ptr @ptr_in_byval_kernel, !"kernel", i32 1}
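
The comment at the top of lower-kernel-ptr-arg.ll above describes the generic-to-global pointer rewrite that the cvta.to.global.u64 CHECK lines exercise. A rough IR-level sketch of that rewrite follows, with a hypothetical kernel @k and illustrative value names (not the exact names the pass emits):

; Input: a kernel taking a generic pointer argument
define ptx_kernel void @k(ptr %input) {
  %v = load float, ptr %input
  ret void
}

; After lowering: the argument is assumed to point to global memory
; and is cast back to the generic address space for the original uses
define ptx_kernel void @k(ptr %input) {
  %input.global = addrspacecast ptr %input to ptr addrspace(1)
  %input.gen = addrspacecast ptr addrspace(1) %input.global to ptr
  %v = load float, ptr %input.gen
  ret void
}
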
diff --git a/llvm/test/CodeGen/NVPTX/maxclusterrank.ll b/llvm/test/CodeGen/NVPTX/maxclusterrank.ll
index 3389e09..c445c34 100644
--- a/llvm/test/CodeGen/NVPTX/maxclusterrank.ll
+++ b/llvm/test/CodeGen/NVPTX/maxclusterrank.ll
@@ -11,16 +11,15 @@ target triple = "nvptx64-unknown-unknown"
; Make sure that for SM versions prior to 90 the `.maxclusterrank` directive is
; silently ignored.
-define dso_local void @_Z18TestMaxClusterRankv() {
+define dso_local ptx_kernel void @_Z18TestMaxClusterRankv() {
entry:
%a = alloca i32, align 4
store volatile i32 1, ptr %a, align 4
ret void
}
-!nvvm.annotations = !{!0, !1, !2, !3}
+!nvvm.annotations = !{!1, !2, !3}
-!0 = !{ptr @_Z18TestMaxClusterRankv, !"kernel", i32 1}
!1 = !{ptr @_Z18TestMaxClusterRankv, !"maxntidx", i32 128}
!2 = !{ptr @_Z18TestMaxClusterRankv, !"minctasm", i32 2}
!3 = !{ptr @_Z18TestMaxClusterRankv, !"maxclusterrank", i32 8}
diff --git a/llvm/test/CodeGen/NVPTX/noduplicate-syncthreads.ll b/llvm/test/CodeGen/NVPTX/noduplicate-syncthreads.ll
index 2bc6d4c..2a0c5ab 100644
--- a/llvm/test/CodeGen/NVPTX/noduplicate-syncthreads.ll
+++ b/llvm/test/CodeGen/NVPTX/noduplicate-syncthreads.ll
@@ -66,7 +66,4 @@ if.end17: ; preds = %if.else13, %if.then
}
; Function Attrs: noduplicate nounwind
-declare void @llvm.nvvm.barrier0() #2
-
-!0 = !{ptr @foo, !"kernel", i32 1}
-!1 = !{null, !"align", i32 8}
+declare void @llvm.nvvm.barrier0() #2
\ No newline at end of file
diff --git a/llvm/test/CodeGen/NVPTX/noreturn.ll b/llvm/test/CodeGen/NVPTX/noreturn.ll
index 2161d70..6c11d0a 100644
--- a/llvm/test/CodeGen/NVPTX/noreturn.ll
+++ b/llvm/test/CodeGen/NVPTX/noreturn.ll
@@ -27,7 +27,7 @@ define void @true_noreturn0() #0 {
; CHECK: .entry ignore_kernel_noreturn()
; CHECK-NOT: .noreturn
-define void @ignore_kernel_noreturn() #0 {
+define ptx_kernel void @ignore_kernel_noreturn() #0 {
unreachable
}
@@ -35,7 +35,7 @@ define void @ignore_kernel_noreturn() #0 {
; CHECK: prototype_{{[0-9]+}} : .callprototype ()_ (.param .b32 _) .noreturn;
; CHECK: prototype_{{[0-9]+}} : .callprototype (.param .b32 _) _ (.param .b32 _);
-define void @callprototype_noreturn(i32) {
+define ptx_kernel void @callprototype_noreturn(i32) {
%fn = load ptr, ptr addrspace(1) @function_pointer
call void %fn(i32 %0) #0
%non_void = bitcast ptr %fn to ptr
@@ -44,8 +44,3 @@ define void @callprototype_noreturn(i32) {
}
attributes #0 = { noreturn }
-
-!nvvm.annotations = !{!0, !1}
-
-!0 = !{ptr @ignore_kernel_noreturn, !"kernel", i32 1}
-!1 = !{ptr @callprototype_noreturn, !"kernel", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/nvcl-param-align.ll b/llvm/test/CodeGen/NVPTX/nvcl-param-align.ll
index 48162ea..9a78d31 100644
--- a/llvm/test/CodeGen/NVPTX/nvcl-param-align.ll
+++ b/llvm/test/CodeGen/NVPTX/nvcl-param-align.ll
@@ -3,7 +3,7 @@
target triple = "nvptx-unknown-nvcl"
-define void @foo(i64 %img, i64 %sampler, ptr align 32 %v1, ptr %v2) {
+define ptx_kernel void @foo(i64 %img, i64 %sampler, ptr align 32 %v1, ptr %v2) {
; The parameter alignment is determined by the align attribute (default 1).
; CHECK-LABEL: .entry foo(
; CHECK: .param .u64 .ptr .align 32 foo_param_2
@@ -11,7 +11,6 @@ define void @foo(i64 %img, i64 %sampler, ptr align 32 %v1, ptr %v2) {
ret void
}
-!nvvm.annotations = !{!1, !2, !3}
-!1 = !{ptr @foo, !"kernel", i32 1}
+!nvvm.annotations = !{!2, !3}
!2 = !{ptr @foo, !"rdoimage", i32 0}
!3 = !{ptr @foo, !"sampler", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/nvvm-reflect-arch.ll b/llvm/test/CodeGen/NVPTX/nvvm-reflect-arch.ll
index ac5875c..83cb3cd 100644
--- a/llvm/test/CodeGen/NVPTX/nvvm-reflect-arch.ll
+++ b/llvm/test/CodeGen/NVPTX/nvvm-reflect-arch.ll
@@ -1,9 +1,9 @@
; Libdevice in recent CUDA versions relies on __CUDA_ARCH reflecting GPU type.
; Verify that __nvvm_reflect() is replaced with an appropriate value.
;
-; RUN: opt %s -S -passes='default<O2>' -mtriple=nvptx64 -mcpu=sm_20 \
+; RUN: opt %s -S -passes='nvvm-reflect' -mtriple=nvptx64 -mcpu=sm_20 \
; RUN: | FileCheck %s --check-prefixes=COMMON,SM20
-; RUN: opt %s -S -passes='default<O2>' -mtriple=nvptx64 -mcpu=sm_35 \
+; RUN: opt %s -S -passes='nvvm-reflect' -mtriple=nvptx64 -mcpu=sm_35 \
; RUN: | FileCheck %s --check-prefixes=COMMON,SM35
@"$str" = private addrspace(1) constant [12 x i8] c"__CUDA_ARCH\00"
diff --git a/llvm/test/CodeGen/NVPTX/nvvm-reflect-ocl.ll b/llvm/test/CodeGen/NVPTX/nvvm-reflect-ocl.ll
index 9d38321..bf8d6e2 100644
--- a/llvm/test/CodeGen/NVPTX/nvvm-reflect-ocl.ll
+++ b/llvm/test/CodeGen/NVPTX/nvvm-reflect-ocl.ll
@@ -1,8 +1,8 @@
; Verify that __nvvm_reflect_ocl() is replaced with an appropriate value
;
-; RUN: opt %s -S -passes='default<O2>' -mtriple=nvptx64 -mcpu=sm_20 \
+; RUN: opt %s -S -passes='nvvm-reflect' -mtriple=nvptx64 -mcpu=sm_20 \
; RUN: | FileCheck %s --check-prefixes=COMMON,SM20
-; RUN: opt %s -S -passes='default<O2>' -mtriple=nvptx64 -mcpu=sm_35 \
+; RUN: opt %s -S -passes='nvvm-reflect' -mtriple=nvptx64 -mcpu=sm_35 \
; RUN: | FileCheck %s --check-prefixes=COMMON,SM35
@"$str" = private addrspace(4) constant [12 x i8] c"__CUDA_ARCH\00"
diff --git a/llvm/test/CodeGen/NVPTX/nvvm-reflect-opaque.ll b/llvm/test/CodeGen/NVPTX/nvvm-reflect-opaque.ll
index 46ab79d..19c74df 100644
--- a/llvm/test/CodeGen/NVPTX/nvvm-reflect-opaque.ll
+++ b/llvm/test/CodeGen/NVPTX/nvvm-reflect-opaque.ll
@@ -3,12 +3,12 @@
; RUN: cat %s > %t.noftz
; RUN: echo '!0 = !{i32 4, !"nvvm-reflect-ftz", i32 0}' >> %t.noftz
-; RUN: opt %t.noftz -S -mtriple=nvptx-nvidia-cuda -passes='default<O2>' \
+; RUN: opt %t.noftz -S -mtriple=nvptx-nvidia-cuda -passes='nvvm-reflect,simplifycfg' \
; RUN: | FileCheck %s --check-prefix=USE_FTZ_0 --check-prefix=CHECK
; RUN: cat %s > %t.ftz
; RUN: echo '!0 = !{i32 4, !"nvvm-reflect-ftz", i32 1}' >> %t.ftz
-; RUN: opt %t.ftz -S -mtriple=nvptx-nvidia-cuda -passes='default<O2>' \
+; RUN: opt %t.ftz -S -mtriple=nvptx-nvidia-cuda -passes='nvvm-reflect,simplifycfg' \
; RUN: | FileCheck %s --check-prefix=USE_FTZ_1 --check-prefix=CHECK
@str = private unnamed_addr addrspace(4) constant [11 x i8] c"__CUDA_FTZ\00"
@@ -43,7 +43,7 @@ exit:
declare i32 @llvm.nvvm.reflect(ptr)
-; CHECK-LABEL: define noundef i32 @intrinsic
+; CHECK-LABEL: define i32 @intrinsic
define i32 @intrinsic() {
; CHECK-NOT: call i32 @llvm.nvvm.reflect
; USE_FTZ_0: ret i32 0
diff --git a/llvm/test/CodeGen/NVPTX/nvvm-reflect.ll b/llvm/test/CodeGen/NVPTX/nvvm-reflect.ll
index 2ed9f7c..244b44f 100644
--- a/llvm/test/CodeGen/NVPTX/nvvm-reflect.ll
+++ b/llvm/test/CodeGen/NVPTX/nvvm-reflect.ll
@@ -3,12 +3,12 @@
; RUN: cat %s > %t.noftz
; RUN: echo '!0 = !{i32 4, !"nvvm-reflect-ftz", i32 0}' >> %t.noftz
-; RUN: opt %t.noftz -S -mtriple=nvptx-nvidia-cuda -passes='default<O2>' \
+; RUN: opt %t.noftz -S -mtriple=nvptx-nvidia-cuda -passes='nvvm-reflect,simplifycfg' \
; RUN: | FileCheck %s --check-prefix=USE_FTZ_0 --check-prefix=CHECK
; RUN: cat %s > %t.ftz
; RUN: echo '!0 = !{i32 4, !"nvvm-reflect-ftz", i32 1}' >> %t.ftz
-; RUN: opt %t.ftz -S -mtriple=nvptx-nvidia-cuda -passes='default<O2>' \
+; RUN: opt %t.ftz -S -mtriple=nvptx-nvidia-cuda -passes='nvvm-reflect,simplifycfg' \
; RUN: | FileCheck %s --check-prefix=USE_FTZ_1 --check-prefix=CHECK
@str = private unnamed_addr addrspace(4) constant [11 x i8] c"__CUDA_FTZ\00"
@@ -43,7 +43,8 @@ exit:
declare i32 @llvm.nvvm.reflect(ptr)
-; CHECK-LABEL: define noundef i32 @intrinsic
+; CHECK-LABEL: define i32 @intrinsic
+
define i32 @intrinsic() {
; CHECK-NOT: call i32 @llvm.nvvm.reflect
; USE_FTZ_0: ret i32 0
diff --git a/llvm/test/CodeGen/NVPTX/refl1.ll b/llvm/test/CodeGen/NVPTX/refl1.ll
index 34db3bb..99b83f4 100644
--- a/llvm/test/CodeGen/NVPTX/refl1.ll
+++ b/llvm/test/CodeGen/NVPTX/refl1.ll
@@ -5,7 +5,7 @@ target triple = "nvptx-nvidia-cuda"
; Function Attrs: nounwind
; CHECK: .entry foo
-define void @foo(ptr nocapture %a) #0 {
+define ptx_kernel void @foo(ptr nocapture %a) #0 {
%val = load float, ptr %a
%tan = tail call fastcc float @__nv_fast_tanf(float %val)
store float %tan, ptr %a
@@ -34,7 +34,3 @@ entry:
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
attributes #2 = { alwaysinline inlinehint nounwind readnone }
-
-!nvvm.annotations = !{!0}
-
-!0 = !{ptr @foo, !"kernel", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/reg-copy.ll b/llvm/test/CodeGen/NVPTX/reg-copy.ll
index f66ef19..20396c4 100644
--- a/llvm/test/CodeGen/NVPTX/reg-copy.ll
+++ b/llvm/test/CodeGen/NVPTX/reg-copy.ll
@@ -4,7 +4,7 @@
target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64"
target triple = "nvptx64-unknown-unknown"
-define void @PR24303(ptr %f) {
+define ptx_kernel void @PR24303(ptr %f) {
; CHECK-LABEL: .visible .entry PR24303(
; Do not use mov.f or mov.u to convert between float and int.
; CHECK-NOT: mov.{{f|u}}{{32|64}} %f{{[0-9]+}}, %r{{[0-9]+}}
@@ -217,7 +217,3 @@ _ZN12cuda_builtinmlIfEENS_7complexIT_EERKS3_S5_.exit: ; preds = %if.then.93.i, %
}
declare float @llvm.nvvm.fabs.f(float)
-
-!nvvm.annotations = !{!0}
-
-!0 = !{ptr @PR24303, !"kernel", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/simple-call.ll b/llvm/test/CodeGen/NVPTX/simple-call.ll
index 3580604..991ae04 100644
--- a/llvm/test/CodeGen/NVPTX/simple-call.ll
+++ b/llvm/test/CodeGen/NVPTX/simple-call.ll
@@ -10,7 +10,7 @@ define float @device_func(float %a) noinline {
}
; CHECK: .entry kernel_func
-define void @kernel_func(ptr %a) {
+define ptx_kernel void @kernel_func(ptr %a) {
%val = load float, ptr %a
; CHECK: call.uni (retval0),
; CHECK: device_func,
@@ -18,9 +18,3 @@ define void @kernel_func(ptr %a) {
store float %mul, ptr %a
ret void
}
-
-
-
-!nvvm.annotations = !{!1}
-
-!1 = !{ptr @kernel_func, !"kernel", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/surf-read-cuda.ll b/llvm/test/CodeGen/NVPTX/surf-read-cuda.ll
index 504dcde..7a7904a 100644
--- a/llvm/test/CodeGen/NVPTX/surf-read-cuda.ll
+++ b/llvm/test/CodeGen/NVPTX/surf-read-cuda.ll
@@ -10,7 +10,7 @@ declare i32 @llvm.nvvm.suld.1d.i32.trap(i64, i32)
declare i64 @llvm.nvvm.texsurf.handle.internal.p1(ptr addrspace(1))
-define void @foo(i64 %img, ptr %red, i32 %idx) {
+define ptx_kernel void @foo(i64 %img, ptr %red, i32 %idx) {
; CHECK-LABEL: foo(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<3>;
@@ -34,7 +34,7 @@ define void @foo(i64 %img, ptr %red, i32 %idx) {
@surf0 = internal addrspace(1) global i64 0, align 8
-define void @bar(ptr %red, i32 %idx) {
+define ptx_kernel void @bar(ptr %red, i32 %idx) {
; CHECK-LABEL: bar(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<3>;
@@ -56,11 +56,5 @@ define void @bar(ptr %red, i32 %idx) {
ret void
}
-
-
-
-!nvvm.annotations = !{!1, !2, !3}
-!1 = !{ptr @foo, !"kernel", i32 1}
-!2 = !{ptr @bar, !"kernel", i32 1}
-!3 = !{ptr addrspace(1) @surf0, !"surface", i32 1}
-
+!nvvm.annotations = !{!1}
+!1 = !{ptr addrspace(1) @surf0, !"surface", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/surf-read.ll b/llvm/test/CodeGen/NVPTX/surf-read.ll
index e0cebd6..cd11b56 100644
--- a/llvm/test/CodeGen/NVPTX/surf-read.ll
+++ b/llvm/test/CodeGen/NVPTX/surf-read.ll
@@ -6,7 +6,7 @@ target triple = "nvptx64-unknown-nvcl"
declare i32 @llvm.nvvm.suld.1d.i32.trap(i64, i32)
; CHECK: .entry foo
-define void @foo(i64 %img, ptr %red, i32 %idx) {
+define ptx_kernel void @foo(i64 %img, ptr %red, i32 %idx) {
; CHECK: suld.b.1d.b32.trap {%r[[RED:[0-9]+]]}, [foo_param_0, {%r{{[0-9]+}}}]
%val = tail call i32 @llvm.nvvm.suld.1d.i32.trap(i64 %img, i32 %idx)
; CHECK: cvt.rn.f32.s32 %f[[REDF:[0-9]+]], %r[[RED]]
@@ -16,6 +16,5 @@ define void @foo(i64 %img, ptr %red, i32 %idx) {
ret void
}
-!nvvm.annotations = !{!1, !2}
-!1 = !{ptr @foo, !"kernel", i32 1}
-!2 = !{ptr @foo, !"rdwrimage", i32 0}
+!nvvm.annotations = !{!1}
+!1 = !{ptr @foo, !"rdwrimage", i32 0}
diff --git a/llvm/test/CodeGen/NVPTX/surf-tex.py b/llvm/test/CodeGen/NVPTX/surf-tex.py
index 9607a58..90d6766 100644
--- a/llvm/test/CodeGen/NVPTX/surf-tex.py
+++ b/llvm/test/CodeGen/NVPTX/surf-tex.py
@@ -224,11 +224,6 @@ def get_ptx_surface(target):
def get_surface_metadata(target, fun_ty, fun_name, has_surface_param):
metadata = []
- md_kernel = '!{{{fun_ty} @{fun_name}, !"kernel", i32 1}}'.format(
- fun_ty=fun_ty, fun_name=fun_name
- )
- metadata.append(md_kernel)
-
if target == "cuda":
# When a parameter is lowered as a .surfref, it still has the
# corresponding ld.param.u64, which is illegal. Do not emit the
@@ -263,14 +258,14 @@ def gen_suld_tests(target, global_surf):
; CHECK-LABEL: .entry ${test_name}_param
; CHECK: ${instruction} ${reg_ret}, [${reg_surf}, ${reg_access}]
;
- define void @${test_name}_param(i64 %s, ${retty}* %ret, ${access}) {
+ define ptx_kernel void @${test_name}_param(i64 %s, ${retty}* %ret, ${access}) {
%val = tail call ${retty} @${intrinsic}(i64 %s, ${access})
store ${retty} %val, ${retty}* %ret
ret void
}
; CHECK-LABEL: .entry ${test_name}_global
; CHECK: ${instruction} ${reg_ret}, [${global_surf}, ${reg_access}]
- define void @${test_name}_global(${retty}* %ret, ${access}) {
+ define ptx_kernel void @${test_name}_global(${retty}* %ret, ${access}) {
%gs = tail call i64 @llvm.nvvm.texsurf.handle.internal.p1i64(i64 addrspace(1)* @${global_surf})
%val = tail call ${retty} @${intrinsic}(i64 %gs, ${access})
store ${retty} %val, ${retty}* %ret
@@ -356,13 +351,13 @@ def gen_sust_tests(target, global_surf):
; CHECK-LABEL: .entry ${test_name}_param
; CHECK: ${instruction} [${reg_surf}, ${reg_access}], ${reg_value}
;
- define void @${test_name}_param(i64 %s, ${value}, ${access}) {
+ define ptx_kernel void @${test_name}_param(i64 %s, ${value}, ${access}) {
tail call void @${intrinsic}(i64 %s, ${access}, ${value})
ret void
}
; CHECK-LABEL: .entry ${test_name}_global
; CHECK: ${instruction} [${global_surf}, ${reg_access}], ${reg_value}
- define void @${test_name}_global(${value}, ${access}) {
+ define ptx_kernel void @${test_name}_global(${value}, ${access}) {
%gs = tail call i64 @llvm.nvvm.texsurf.handle.internal.p1i64(i64 addrspace(1)* @${global_surf})
tail call void @${intrinsic}(i64 %gs, ${access}, ${value})
ret void
@@ -420,19 +415,13 @@ def gen_sust_tests(target, global_surf):
generated_items.append((params["intrinsic"], params["instruction"]))
fun_name = test_name + "_param"
- fun_ty = "void (i64, {value_ty}, {access_ty})*".format(
- value_ty=get_llvm_value_type(vec, ctype),
- access_ty=get_llvm_surface_access_type(geom),
- )
+ fun_ty = "ptr"
generated_metadata += get_surface_metadata(
target, fun_ty, fun_name, has_surface_param=True
)
fun_name = test_name + "_global"
- fun_ty = "void ({value_ty}, {access_ty})*".format(
- value_ty=get_llvm_value_type(vec, ctype),
- access_ty=get_llvm_surface_access_type(geom),
- )
+ fun_ty = "ptr"
generated_metadata += get_surface_metadata(
target, fun_ty, fun_name, has_surface_param=False
)
@@ -559,11 +548,6 @@ def get_ptx_global_sampler(target, global_sampler):
def get_texture_metadata(target, fun_ty, fun_name, has_texture_params):
metadata = []
- md_kernel = '!{{{fun_ty} @{fun_name}, !"kernel", i32 1}}'.format(
- fun_ty=fun_ty, fun_name=fun_name
- )
- metadata.append(md_kernel)
-
if target == "cuda":
# When a parameter is lowered as a .texref, it still has the
# corresponding ld.param.u64, which is illegal. Do not emit the
@@ -615,14 +599,14 @@ def gen_tex_tests(target, global_tex, global_sampler):
; CHECK-LABEL: .entry ${test_name}_param
; CHECK: ${instruction} ${ptx_ret}, [${ptx_tex}, ${ptx_access}]
- define void @${test_name}_param(i64 %tex, ${sampler} ${retty}* %ret, ${access}) {
+ define ptx_kernel void @${test_name}_param(i64 %tex, ${sampler} ${retty}* %ret, ${access}) {
%val = tail call ${retty} @${intrinsic}(i64 %tex, ${sampler} ${access})
store ${retty} %val, ${retty}* %ret
ret void
}
; CHECK-LABEL: .entry ${test_name}_global
; CHECK: ${instruction} ${ptx_ret}, [${global_tex}, ${ptx_global_sampler} ${ptx_access}]
- define void @${test_name}_global(${retty}* %ret, ${access}) {
+ define ptx_kernel void @${test_name}_global(${retty}* %ret, ${access}) {
%gt = tail call i64 @llvm.nvvm.texsurf.handle.internal.p1i64(i64 addrspace(1)* @${global_tex})
${get_sampler_handle}
%val = tail call ${retty} @${intrinsic}(i64 %gt, ${sampler} ${access})
@@ -799,14 +783,14 @@ def gen_tld4_tests(target, global_tex, global_sampler):
; CHECK-LABEL: .entry ${test_name}_param
; CHECK: ${instruction} ${ptx_ret}, [${ptx_tex}, ${ptx_access}]
- define void @${test_name}_param(i64 %tex, ${sampler} ${retty}* %ret, ${access}) {
+ define ptx_kernel void @${test_name}_param(i64 %tex, ${sampler} ${retty}* %ret, ${access}) {
%val = tail call ${retty} @${intrinsic}(i64 %tex, ${sampler} ${access})
store ${retty} %val, ${retty}* %ret
ret void
}
; CHECK-LABEL: .entry ${test_name}_global
; CHECK: ${instruction} ${ptx_ret}, [${global_tex}, ${ptx_global_sampler} ${ptx_access}]
- define void @${test_name}_global(${retty}* %ret, ${access}) {
+ define ptx_kernel void @${test_name}_global(${retty}* %ret, ${access}) {
%gt = tail call i64 @llvm.nvvm.texsurf.handle.internal.p1i64(i64 addrspace(1)* @${global_tex})
${get_sampler_handle}
%val = tail call ${retty} @${intrinsic}(i64 %gt, ${sampler} ${access})
diff --git a/llvm/test/CodeGen/NVPTX/surf-write-cuda.ll b/llvm/test/CodeGen/NVPTX/surf-write-cuda.ll
index 881ea45..5dc44cb 100644
--- a/llvm/test/CodeGen/NVPTX/surf-write-cuda.ll
+++ b/llvm/test/CodeGen/NVPTX/surf-write-cuda.ll
@@ -10,7 +10,7 @@ declare void @llvm.nvvm.sust.b.1d.i32.trap(i64, i32, i32)
declare i64 @llvm.nvvm.texsurf.handle.internal.p1(ptr addrspace(1))
-define void @foo(i64 %img, i32 %val, i32 %idx) {
+define ptx_kernel void @foo(i64 %img, i32 %val, i32 %idx) {
; CHECK-LABEL: foo(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<3>;
@@ -30,7 +30,7 @@ define void @foo(i64 %img, i32 %val, i32 %idx) {
@surf0 = internal addrspace(1) global i64 0, align 8
-define void @bar(i32 %val, i32 %idx) {
+define ptx_kernel void @bar(i32 %val, i32 %idx) {
; CHECK-LABEL: bar(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<3>;
@@ -47,8 +47,6 @@ define void @bar(i32 %val, i32 %idx) {
}
-!nvvm.annotations = !{!1, !2, !3}
-!1 = !{ptr @foo, !"kernel", i32 1}
-!2 = !{ptr @bar, !"kernel", i32 1}
-!3 = !{ptr addrspace(1) @surf0, !"surface", i32 1}
+!nvvm.annotations = !{!1}
+!1 = !{ptr addrspace(1) @surf0, !"surface", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/surf-write.ll b/llvm/test/CodeGen/NVPTX/surf-write.ll
index 258bb6d..0e1f0cc 100644
--- a/llvm/test/CodeGen/NVPTX/surf-write.ll
+++ b/llvm/test/CodeGen/NVPTX/surf-write.ll
@@ -6,12 +6,11 @@ target triple = "nvptx-unknown-nvcl"
declare void @llvm.nvvm.sust.b.1d.i32.trap(i64, i32, i32)
; CHECK: .entry foo
-define void @foo(i64 %img, i32 %val, i32 %idx) {
+define ptx_kernel void @foo(i64 %img, i32 %val, i32 %idx) {
; CHECK: sust.b.1d.b32.trap [foo_param_0, {%r{{[0-9]+}}}], {%r{{[0-9]+}}}
tail call void @llvm.nvvm.sust.b.1d.i32.trap(i64 %img, i32 %idx, i32 %val)
ret void
}
-!nvvm.annotations = !{!1, !2}
-!1 = !{ptr @foo, !"kernel", i32 1}
-!2 = !{ptr @foo, !"wroimage", i32 0}
+!nvvm.annotations = !{!1}
+!1 = !{ptr @foo, !"wroimage", i32 0}
diff --git a/llvm/test/CodeGen/NVPTX/tex-read-cuda.ll b/llvm/test/CodeGen/NVPTX/tex-read-cuda.ll
index ba556d2d..61837bd 100644
--- a/llvm/test/CodeGen/NVPTX/tex-read-cuda.ll
+++ b/llvm/test/CodeGen/NVPTX/tex-read-cuda.ll
@@ -10,7 +10,7 @@ target triple = "nvptx-unknown-cuda"
declare { float, float, float, float } @llvm.nvvm.tex.unified.1d.v4f32.s32(i64, i32)
declare i64 @llvm.nvvm.texsurf.handle.internal.p1(ptr addrspace(1))
-define void @foo(i64 %img, ptr %red, i32 %idx) {
+define ptx_kernel void @foo(i64 %img, ptr %red, i32 %idx) {
; CHECK-LABEL: foo(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
@@ -34,7 +34,7 @@ define void @foo(i64 %img, ptr %red, i32 %idx) {
@tex0 = internal addrspace(1) global i64 0, align 8
-define void @bar(ptr %red, i32 %idx) {
+define ptx_kernel void @bar(ptr %red, i32 %idx) {
; CHECK-LABEL: bar(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
@@ -57,7 +57,7 @@ define void @bar(ptr %red, i32 %idx) {
declare float @texfunc(i64)
-define void @baz(ptr %red, i32 %idx) {
+define ptx_kernel void @baz(ptr %red, i32 %idx) {
; CHECK-LABEL: baz(
; CHECK: {
; CHECK-NEXT: .reg .b32 %r<2>;
@@ -93,8 +93,5 @@ define void @baz(ptr %red, i32 %idx) {
ret void
}
-!nvvm.annotations = !{!1, !2, !3, !4}
-!1 = !{ptr @foo, !"kernel", i32 1}
-!2 = !{ptr @bar, !"kernel", i32 1}
-!3 = !{ptr addrspace(1) @tex0, !"texture", i32 1}
-!4 = !{ptr @baz, !"kernel", i32 1}
+!nvvm.annotations = !{!1}
+!1 = !{ptr addrspace(1) @tex0, !"texture", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/tex-read.ll b/llvm/test/CodeGen/NVPTX/tex-read.ll
index d11aea4..d74c89f5 100644
--- a/llvm/test/CodeGen/NVPTX/tex-read.ll
+++ b/llvm/test/CodeGen/NVPTX/tex-read.ll
@@ -6,7 +6,7 @@ target triple = "nvptx64-unknown-nvcl"
declare { float, float, float, float } @llvm.nvvm.tex.1d.v4f32.s32(i64, i64, i32)
; CHECK: .entry foo
-define void @foo(i64 %img, i64 %sampler, ptr %red, i32 %idx) {
+define ptx_kernel void @foo(i64 %img, i64 %sampler, ptr %red, i32 %idx) {
; CHECK: tex.1d.v4.f32.s32 {%f[[RED:[0-9]+]], %f[[GREEN:[0-9]+]], %f[[BLUE:[0-9]+]], %f[[ALPHA:[0-9]+]]}, [foo_param_0, foo_param_1, {%r{{[0-9]+}}}]
%val = tail call { float, float, float, float } @llvm.nvvm.tex.1d.v4f32.s32(i64 %img, i64 %sampler, i32 %idx)
%ret = extractvalue { float, float, float, float } %val, 0
@@ -15,7 +15,6 @@ define void @foo(i64 %img, i64 %sampler, ptr %red, i32 %idx) {
ret void
}
-!nvvm.annotations = !{!1, !2, !3}
-!1 = !{ptr @foo, !"kernel", i32 1}
+!nvvm.annotations = !{!2, !3}
!2 = !{ptr @foo, !"rdoimage", i32 0}
!3 = !{ptr @foo, !"sampler", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/unreachable.ll b/llvm/test/CodeGen/NVPTX/unreachable.ll
index 286f358..80cf938 100644
--- a/llvm/test/CodeGen/NVPTX/unreachable.ll
+++ b/llvm/test/CodeGen/NVPTX/unreachable.ll
@@ -21,7 +21,7 @@ target triple = "nvptx-unknown-cuda"
declare void @throw() #0
declare void @llvm.trap() #0
-define void @kernel_func() {
+define ptx_kernel void @kernel_func() {
; NO-TRAP-UNREACHABLE-LABEL: kernel_func(
; NO-TRAP-UNREACHABLE: {
; NO-TRAP-UNREACHABLE-EMPTY:
@@ -102,6 +102,3 @@ define void @kernel_func_2() {
}
attributes #0 = { noreturn }
-
-!nvvm.annotations = !{!1}
-!1 = !{ptr @kernel_func, !"kernel", i32 1}
diff --git a/llvm/test/CodeGen/NVPTX/variadics-backend.ll b/llvm/test/CodeGen/NVPTX/variadics-backend.ll
index cb54812..f7ed690 100644
--- a/llvm/test/CodeGen/NVPTX/variadics-backend.ll
+++ b/llvm/test/CodeGen/NVPTX/variadics-backend.ll
@@ -153,7 +153,7 @@ define dso_local i32 @variadics2(i32 noundef %first, ...) {
; CHECK-PTX-NEXT: .reg .b64 %SPL;
; CHECK-PTX-NEXT: .reg .b16 %rs<6>;
; CHECK-PTX-NEXT: .reg .b32 %r<7>;
-; CHECK-PTX-NEXT: .reg .b64 %rd<11>;
+; CHECK-PTX-NEXT: .reg .b64 %rd<7>;
; CHECK-PTX-EMPTY:
; CHECK-PTX-NEXT: // %bb.0: // %entry
; CHECK-PTX-NEXT: mov.u64 %SPL, __local_depot2;
@@ -163,24 +163,20 @@ define dso_local i32 @variadics2(i32 noundef %first, ...) {
; CHECK-PTX-NEXT: add.s64 %rd2, %rd1, 7;
; CHECK-PTX-NEXT: and.b64 %rd3, %rd2, -8;
; CHECK-PTX-NEXT: ld.u32 %r2, [%rd3];
-; CHECK-PTX-NEXT: or.b64 %rd4, %rd3, 4;
-; CHECK-PTX-NEXT: ld.s8 %r3, [%rd4];
-; CHECK-PTX-NEXT: or.b64 %rd5, %rd3, 5;
-; CHECK-PTX-NEXT: or.b64 %rd6, %rd3, 7;
-; CHECK-PTX-NEXT: ld.u8 %rs1, [%rd6];
+; CHECK-PTX-NEXT: ld.s8 %r3, [%rd3+4];
+; CHECK-PTX-NEXT: ld.u8 %rs1, [%rd3+7];
; CHECK-PTX-NEXT: st.u8 [%SP+2], %rs1;
-; CHECK-PTX-NEXT: ld.u8 %rs2, [%rd5];
-; CHECK-PTX-NEXT: or.b64 %rd7, %rd3, 6;
-; CHECK-PTX-NEXT: ld.u8 %rs3, [%rd7];
+; CHECK-PTX-NEXT: ld.u8 %rs2, [%rd3+5];
+; CHECK-PTX-NEXT: ld.u8 %rs3, [%rd3+6];
; CHECK-PTX-NEXT: shl.b16 %rs4, %rs3, 8;
; CHECK-PTX-NEXT: or.b16 %rs5, %rs4, %rs2;
; CHECK-PTX-NEXT: st.u16 [%SP], %rs5;
-; CHECK-PTX-NEXT: ld.u64 %rd8, [%rd3+8];
+; CHECK-PTX-NEXT: ld.u64 %rd4, [%rd3+8];
; CHECK-PTX-NEXT: add.s32 %r4, %r1, %r2;
; CHECK-PTX-NEXT: add.s32 %r5, %r4, %r3;
-; CHECK-PTX-NEXT: cvt.u64.u32 %rd9, %r5;
-; CHECK-PTX-NEXT: add.s64 %rd10, %rd9, %rd8;
-; CHECK-PTX-NEXT: cvt.u32.u64 %r6, %rd10;
+; CHECK-PTX-NEXT: cvt.u64.u32 %rd5, %r5;
+; CHECK-PTX-NEXT: add.s64 %rd6, %rd5, %rd4;
+; CHECK-PTX-NEXT: cvt.u32.u64 %r6, %rd6;
; CHECK-PTX-NEXT: st.param.b32 [func_retval0], %r6;
; CHECK-PTX-NEXT: ret;
entry:
@@ -219,7 +215,7 @@ define dso_local i32 @bar() {
; CHECK-PTX-NEXT: .reg .b64 %SPL;
; CHECK-PTX-NEXT: .reg .b16 %rs<10>;
; CHECK-PTX-NEXT: .reg .b32 %r<4>;
-; CHECK-PTX-NEXT: .reg .b64 %rd<8>;
+; CHECK-PTX-NEXT: .reg .b64 %rd<7>;
; CHECK-PTX-EMPTY:
; CHECK-PTX-NEXT: // %bb.0: // %entry
; CHECK-PTX-NEXT: mov.u64 %SPL, __local_depot3;
@@ -240,17 +236,16 @@ define dso_local i32 @bar() {
; CHECK-PTX-NEXT: st.u16 [%SP], %rs8;
; CHECK-PTX-NEXT: mov.b32 %r1, 1;
; CHECK-PTX-NEXT: st.u32 [%SP+8], %r1;
-; CHECK-PTX-NEXT: add.u64 %rd5, %SP, 8;
-; CHECK-PTX-NEXT: or.b64 %rd6, %rd5, 4;
; CHECK-PTX-NEXT: mov.b16 %rs9, 1;
-; CHECK-PTX-NEXT: st.u8 [%rd6], %rs9;
-; CHECK-PTX-NEXT: mov.b64 %rd7, 1;
-; CHECK-PTX-NEXT: st.u64 [%SP+16], %rd7;
+; CHECK-PTX-NEXT: st.u8 [%SP+12], %rs9;
+; CHECK-PTX-NEXT: mov.b64 %rd5, 1;
+; CHECK-PTX-NEXT: st.u64 [%SP+16], %rd5;
+; CHECK-PTX-NEXT: add.u64 %rd6, %SP, 8;
; CHECK-PTX-NEXT: { // callseq 1, 0
; CHECK-PTX-NEXT: .param .b32 param0;
; CHECK-PTX-NEXT: st.param.b32 [param0], 1;
; CHECK-PTX-NEXT: .param .b64 param1;
-; CHECK-PTX-NEXT: st.param.b64 [param1], %rd5;
+; CHECK-PTX-NEXT: st.param.b64 [param1], %rd6;
; CHECK-PTX-NEXT: .param .b32 retval0;
; CHECK-PTX-NEXT: call.uni (retval0),
; CHECK-PTX-NEXT: variadics2,
diff --git a/llvm/test/CodeGen/PowerPC/global-merge-aix-zero-size-struct.ll b/llvm/test/CodeGen/PowerPC/global-merge-aix-zero-size-struct.ll
new file mode 100644
index 0000000..ec6fd7e
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/global-merge-aix-zero-size-struct.ll
@@ -0,0 +1,20 @@
+; RUN: llc -verify-machineinstrs -mtriple powerpc64-ibm-aix-xcoff -mcpu=pwr7 < %s | FileCheck %s
+
+; RUN: llc -verify-machineinstrs -mtriple powerpc64-ibm-aix-xcoff -mcpu=pwr7 --filetype=obj -o %t.o < %s
+; RUN: llvm-objdump --syms %t.o | FileCheck %s --check-prefix=OBJ
+
+%struct.anon = type {}
+
+@a = internal constant %struct.anon zeroinitializer, align 1
+@b = internal constant [6 x i8] c"hello\00", align 1
+
+; CHECK: .csect L.._MergedGlobals[RO],2
+; CHECK-NEXT: .lglobl a # @_MergedGlobals
+; CHECK-NEXT: .lglobl b
+; CHECK-NEXT: a:
+; CHECK-NEXT: b:
+; CHECK-NEXT: .string "hello"
+
+; OBJ: 0000000000000000 l .text 0000000000000006 L.._MergedGlobals
+; OBJ-NEXT: 0000000000000000 l .text (csect: L.._MergedGlobals) 0000000000000000 a
+; OBJ-NEXT: 0000000000000000 l .text (csect: L.._MergedGlobals) 0000000000000000 b
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/add-imm.ll b/llvm/test/CodeGen/RISCV/GlobalISel/add-imm.ll
index ff56ab1..0fd23a7 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/add-imm.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/add-imm.ll
@@ -14,7 +14,7 @@ define i32 @add_positive_low_bound_reject(i32 %a) nounwind {
;
; RV64I-LABEL: add_positive_low_bound_reject:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi a0, a0, 2047
+; RV64I-NEXT: addiw a0, a0, 2047
; RV64I-NEXT: ret
%1 = add i32 %a, 2047
ret i32 %1
@@ -30,7 +30,7 @@ define i32 @add_positive_low_bound_accept(i32 %a) nounwind {
; RV64I-LABEL: add_positive_low_bound_accept:
; RV64I: # %bb.0:
; RV64I-NEXT: addi a0, a0, 2047
-; RV64I-NEXT: addi a0, a0, 1
+; RV64I-NEXT: addiw a0, a0, 1
; RV64I-NEXT: ret
%1 = add i32 %a, 2048
ret i32 %1
@@ -46,7 +46,7 @@ define i32 @add_positive_high_bound_accept(i32 %a) nounwind {
; RV64I-LABEL: add_positive_high_bound_accept:
; RV64I: # %bb.0:
; RV64I-NEXT: addi a0, a0, 2047
-; RV64I-NEXT: addi a0, a0, 2047
+; RV64I-NEXT: addiw a0, a0, 2047
; RV64I-NEXT: ret
%1 = add i32 %a, 4094
ret i32 %1
@@ -63,8 +63,8 @@ define i32 @add_positive_high_bound_reject(i32 %a) nounwind {
; RV64I-LABEL: add_positive_high_bound_reject:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a1, 1
-; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: addi a1, a1, -1
+; RV64I-NEXT: addw a0, a0, a1
; RV64I-NEXT: ret
%1 = add i32 %a, 4095
ret i32 %1
@@ -78,7 +78,7 @@ define i32 @add_negative_high_bound_reject(i32 %a) nounwind {
;
; RV64I-LABEL: add_negative_high_bound_reject:
; RV64I: # %bb.0:
-; RV64I-NEXT: addi a0, a0, -2048
+; RV64I-NEXT: addiw a0, a0, -2048
; RV64I-NEXT: ret
%1 = add i32 %a, -2048
ret i32 %1
@@ -94,7 +94,7 @@ define i32 @add_negative_high_bound_accept(i32 %a) nounwind {
; RV64I-LABEL: add_negative_high_bound_accept:
; RV64I: # %bb.0:
; RV64I-NEXT: addi a0, a0, -2048
-; RV64I-NEXT: addi a0, a0, -1
+; RV64I-NEXT: addiw a0, a0, -1
; RV64I-NEXT: ret
%1 = add i32 %a, -2049
ret i32 %1
@@ -110,7 +110,7 @@ define i32 @add_negative_low_bound_accept(i32 %a) nounwind {
; RV64I-LABEL: add_negative_low_bound_accept:
; RV64I: # %bb.0:
; RV64I-NEXT: addi a0, a0, -2048
-; RV64I-NEXT: addi a0, a0, -2048
+; RV64I-NEXT: addiw a0, a0, -2048
; RV64I-NEXT: ret
%1 = add i32 %a, -4096
ret i32 %1
@@ -127,8 +127,8 @@ define i32 @add_negative_low_bound_reject(i32 %a) nounwind {
; RV64I-LABEL: add_negative_low_bound_reject:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a1, 1048575
-; RV64I-NEXT: addiw a1, a1, -1
-; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: addi a1, a1, -1
+; RV64I-NEXT: addw a0, a0, a1
; RV64I-NEXT: ret
%1 = add i32 %a, -4097
ret i32 %1
@@ -144,7 +144,7 @@ define i32 @add32_accept(i32 %a) nounwind {
; RV64I-LABEL: add32_accept:
; RV64I: # %bb.0:
; RV64I-NEXT: addi a0, a0, 2047
-; RV64I-NEXT: addi a0, a0, 952
+; RV64I-NEXT: addiw a0, a0, 952
; RV64I-NEXT: ret
%1 = add i32 %a, 2999
ret i32 %1
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll b/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll
index ee41499..f1c0fcc 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/alu-roundtrip.ll
@@ -37,7 +37,7 @@ define i32 @add_i8_signext_i32(i8 %a, i8 %b) {
; RV64IM-NEXT: slli a1, a1, 56
; RV64IM-NEXT: srai a0, a0, 56
; RV64IM-NEXT: srai a1, a1, 56
-; RV64IM-NEXT: add a0, a0, a1
+; RV64IM-NEXT: addw a0, a0, a1
; RV64IM-NEXT: ret
entry:
%0 = sext i8 %a to i32
@@ -58,7 +58,7 @@ define i32 @add_i8_zeroext_i32(i8 %a, i8 %b) {
; RV64IM: # %bb.0: # %entry
; RV64IM-NEXT: andi a0, a0, 255
; RV64IM-NEXT: andi a1, a1, 255
-; RV64IM-NEXT: add a0, a0, a1
+; RV64IM-NEXT: addw a0, a0, a1
; RV64IM-NEXT: ret
entry:
%0 = zext i8 %a to i32
@@ -78,7 +78,7 @@ define i32 @add_i32(i32 %a, i32 %b) {
;
; RV64IM-LABEL: add_i32:
; RV64IM: # %bb.0: # %entry
-; RV64IM-NEXT: add a0, a0, a1
+; RV64IM-NEXT: addw a0, a0, a1
; RV64IM-NEXT: ret
entry:
%0 = add i32 %a, %b
@@ -93,7 +93,7 @@ define i32 @addi_i32(i32 %a) {
;
; RV64IM-LABEL: addi_i32:
; RV64IM: # %bb.0: # %entry
-; RV64IM-NEXT: addi a0, a0, 1234
+; RV64IM-NEXT: addiw a0, a0, 1234
; RV64IM-NEXT: ret
entry:
%0 = add i32 %a, 1234
@@ -108,7 +108,7 @@ define i32 @sub_i32(i32 %a, i32 %b) {
;
; RV64IM-LABEL: sub_i32:
; RV64IM: # %bb.0: # %entry
-; RV64IM-NEXT: sub a0, a0, a1
+; RV64IM-NEXT: subw a0, a0, a1
; RV64IM-NEXT: ret
entry:
%0 = sub i32 %a, %b
@@ -123,7 +123,7 @@ define i32 @subi_i32(i32 %a) {
;
; RV64IM-LABEL: subi_i32:
; RV64IM: # %bb.0: # %entry
-; RV64IM-NEXT: addi a0, a0, -1234
+; RV64IM-NEXT: addiw a0, a0, -1234
; RV64IM-NEXT: ret
entry:
%0 = sub i32 %a, 1234
@@ -138,7 +138,7 @@ define i32 @neg_i32(i32 %a) {
;
; RV64IM-LABEL: neg_i32:
; RV64IM: # %bb.0: # %entry
-; RV64IM-NEXT: neg a0, a0
+; RV64IM-NEXT: negw a0, a0
; RV64IM-NEXT: ret
entry:
%0 = sub i32 0, %a
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/combine-neg-abs.ll b/llvm/test/CodeGen/RISCV/GlobalISel/combine-neg-abs.ll
index 6c848ec..3a55189 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/combine-neg-abs.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/combine-neg-abs.ll
@@ -27,14 +27,13 @@ define i32 @expanded_neg_abs32(i32 %x) {
;
; RV64I-LABEL: expanded_neg_abs32:
; RV64I: # %bb.0:
-; RV64I-NEXT: neg a1, a0
-; RV64I-NEXT: sext.w a2, a1
-; RV64I-NEXT: sext.w a3, a0
-; RV64I-NEXT: blt a3, a2, .LBB0_2
+; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: sext.w a2, a0
+; RV64I-NEXT: blt a2, a1, .LBB0_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: .LBB0_2:
-; RV64I-NEXT: neg a0, a1
+; RV64I-NEXT: negw a0, a1
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: expanded_neg_abs32:
@@ -42,7 +41,7 @@ define i32 @expanded_neg_abs32(i32 %x) {
; RV64ZBB-NEXT: negw a1, a0
; RV64ZBB-NEXT: sext.w a0, a0
; RV64ZBB-NEXT: max a0, a1, a0
-; RV64ZBB-NEXT: neg a0, a0
+; RV64ZBB-NEXT: negw a0, a0
; RV64ZBB-NEXT: ret
%n = sub i32 0, %x
%t = call i32 @llvm.smax.i32(i32 %n, i32 %x)
@@ -69,14 +68,13 @@ define i32 @expanded_neg_abs32_unsigned(i32 %x) {
;
; RV64I-LABEL: expanded_neg_abs32_unsigned:
; RV64I: # %bb.0:
-; RV64I-NEXT: neg a1, a0
-; RV64I-NEXT: sext.w a2, a1
-; RV64I-NEXT: sext.w a3, a0
-; RV64I-NEXT: bltu a3, a2, .LBB1_2
+; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: sext.w a2, a0
+; RV64I-NEXT: bltu a2, a1, .LBB1_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: .LBB1_2:
-; RV64I-NEXT: neg a0, a1
+; RV64I-NEXT: negw a0, a1
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: expanded_neg_abs32_unsigned:
@@ -84,7 +82,7 @@ define i32 @expanded_neg_abs32_unsigned(i32 %x) {
; RV64ZBB-NEXT: negw a1, a0
; RV64ZBB-NEXT: sext.w a0, a0
; RV64ZBB-NEXT: maxu a0, a1, a0
-; RV64ZBB-NEXT: neg a0, a0
+; RV64ZBB-NEXT: negw a0, a0
; RV64ZBB-NEXT: ret
%n = sub i32 0, %x
%t = call i32 @llvm.umax.i32(i32 %n, i32 %x)
@@ -251,14 +249,13 @@ define i32 @expanded_neg_inv_abs32(i32 %x) {
;
; RV64I-LABEL: expanded_neg_inv_abs32:
; RV64I: # %bb.0:
-; RV64I-NEXT: neg a1, a0
-; RV64I-NEXT: sext.w a2, a1
-; RV64I-NEXT: sext.w a3, a0
-; RV64I-NEXT: blt a2, a3, .LBB4_2
+; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: sext.w a2, a0
+; RV64I-NEXT: blt a1, a2, .LBB4_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: .LBB4_2:
-; RV64I-NEXT: neg a0, a1
+; RV64I-NEXT: negw a0, a1
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: expanded_neg_inv_abs32:
@@ -266,7 +263,7 @@ define i32 @expanded_neg_inv_abs32(i32 %x) {
; RV64ZBB-NEXT: negw a1, a0
; RV64ZBB-NEXT: sext.w a0, a0
; RV64ZBB-NEXT: min a0, a1, a0
-; RV64ZBB-NEXT: neg a0, a0
+; RV64ZBB-NEXT: negw a0, a0
; RV64ZBB-NEXT: ret
%n = sub i32 0, %x
%t = call i32 @llvm.smin.i32(i32 %n, i32 %x)
@@ -293,14 +290,13 @@ define i32 @expanded_neg_inv_abs32_unsigned(i32 %x) {
;
; RV64I-LABEL: expanded_neg_inv_abs32_unsigned:
; RV64I: # %bb.0:
-; RV64I-NEXT: neg a1, a0
-; RV64I-NEXT: sext.w a2, a1
-; RV64I-NEXT: sext.w a3, a0
-; RV64I-NEXT: bltu a2, a3, .LBB5_2
+; RV64I-NEXT: negw a1, a0
+; RV64I-NEXT: sext.w a2, a0
+; RV64I-NEXT: bltu a1, a2, .LBB5_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: .LBB5_2:
-; RV64I-NEXT: neg a0, a1
+; RV64I-NEXT: negw a0, a1
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: expanded_neg_inv_abs32_unsigned:
@@ -308,7 +304,7 @@ define i32 @expanded_neg_inv_abs32_unsigned(i32 %x) {
; RV64ZBB-NEXT: negw a1, a0
; RV64ZBB-NEXT: sext.w a0, a0
; RV64ZBB-NEXT: minu a0, a1, a0
-; RV64ZBB-NEXT: neg a0, a0
+; RV64ZBB-NEXT: negw a0, a0
; RV64ZBB-NEXT: ret
%n = sub i32 0, %x
%t = call i32 @llvm.umin.i32(i32 %n, i32 %x)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/combine.ll b/llvm/test/CodeGen/RISCV/GlobalISel/combine.ll
index 9c7fd68..360e84d 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/combine.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/combine.ll
@@ -21,6 +21,7 @@ define i32 @constant_to_rhs(i32 %x) {
; RV64-O0-NEXT: mv a1, a0
; RV64-O0-NEXT: li a0, 1
; RV64-O0-NEXT: add a0, a0, a1
+; RV64-O0-NEXT: sext.w a0, a0
; RV64-O0-NEXT: ret
;
; RV32-OPT-LABEL: constant_to_rhs:
@@ -30,7 +31,7 @@ define i32 @constant_to_rhs(i32 %x) {
;
; RV64-OPT-LABEL: constant_to_rhs:
; RV64-OPT: # %bb.0:
-; RV64-OPT-NEXT: addi a0, a0, 1
+; RV64-OPT-NEXT: addiw a0, a0, 1
; RV64-OPT-NEXT: ret
%a = add i32 1, %x
ret i32 %a
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/double-zfa.ll b/llvm/test/CodeGen/RISCV/GlobalISel/double-zfa.ll
index 385156b..4878699 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/double-zfa.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/double-zfa.ll
@@ -1,9 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-
; RUN: llc -mtriple=riscv32 -mattr=+zfa,d -global-isel < %s \
-; RUN: | FileCheck %s
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV32IDZFA
; RUN: llc -mtriple=riscv64 -mattr=+zfa,d -global-isel < %s \
-; RUN: | FileCheck %s
+; RUN: | FileCheck %s --check-prefixes=CHECK,RV64DZFA
define double @fceil(double %a) {
@@ -86,3 +85,32 @@ define double @fminimum(double %a, double %b) {
%c = call double @llvm.minimum.f64(double %a, double %b)
ret double %c
}
+
+define i64 @fmvh_x_d(double %fa) {
+; RV32IDZFA-LABEL: fmvh_x_d:
+; RV32IDZFA: # %bb.0:
+; RV32IDZFA-NEXT: fmv.x.w a0, fa0
+; RV32IDZFA-NEXT: fmvh.x.d a1, fa0
+; RV32IDZFA-NEXT: ret
+;
+; RV64DZFA-LABEL: fmvh_x_d:
+; RV64DZFA: # %bb.0:
+; RV64DZFA-NEXT: fmv.x.d a0, fa0
+; RV64DZFA-NEXT: ret
+ %i = bitcast double %fa to i64
+ ret i64 %i
+}
+
+define double @fmvp_d_x(i64 %a) {
+; RV32IDZFA-LABEL: fmvp_d_x:
+; RV32IDZFA: # %bb.0:
+; RV32IDZFA-NEXT: fmvp.d.x fa0, a0, a1
+; RV32IDZFA-NEXT: ret
+;
+; RV64DZFA-LABEL: fmvp_d_x:
+; RV64DZFA: # %bb.0:
+; RV64DZFA-NEXT: fmv.d.x fa0, a0
+; RV64DZFA-NEXT: ret
+ %or = bitcast i64 %a to double
+ ret double %or
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/freeze.ll b/llvm/test/CodeGen/RISCV/GlobalISel/freeze.ll
index 72f0ab1..234f338 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/freeze.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/freeze.ll
@@ -96,12 +96,19 @@ define ptr @freeze_ptr(ptr %x) {
%struct.T = type { i32, i32 }
define i32 @freeze_struct(ptr %p) {
-; CHECK-LABEL: freeze_struct:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lw a1, 0(a0)
-; CHECK-NEXT: lw a0, 4(a0)
-; CHECK-NEXT: add a0, a1, a0
-; CHECK-NEXT: ret
+; RV32-LABEL: freeze_struct:
+; RV32: # %bb.0:
+; RV32-NEXT: lw a1, 0(a0)
+; RV32-NEXT: lw a0, 4(a0)
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: freeze_struct:
+; RV64: # %bb.0:
+; RV64-NEXT: lw a1, 0(a0)
+; RV64-NEXT: lw a0, 4(a0)
+; RV64-NEXT: addw a0, a1, a0
+; RV64-NEXT: ret
%s = load %struct.T, ptr %p
%y1 = freeze %struct.T %s
%v1 = extractvalue %struct.T %y1, 0
@@ -111,12 +118,19 @@ define i32 @freeze_struct(ptr %p) {
}
define i32 @freeze_anonstruct(ptr %p) {
-; CHECK-LABEL: freeze_anonstruct:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lw a1, 0(a0)
-; CHECK-NEXT: lw a0, 4(a0)
-; CHECK-NEXT: add a0, a1, a0
-; CHECK-NEXT: ret
+; RV32-LABEL: freeze_anonstruct:
+; RV32: # %bb.0:
+; RV32-NEXT: lw a1, 0(a0)
+; RV32-NEXT: lw a0, 4(a0)
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: freeze_anonstruct:
+; RV64: # %bb.0:
+; RV64-NEXT: lw a1, 0(a0)
+; RV64-NEXT: lw a0, 4(a0)
+; RV64-NEXT: addw a0, a1, a0
+; RV64-NEXT: ret
%s = load {i32, i32}, ptr %p
%y1 = freeze {i32, i32} %s
%v1 = extractvalue {i32, i32} %y1, 0
@@ -141,7 +155,7 @@ define i32 @freeze_anonstruct2(ptr %p) {
; RV64-NEXT: lw a0, 0(a0)
; RV64-NEXT: slli a1, a1, 48
; RV64-NEXT: srli a1, a1, 48
-; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: addw a0, a0, a1
; RV64-NEXT: ret
%s = load {i32, i16}, ptr %p
%y1 = freeze {i32, i16} %s
@@ -168,7 +182,7 @@ define i32 @freeze_anonstruct2_sext(ptr %p) {
; RV64-NEXT: lw a0, 0(a0)
; RV64-NEXT: slli a1, a1, 48
; RV64-NEXT: srai a1, a1, 48
-; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: addw a0, a0, a1
; RV64-NEXT: ret
%s = load {i32, i16}, ptr %p
%y1 = freeze {i32, i16} %s
@@ -180,12 +194,19 @@ define i32 @freeze_anonstruct2_sext(ptr %p) {
}
define i32 @freeze_array(ptr %p) nounwind {
-; CHECK-LABEL: freeze_array:
-; CHECK: # %bb.0:
-; CHECK-NEXT: lw a1, 0(a0)
-; CHECK-NEXT: lw a0, 4(a0)
-; CHECK-NEXT: add a0, a1, a0
-; CHECK-NEXT: ret
+; RV32-LABEL: freeze_array:
+; RV32: # %bb.0:
+; RV32-NEXT: lw a1, 0(a0)
+; RV32-NEXT: lw a0, 4(a0)
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: freeze_array:
+; RV64: # %bb.0:
+; RV64-NEXT: lw a1, 0(a0)
+; RV64-NEXT: lw a0, 4(a0)
+; RV64-NEXT: addw a0, a1, a0
+; RV64-NEXT: ret
%s = load [2 x i32], ptr %p
%y1 = freeze [2 x i32] %s
%v1 = extractvalue [2 x i32] %y1, 0
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/iabs.ll b/llvm/test/CodeGen/RISCV/GlobalISel/iabs.ll
index 1156edf..31a78d4 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/iabs.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/iabs.ll
@@ -98,7 +98,7 @@ define i32 @abs32(i32 %x) {
; RV64I-LABEL: abs32:
; RV64I: # %bb.0:
; RV64I-NEXT: sraiw a1, a0, 31
-; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: addw a0, a0, a1
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
index a27e2b8..dbc1384 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer-info-validation.mir
@@ -23,7 +23,7 @@
# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
#
# DEBUG-NEXT: G_SUB (opcode [[SUB_OPC:[0-9]+]]): 1 type index, 0 imm indices
-# DEBUG-NEXT: .. opcode [[SUB_OPC]] is aliased to [[ADD_OPC]]
+# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
#
@@ -59,7 +59,6 @@
# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
#
# DEBUG-NEXT: G_AND (opcode {{[0-9]+}}): 1 type index, 0 imm indices
-# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
#
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir
index 22ce8a0..78a2227b 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir
@@ -86,9 +86,10 @@ body: |
; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[ASSERT_SEXT]], [[C]](s64)
; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASSERT_SEXT]], [[ASHR]]
- ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[ADD]], [[ASHR]]
- ; RV64I-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[XOR]], 32
- ; RV64I-NEXT: $x10 = COPY [[SEXT_INREG]](s64)
+ ; RV64I-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD]], 32
+ ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SEXT_INREG]], [[ASHR]]
+ ; RV64I-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[XOR]], 32
+ ; RV64I-NEXT: $x10 = COPY [[SEXT_INREG1]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
;
; RV64ZBB-LABEL: name: abs_i32
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
index 48b65a1..8f2b9f3 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-add-rv64.mir
@@ -69,7 +69,8 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
+ ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD]], 32
+ ; CHECK-NEXT: $x10 = COPY [[SEXT_INREG]](s64)
; CHECK-NEXT: PseudoRET implicit $x10
%0:_(s64) = COPY $x10
%1:_(s64) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir
index f2ec709..eed1aac 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-addo-subo-rv64.mir
@@ -339,7 +339,7 @@ body: |
; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD]], 32
; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 32
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[SEXT_INREG]](s64), [[SEXT_INREG1]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
+ ; CHECK-NEXT: $x10 = COPY [[SEXT_INREG]](s64)
; CHECK-NEXT: $x11 = COPY [[ICMP]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%2:_(s64) = COPY $x10
@@ -454,10 +454,11 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY]], 32
- ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 32
- ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[SEXT_INREG]](s64), [[SEXT_INREG1]]
- ; CHECK-NEXT: $x10 = COPY [[SUB]](s64)
+ ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SUB]], 32
+ ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY]], 32
+ ; CHECK-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 32
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[SEXT_INREG1]](s64), [[SEXT_INREG2]]
+ ; CHECK-NEXT: $x10 = COPY [[SEXT_INREG]](s64)
; CHECK-NEXT: $x11 = COPY [[ICMP]](s64)
; CHECK-NEXT: PseudoRET implicit $x10, implicit $x11
%2:_(s64) = COPY $x10
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-const-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-const-rv64.mir
index 57fc513..e28572d 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-const-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-const-rv64.mir
@@ -145,7 +145,8 @@ body: |
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -64769
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[C]]
- ; CHECK-NEXT: $x10 = COPY [[ADD]](s64)
+ ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD]], 32
+ ; CHECK-NEXT: $x10 = COPY [[SEXT_INREG]](s64)
; CHECK-NEXT: PseudoRET implicit $x10
%0:_(s32) = G_CONSTANT i32 -64769
%1:_(s64) = COPY $x10
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir
index 6cc5477..62d7313 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctlz-rv64.mir
@@ -59,7 +59,8 @@ body: |
; RV64ZBB-NEXT: [[CLZW:%[0-9]+]]:_(s64) = G_CLZW [[AND]]
; RV64ZBB-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
; RV64ZBB-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[CLZW]], [[C1]]
- ; RV64ZBB-NEXT: $x10 = COPY [[SUB]](s64)
+ ; RV64ZBB-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SUB]], 32
+ ; RV64ZBB-NEXT: $x10 = COPY [[SEXT_INREG]](s64)
; RV64ZBB-NEXT: PseudoRET implicit $x10
%1:_(s64) = COPY $x10
%0:_(s8) = G_TRUNC %1(s64)
@@ -129,7 +130,8 @@ body: |
; RV64ZBB-NEXT: [[CLZW:%[0-9]+]]:_(s64) = G_CLZW [[AND]]
; RV64ZBB-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; RV64ZBB-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[CLZW]], [[C1]]
- ; RV64ZBB-NEXT: $x10 = COPY [[SUB]](s64)
+ ; RV64ZBB-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SUB]], 32
+ ; RV64ZBB-NEXT: $x10 = COPY [[SEXT_INREG]](s64)
; RV64ZBB-NEXT: PseudoRET implicit $x10
%1:_(s64) = COPY $x10
%0:_(s16) = G_TRUNC %1(s64)
@@ -175,16 +177,19 @@ body: |
; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 1431655765
; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[LSHR5]], [[C6]]
; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[OR4]], [[AND6]]
- ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
+ ; RV64I-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SUB]], 32
+ ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[SEXT_INREG]], [[C1]]
; RV64I-NEXT: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[AND7]], [[C2]](s64)
; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 858993459
; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[LSHR6]], [[C7]]
- ; RV64I-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C7]]
+ ; RV64I-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[SEXT_INREG]], [[C7]]
; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[AND8]], [[AND9]]
- ; RV64I-NEXT: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[ADD]], [[C3]](s64)
- ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[LSHR7]], [[ADD]]
+ ; RV64I-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD]], 32
+ ; RV64I-NEXT: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[SEXT_INREG1]], [[C3]](s64)
+ ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[LSHR7]], [[SEXT_INREG1]]
+ ; RV64I-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD1]], 32
; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 252645135
- ; RV64I-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C8]]
+ ; RV64I-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[SEXT_INREG2]], [[C8]]
; RV64I-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 16843009
; RV64I-NEXT: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND10]], [[C9]]
@@ -192,7 +197,8 @@ body: |
; RV64I-NEXT: [[LSHR8:%[0-9]+]]:_(s64) = G_LSHR [[AND11]], [[C10]](s64)
; RV64I-NEXT: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; RV64I-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C11]], [[LSHR8]]
- ; RV64I-NEXT: $x10 = COPY [[SUB1]](s64)
+ ; RV64I-NEXT: [[SEXT_INREG3:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SUB1]], 32
+ ; RV64I-NEXT: $x10 = COPY [[SEXT_INREG3]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
;
; RV64ZBB-LABEL: name: ctlz_i32
@@ -328,7 +334,8 @@ body: |
; RV64ZBB-NEXT: [[CLZW:%[0-9]+]]:_(s64) = G_CLZW [[AND]]
; RV64ZBB-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
; RV64ZBB-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[CLZW]], [[C1]]
- ; RV64ZBB-NEXT: $x10 = COPY [[SUB]](s64)
+ ; RV64ZBB-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SUB]], 32
+ ; RV64ZBB-NEXT: $x10 = COPY [[SEXT_INREG]](s64)
; RV64ZBB-NEXT: PseudoRET implicit $x10
%1:_(s64) = COPY $x10
%0:_(s8) = G_TRUNC %1(s64)
@@ -398,7 +405,8 @@ body: |
; RV64ZBB-NEXT: [[CLZW:%[0-9]+]]:_(s64) = G_CLZW [[AND]]
; RV64ZBB-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; RV64ZBB-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[CLZW]], [[C1]]
- ; RV64ZBB-NEXT: $x10 = COPY [[SUB]](s64)
+ ; RV64ZBB-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SUB]], 32
+ ; RV64ZBB-NEXT: $x10 = COPY [[SEXT_INREG]](s64)
; RV64ZBB-NEXT: PseudoRET implicit $x10
%1:_(s64) = COPY $x10
%0:_(s16) = G_TRUNC %1(s64)
@@ -444,16 +452,19 @@ body: |
; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 1431655765
; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[LSHR5]], [[C6]]
; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[OR4]], [[AND6]]
- ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
+ ; RV64I-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SUB]], 32
+ ; RV64I-NEXT: [[AND7:%[0-9]+]]:_(s64) = G_AND [[SEXT_INREG]], [[C1]]
; RV64I-NEXT: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[AND7]], [[C2]](s64)
; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 858993459
; RV64I-NEXT: [[AND8:%[0-9]+]]:_(s64) = G_AND [[LSHR6]], [[C7]]
- ; RV64I-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C7]]
+ ; RV64I-NEXT: [[AND9:%[0-9]+]]:_(s64) = G_AND [[SEXT_INREG]], [[C7]]
; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[AND8]], [[AND9]]
- ; RV64I-NEXT: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[ADD]], [[C3]](s64)
- ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[LSHR7]], [[ADD]]
+ ; RV64I-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD]], 32
+ ; RV64I-NEXT: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[SEXT_INREG1]], [[C3]](s64)
+ ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[LSHR7]], [[SEXT_INREG1]]
+ ; RV64I-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD1]], 32
; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 252645135
- ; RV64I-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C8]]
+ ; RV64I-NEXT: [[AND10:%[0-9]+]]:_(s64) = G_AND [[SEXT_INREG2]], [[C8]]
; RV64I-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 16843009
; RV64I-NEXT: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND10]], [[C9]]
@@ -461,7 +472,8 @@ body: |
; RV64I-NEXT: [[LSHR8:%[0-9]+]]:_(s64) = G_LSHR [[AND11]], [[C10]](s64)
; RV64I-NEXT: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; RV64I-NEXT: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C11]], [[LSHR8]]
- ; RV64I-NEXT: $x10 = COPY [[SUB1]](s64)
+ ; RV64I-NEXT: [[SEXT_INREG3:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SUB1]], 32
+ ; RV64I-NEXT: $x10 = COPY [[SEXT_INREG3]](s64)
; RV64I-NEXT: PseudoRET implicit $x10
;
; RV64ZBB-LABEL: name: ctlz_zero_undef_i32
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir
index 1493514..c61c46d 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ctpop-rv64.mir
@@ -129,18 +129,21 @@ body: |
; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 1431655765
; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[C2]]
; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[AND1]]
+ ; RV64I-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SUB]], 32
; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
+ ; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[SEXT_INREG]], [[C1]]
; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND2]], [[C3]](s64)
; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 858993459
; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[C4]]
- ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C4]]
+ ; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[SEXT_INREG]], [[C4]]
; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[AND3]], [[AND4]]
+ ; RV64I-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD]], 32
; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[ADD]], [[C5]](s64)
- ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[LSHR2]], [[ADD]]
+ ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[SEXT_INREG1]], [[C5]](s64)
+ ; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[LSHR2]], [[SEXT_INREG1]]
+ ; RV64I-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD1]], 32
; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 252645135
- ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[ADD1]], [[C6]]
+ ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SEXT_INREG2]], [[C6]]
; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16843009
; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND5]], [[C7]]
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir
index 252e792..87155bb 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-cttz-rv64.mir
@@ -131,7 +131,8 @@ body: |
; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[C]]
; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[C]]
- ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[ADD]]
+ ; RV64I-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD]], 32
+ ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[SEXT_INREG]]
; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C2]]
@@ -139,18 +140,21 @@ body: |
; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1431655765
; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[C3]]
; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[AND]], [[AND2]]
+ ; RV64I-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SUB]], 32
; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C2]]
+ ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SEXT_INREG1]], [[C2]]
; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C4]](s64)
; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 858993459
; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[C5]]
- ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C5]]
+ ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SEXT_INREG1]], [[C5]]
; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[AND4]], [[AND5]]
+ ; RV64I-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD1]], 32
; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[ADD1]], [[C6]](s64)
- ; RV64I-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[LSHR2]], [[ADD1]]
+ ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[SEXT_INREG2]], [[C6]](s64)
+ ; RV64I-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[LSHR2]], [[SEXT_INREG2]]
+ ; RV64I-NEXT: [[SEXT_INREG3:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD2]], 32
; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 252645135
- ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[ADD2]], [[C7]]
+ ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[SEXT_INREG3]], [[C7]]
; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 16843009
; RV64I-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND6]], [[C8]]
@@ -351,7 +355,8 @@ body: |
; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[COPY]], [[C]]
; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[C]]
- ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[ADD]]
+ ; RV64I-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD]], 32
+ ; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[XOR]], [[SEXT_INREG]]
; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[AND]], [[C2]]
@@ -359,18 +364,21 @@ body: |
; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 1431655765
; RV64I-NEXT: [[AND2:%[0-9]+]]:_(s64) = G_AND [[LSHR]], [[C3]]
; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[AND]], [[AND2]]
+ ; RV64I-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SUB]], 32
; RV64I-NEXT: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
- ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C2]]
+ ; RV64I-NEXT: [[AND3:%[0-9]+]]:_(s64) = G_AND [[SEXT_INREG1]], [[C2]]
; RV64I-NEXT: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[AND3]], [[C4]](s64)
; RV64I-NEXT: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 858993459
; RV64I-NEXT: [[AND4:%[0-9]+]]:_(s64) = G_AND [[LSHR1]], [[C5]]
- ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C5]]
+ ; RV64I-NEXT: [[AND5:%[0-9]+]]:_(s64) = G_AND [[SEXT_INREG1]], [[C5]]
; RV64I-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[AND4]], [[AND5]]
+ ; RV64I-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD1]], 32
; RV64I-NEXT: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
- ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[ADD1]], [[C6]](s64)
- ; RV64I-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[LSHR2]], [[ADD1]]
+ ; RV64I-NEXT: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[SEXT_INREG2]], [[C6]](s64)
+ ; RV64I-NEXT: [[ADD2:%[0-9]+]]:_(s64) = G_ADD [[LSHR2]], [[SEXT_INREG2]]
+ ; RV64I-NEXT: [[SEXT_INREG3:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD2]], 32
; RV64I-NEXT: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 252645135
- ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[ADD2]], [[C7]]
+ ; RV64I-NEXT: [[AND6:%[0-9]+]]:_(s64) = G_AND [[SEXT_INREG3]], [[C7]]
; RV64I-NEXT: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 16843009
; RV64I-NEXT: [[C9:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
; RV64I-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[AND6]], [[C8]]
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ext-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ext-rv64.mir
index f3bc1ce..aff7d4d 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ext-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-ext-rv64.mir
@@ -30,8 +30,9 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]]
+ ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD]], 32
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
- ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[ADD]], [[C]]
+ ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[SEXT_INREG]], [[C]]
; CHECK-NEXT: $x10 = COPY [[AND]](s64)
; CHECK-NEXT: PseudoRET implicit $x10
%0:_(s64) = COPY $x10
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rotate-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rotate-rv64.mir
index 4689a7d..776f5f5 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rotate-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-rotate-rv64.mir
@@ -88,9 +88,10 @@ body: |
; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY1]]
+ ; RV64I-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SUB]], 32
; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C1]]
; RV64I-NEXT: [[SLLW:%[0-9]+]]:_(s64) = G_SLLW [[COPY]], [[AND]]
- ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
+ ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[SEXT_INREG]], [[C1]]
; RV64I-NEXT: [[SRLW:%[0-9]+]]:_(s64) = G_SRLW [[COPY]], [[AND1]]
; RV64I-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SLLW]], [[SRLW]]
; RV64I-NEXT: $x10 = COPY [[OR]](s64)
@@ -233,9 +234,10 @@ body: |
; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
; RV64I-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY1]]
+ ; RV64I-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SUB]], 32
; RV64I-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C1]]
; RV64I-NEXT: [[SRLW:%[0-9]+]]:_(s64) = G_SRLW [[COPY]], [[AND]]
- ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[SUB]], [[C1]]
+ ; RV64I-NEXT: [[AND1:%[0-9]+]]:_(s64) = G_AND [[SEXT_INREG]], [[C1]]
; RV64I-NEXT: [[SLLW:%[0-9]+]]:_(s64) = G_SLLW [[COPY]], [[AND1]]
; RV64I-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[SRLW]], [[SLLW]]
; RV64I-NEXT: $x10 = COPY [[OR]](s64)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv64.mir
index bf8c8d6..d162bfc 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sat-rv64.mir
@@ -16,8 +16,8 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY]], [[COPY1]]
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ADD]](s64)
; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD]], 32
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[SEXT_INREG]](s64)
; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 32
; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[SEXT_INREG]](s64), [[SEXT_INREG1]]
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[TRUNC]](s32)
@@ -97,7 +97,8 @@ body: |
; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C]](s64)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -2147483648
; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C1]]
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ADD1]](s64)
+ ; CHECK-NEXT: [[SEXT_INREG3:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD1]], 32
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[SEXT_INREG3]](s64)
; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s64), [[TRUNC1]], [[COPY2]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SELECT]](s32)
; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
@@ -173,10 +174,11 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[SUB]](s64)
- ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY]], 32
- ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 32
- ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[SEXT_INREG]](s64), [[SEXT_INREG1]]
+ ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SUB]], 32
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[SEXT_INREG]](s64)
+ ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY]], 32
+ ; CHECK-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s64) = G_SEXT_INREG [[COPY1]], 32
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ult), [[SEXT_INREG1]](s64), [[SEXT_INREG2]]
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s64), [[TRUNC1]], [[TRUNC]]
@@ -250,7 +252,8 @@ body: |
; CHECK-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY3]], [[C]](s64)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -2147483648
; CHECK-NEXT: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ASHR]], [[C1]]
- ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ADD]](s64)
+ ; CHECK-NEXT: [[SEXT_INREG3:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ADD]], 32
+ ; CHECK-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[SEXT_INREG3]](s64)
; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s64), [[TRUNC1]], [[COPY2]]
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[SELECT]](s32)
; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv64.mir
index da3ab9e..7ab07ee 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-sub-rv64.mir
@@ -69,7 +69,8 @@ body: |
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK-NEXT: $x10 = COPY [[SUB]](s64)
+ ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SUB]], 32
+ ; CHECK-NEXT: $x10 = COPY [[SEXT_INREG]](s64)
; CHECK-NEXT: PseudoRET implicit $x10
%0:_(s64) = COPY $x10
%1:_(s64) = COPY $x11
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
index 0b876fe..9df319e 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv64zbb.ll
@@ -18,7 +18,7 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: addiw a1, a2, 1365
+; RV64I-NEXT: addi a1, a2, 1365
; RV64I-NEXT: srliw a2, a0, 2
; RV64I-NEXT: or a0, a0, a2
; RV64I-NEXT: srliw a2, a0, 4
@@ -30,15 +30,15 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srliw a2, a0, 1
; RV64I-NEXT: and a1, a2, a1
; RV64I-NEXT: lui a2, 209715
-; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: addi a2, a2, 819
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
; RV64I-NEXT: and a0, a0, a2
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: lui a2, 61681
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
@@ -75,7 +75,7 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: addiw a1, a2, 1365
+; RV64I-NEXT: addi a1, a2, 1365
; RV64I-NEXT: srliw a2, a0, 2
; RV64I-NEXT: or a0, a0, a2
; RV64I-NEXT: srliw a2, a0, 4
@@ -87,15 +87,15 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srliw a2, a0, 1
; RV64I-NEXT: and a1, a2, a1
; RV64I-NEXT: lui a2, 209715
-; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: addi a2, a2, 819
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
; RV64I-NEXT: and a0, a0, a2
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: lui a2, 61681
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
@@ -133,15 +133,14 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: li s0, 32
-; RV64I-NEXT: addi a0, a0, -1
-; RV64I-NEXT: sext.w a2, a0
+; RV64I-NEXT: addiw a0, a0, -1
; RV64I-NEXT: li a1, 32
-; RV64I-NEXT: beqz a2, .LBB2_2
+; RV64I-NEXT: beqz a0, .LBB2_2
; RV64I-NEXT: # %bb.1: # %cond.false
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: addiw a1, a2, 1365
+; RV64I-NEXT: addi a1, a2, 1365
; RV64I-NEXT: srliw a2, a0, 2
; RV64I-NEXT: or a0, a0, a2
; RV64I-NEXT: srliw a2, a0, 4
@@ -153,15 +152,15 @@ define signext i32 @log2_ceil_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srliw a2, a0, 1
; RV64I-NEXT: and a1, a2, a1
; RV64I-NEXT: lui a2, 209715
-; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: addi a2, a2, 819
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
; RV64I-NEXT: and a0, a0, a2
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: lui a2, 61681
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
@@ -200,7 +199,7 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srliw a0, a0, 1
; RV64I-NEXT: lui a1, 349525
; RV64I-NEXT: or a0, s0, a0
-; RV64I-NEXT: addiw a1, a1, 1365
+; RV64I-NEXT: addi a1, a1, 1365
; RV64I-NEXT: srliw a2, a0, 2
; RV64I-NEXT: or a0, a0, a2
; RV64I-NEXT: srliw a2, a0, 4
@@ -212,15 +211,15 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
; RV64I-NEXT: srliw a2, a0, 1
; RV64I-NEXT: and a1, a2, a1
; RV64I-NEXT: lui a2, 209715
-; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: addi a2, a2, 819
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
; RV64I-NEXT: and a0, a0, a2
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: lui a2, 61681
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
@@ -271,7 +270,7 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: srliw a0, a0, 2
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: or a0, a1, a0
-; RV64I-NEXT: addiw a1, a2, 1365
+; RV64I-NEXT: addi a1, a2, 1365
; RV64I-NEXT: srli a2, a0, 2
; RV64I-NEXT: or a0, a0, a2
; RV64I-NEXT: srli a2, a0, 4
@@ -283,15 +282,15 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: srliw a2, a0, 1
; RV64I-NEXT: and a1, a2, a1
; RV64I-NEXT: lui a2, 209715
-; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: addi a2, a2, 819
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
; RV64I-NEXT: and a0, a0, a2
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: lui a2, 61681
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
@@ -299,7 +298,7 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: call __muldi3
; RV64I-NEXT: srliw a0, a0, 24
; RV64I-NEXT: li a1, 32
-; RV64I-NEXT: sub a0, a1, a0
+; RV64I-NEXT: subw a0, a1, a0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: .cfi_restore ra
; RV64I-NEXT: addi sp, sp, 16
@@ -408,19 +407,19 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: and a0, a1, a0
-; RV64I-NEXT: addiw a1, a2, 1365
+; RV64I-NEXT: addi a1, a2, 1365
; RV64I-NEXT: srliw a2, a0, 1
; RV64I-NEXT: and a1, a2, a1
; RV64I-NEXT: lui a2, 209715
-; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: addi a2, a2, 819
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
; RV64I-NEXT: and a0, a0, a2
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: lui a2, 61681
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
@@ -451,19 +450,19 @@ define signext i32 @cttz_zero_undef_i32(i32 signext %a) nounwind {
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: and a0, a1, a0
-; RV64I-NEXT: addiw a1, a2, 1365
+; RV64I-NEXT: addi a1, a2, 1365
; RV64I-NEXT: srliw a2, a0, 1
; RV64I-NEXT: and a1, a2, a1
; RV64I-NEXT: lui a2, 209715
-; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: addi a2, a2, 819
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
; RV64I-NEXT: and a0, a0, a2
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: lui a2, 61681
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
@@ -493,19 +492,19 @@ define signext i32 @findFirstSet_i32(i32 signext %a) nounwind {
; RV64I-NEXT: addi a1, s0, -1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: addiw a1, a2, 1365
+; RV64I-NEXT: addi a1, a2, 1365
; RV64I-NEXT: srliw a2, a0, 1
; RV64I-NEXT: and a1, a2, a1
; RV64I-NEXT: lui a2, 209715
-; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: addi a2, a2, 819
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
; RV64I-NEXT: and a0, a0, a2
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: lui a2, 61681
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
@@ -549,19 +548,19 @@ define signext i32 @ffs_i32(i32 signext %a) nounwind {
; RV64I-NEXT: addi a1, s0, -1
; RV64I-NEXT: lui a2, 349525
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: addiw a1, a2, 1365
+; RV64I-NEXT: addi a1, a2, 1365
; RV64I-NEXT: srliw a2, a0, 1
; RV64I-NEXT: and a1, a2, a1
; RV64I-NEXT: lui a2, 209715
-; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: addi a2, a2, 819
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
; RV64I-NEXT: and a0, a0, a2
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: lui a2, 61681
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
@@ -669,18 +668,18 @@ define signext i32 @ctpop_i32(i32 signext %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw a2, a2, 1365
+; RV64I-NEXT: addi a2, a2, 1365
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: lui a2, 209715
-; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: addi a2, a2, 819
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
; RV64I-NEXT: and a0, a0, a2
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: lui a2, 61681
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
@@ -706,18 +705,18 @@ define i1 @ctpop_i32_ult_two(i32 signext %a) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: lui a2, 349525
-; RV64I-NEXT: addiw a2, a2, 1365
+; RV64I-NEXT: addi a2, a2, 1365
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: lui a2, 209715
-; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: addi a2, a2, 819
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
; RV64I-NEXT: and a0, a0, a2
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: lui a2, 61681
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
@@ -746,19 +745,19 @@ define signext i32 @ctpop_i32_load(ptr %p) nounwind {
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lwu a0, 0(a0)
; RV64I-NEXT: lui a1, 349525
-; RV64I-NEXT: addiw a1, a1, 1365
+; RV64I-NEXT: addi a1, a1, 1365
; RV64I-NEXT: srli a2, a0, 1
; RV64I-NEXT: and a1, a2, a1
; RV64I-NEXT: lui a2, 209715
-; RV64I-NEXT: addiw a2, a2, 819
+; RV64I-NEXT: addi a2, a2, 819
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
; RV64I-NEXT: and a0, a0, a2
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: lui a2, 61681
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: srli a1, a0, 4
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: addw a0, a1, a0
; RV64I-NEXT: lui a1, 4112
; RV64I-NEXT: addiw a2, a2, -241
; RV64I-NEXT: and a0, a0, a2
@@ -1057,7 +1056,7 @@ define i32 @abs_i32(i32 %x) {
; RV64I-LABEL: abs_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: sraiw a1, a0, 31
-; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: addw a0, a0, a1
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/scmp.ll b/llvm/test/CodeGen/RISCV/GlobalISel/scmp.ll
index 4346e04..daeb2e6 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/scmp.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/scmp.ll
@@ -97,7 +97,7 @@ define i32 @scmp.32.32(i32 %x, i32 %y) nounwind {
; RV64I-NEXT: sext.w a1, a1
; RV64I-NEXT: slt a2, a1, a0
; RV64I-NEXT: slt a0, a0, a1
-; RV64I-NEXT: sub a0, a2, a0
+; RV64I-NEXT: subw a0, a2, a0
; RV64I-NEXT: ret
%1 = call i32 @llvm.scmp(i32 %x, i32 %y)
ret i32 %1
@@ -122,7 +122,7 @@ define i32 @scmp.32.64(i64 %x, i64 %y) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: slt a2, a1, a0
; RV64I-NEXT: slt a0, a0, a1
-; RV64I-NEXT: sub a0, a2, a0
+; RV64I-NEXT: subw a0, a2, a0
; RV64I-NEXT: ret
%1 = call i32 @llvm.scmp(i64 %x, i64 %y)
ret i32 %1
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/ucmp.ll b/llvm/test/CodeGen/RISCV/GlobalISel/ucmp.ll
index 9784c58..463883b 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/ucmp.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/ucmp.ll
@@ -97,7 +97,7 @@ define i32 @ucmp.32.32(i32 %x, i32 %y) nounwind {
; RV64I-NEXT: sext.w a1, a1
; RV64I-NEXT: sltu a2, a1, a0
; RV64I-NEXT: sltu a0, a0, a1
-; RV64I-NEXT: sub a0, a2, a0
+; RV64I-NEXT: subw a0, a2, a0
; RV64I-NEXT: ret
%1 = call i32 @llvm.ucmp(i32 %x, i32 %y)
ret i32 %1
@@ -115,7 +115,7 @@ define i32 @ucmp.32.32_sext(i32 signext %x, i32 signext %y) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: sltu a2, a1, a0
; RV64I-NEXT: sltu a0, a0, a1
-; RV64I-NEXT: sub a0, a2, a0
+; RV64I-NEXT: subw a0, a2, a0
; RV64I-NEXT: ret
%1 = call i32 @llvm.ucmp(i32 %x, i32 %y)
ret i32 %1
@@ -135,7 +135,7 @@ define i32 @ucmp.32.32_zext(i32 zeroext %x, i32 zeroext %y) nounwind {
; RV64I-NEXT: sext.w a1, a1
; RV64I-NEXT: sltu a2, a1, a0
; RV64I-NEXT: sltu a0, a0, a1
-; RV64I-NEXT: sub a0, a2, a0
+; RV64I-NEXT: subw a0, a2, a0
; RV64I-NEXT: ret
%1 = call i32 @llvm.ucmp(i32 %x, i32 %y)
ret i32 %1
@@ -160,7 +160,7 @@ define i32 @ucmp.32.64(i64 %x, i64 %y) nounwind {
; RV64I: # %bb.0:
; RV64I-NEXT: sltu a2, a1, a0
; RV64I-NEXT: sltu a0, a0, a1
-; RV64I-NEXT: sub a0, a2, a0
+; RV64I-NEXT: subw a0, a2, a0
; RV64I-NEXT: ret
%1 = call i32 @llvm.ucmp(i64 %x, i64 %y)
ret i32 %1
diff --git a/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
index fe89b4a..d7f62ae 100644
--- a/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
+++ b/llvm/test/CodeGen/RISCV/add_sext_shl_constant.ll
@@ -320,6 +320,19 @@ define i64 @add_shl_moreOneUse_sh3add(i64 %x) {
ret i64 %add
}
+;; Covers a case which previously crashed (pr119527)
+define i64 @add_shl_sext(i32 %1) {
+; RV64-LABEL: add_shl_sext:
+; RV64: # %bb.0:
+; RV64-NEXT: addi a1, a0, 3
+; RV64-NEXT: sllw a0, a1, a0
+; RV64-NEXT: ret
+ %3 = add i32 %1, 3
+ %4 = shl i32 %3, %1
+ %5 = sext i32 %4 to i64
+ ret i64 %5
+}
+
define i64 @add_shl_moreOneUse_sh4add(i64 %x) {
; RV64-LABEL: add_shl_moreOneUse_sh4add:
; RV64: # %bb.0:
diff --git a/llvm/test/CodeGen/RISCV/attributes.ll b/llvm/test/CodeGen/RISCV/attributes.ll
index 7e55e05..c0fcc6f 100644
--- a/llvm/test/CodeGen/RISCV/attributes.ll
+++ b/llvm/test/CodeGen/RISCV/attributes.ll
@@ -84,6 +84,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcia %s -o - | FileCheck --check-prefix=RV32XQCIA %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqciac %s -o - | FileCheck --check-prefix=RV32XQCIAC %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicli %s -o - | FileCheck --check-prefix=RV32XQCICLI %s
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicm %s -o - | FileCheck --check-prefix=RV32XQCICM %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcics %s -o - | FileCheck --check-prefix=RV32XQCICS %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcicsr %s -o - | FileCheck --check-prefix=RV32XQCICSR %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-xqcilsm %s -o - | FileCheck --check-prefix=RV32XQCILSM %s
@@ -397,6 +398,7 @@
; RV32XQCIA: .attribute 5, "rv32i2p1_xqcia0p2"
; RV32XQCIAC: .attribute 5, "rv32i2p1_zca1p0_xqciac0p2"
; RV32XQCICLI: .attribute 5, "rv32i2p1_xqcicli0p2"
+; RV32XQCICM: .attribute 5, "rv32i2p1_zca1p0_xqcicm0p2"
; RV32XQCICS: .attribute 5, "rv32i2p1_xqcics0p2"
; RV32XQCICSR: .attribute 5, "rv32i2p1_xqcicsr0p2"
; RV32XQCILSM: .attribute 5, "rv32i2p1_xqcilsm0p2"
diff --git a/llvm/test/CodeGen/RISCV/kcfi-isel-mir.ll b/llvm/test/CodeGen/RISCV/kcfi-isel-mir.ll
index 4c47b5f..2c428cf 100644
--- a/llvm/test/CodeGen/RISCV/kcfi-isel-mir.ll
+++ b/llvm/test/CodeGen/RISCV/kcfi-isel-mir.ll
@@ -20,7 +20,7 @@ define void @f2(ptr noundef %x) #0 {
; CHECK-NEXT: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:gprtc = COPY $x10
- ; CHECK-NEXT: PseudoTAILIndirect [[COPY]], implicit $x2, cfi-type 12345678
+ ; CHECK-NEXT: PseudoTAILIndirect [[COPY]], csr_ilp32_lp64, implicit $x2, cfi-type 12345678
tail call void %x() [ "kcfi"(i32 12345678) ]
ret void
}
diff --git a/llvm/test/CodeGen/RISCV/kcfi-mir.ll b/llvm/test/CodeGen/RISCV/kcfi-mir.ll
index f9f383a..0c0d39a 100644
--- a/llvm/test/CodeGen/RISCV/kcfi-mir.ll
+++ b/llvm/test/CodeGen/RISCV/kcfi-mir.ll
@@ -30,7 +30,7 @@ define void @f2(ptr noundef %x) #0 {
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: BUNDLE implicit-def $x6, implicit-def $x6_w, implicit-def $x6_h, implicit-def $x7, implicit-def $x7_w, implicit-def $x7_h, implicit-def $x28, implicit-def $x28_w, implicit-def $x28_h, implicit-def $x29, implicit-def $x29_w, implicit-def $x29_h, implicit-def $x30, implicit-def $x30_w, implicit-def $x30_h, implicit-def $x31, implicit-def $x31_w, implicit-def $x31_h, implicit killed $x10, implicit $x2 {
; CHECK-NEXT: KCFI_CHECK $x10, 12345678, implicit-def $x6, implicit-def $x7, implicit-def $x28, implicit-def $x29, implicit-def $x30, implicit-def $x31
- ; CHECK-NEXT: PseudoTAILIndirect killed $x10, implicit $x2
+ ; CHECK-NEXT: PseudoTAILIndirect killed $x10, csr_ilp32_lp64, implicit $x2
; CHECK-NEXT: }
tail call void %x() [ "kcfi"(i32 12345678) ]
ret void
diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
index 10d2492..4d34621 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
@@ -1445,10 +1445,9 @@ define <vscale x 1 x i64> @vp_bitreverse_nxv1i64(<vscale x 1 x i64> %va, <vscale
; RV32-NEXT: addi a6, sp, 8
; RV32-NEXT: sw a4, 8(sp)
; RV32-NEXT: sw zero, 12(sp)
-; RV32-NEXT: vsetvli a4, zero, e64, m1, ta, ma
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a6), zero
; RV32-NEXT: lui a4, 61681
-; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT: vsll.vx v10, v8, a3, v0.t
; RV32-NEXT: addi a5, a5, -256
; RV32-NEXT: vand.vx v11, v8, a5, v0.t
@@ -1595,9 +1594,7 @@ define <vscale x 1 x i64> @vp_bitreverse_nxv1i64_unmasked(<vscale x 1 x i64> %va
; RV32-NEXT: vand.vx v13, v8, a1
; RV32-NEXT: vand.vx v12, v12, a1
; RV32-NEXT: vor.vv v11, v12, v11
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v12, (a6), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT: vsll.vx v13, v13, a4
; RV32-NEXT: vor.vv v10, v10, v13
; RV32-NEXT: vsrl.vi v13, v8, 8
@@ -1730,10 +1727,9 @@ define <vscale x 2 x i64> @vp_bitreverse_nxv2i64(<vscale x 2 x i64> %va, <vscale
; RV32-NEXT: addi a6, sp, 8
; RV32-NEXT: sw a4, 8(sp)
; RV32-NEXT: sw zero, 12(sp)
-; RV32-NEXT: vsetvli a4, zero, e64, m2, ta, ma
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a6), zero
; RV32-NEXT: lui a4, 61681
-; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT: vsll.vx v12, v8, a3, v0.t
; RV32-NEXT: addi a5, a5, -256
; RV32-NEXT: vand.vx v14, v8, a5, v0.t
@@ -1880,9 +1876,7 @@ define <vscale x 2 x i64> @vp_bitreverse_nxv2i64_unmasked(<vscale x 2 x i64> %va
; RV32-NEXT: vand.vx v18, v8, a1
; RV32-NEXT: vand.vx v16, v16, a1
; RV32-NEXT: vor.vv v10, v16, v10
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v16, (a6), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT: vsll.vx v18, v18, a4
; RV32-NEXT: vor.vv v12, v12, v18
; RV32-NEXT: vsrl.vi v18, v8, 8
@@ -2015,10 +2009,9 @@ define <vscale x 4 x i64> @vp_bitreverse_nxv4i64(<vscale x 4 x i64> %va, <vscale
; RV32-NEXT: addi a6, sp, 8
; RV32-NEXT: sw a4, 8(sp)
; RV32-NEXT: sw zero, 12(sp)
-; RV32-NEXT: vsetvli a4, zero, e64, m4, ta, ma
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a6), zero
; RV32-NEXT: lui a4, 61681
-; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; RV32-NEXT: vsll.vx v16, v8, a3, v0.t
; RV32-NEXT: addi a5, a5, -256
; RV32-NEXT: vand.vx v20, v8, a5, v0.t
@@ -2165,9 +2158,7 @@ define <vscale x 4 x i64> @vp_bitreverse_nxv4i64_unmasked(<vscale x 4 x i64> %va
; RV32-NEXT: vand.vx v28, v8, a1
; RV32-NEXT: vand.vx v24, v24, a1
; RV32-NEXT: vor.vv v12, v24, v12
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v24, (a6), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; RV32-NEXT: vsll.vx v28, v28, a4
; RV32-NEXT: vor.vv v16, v16, v28
; RV32-NEXT: vsrl.vi v28, v8, 8
@@ -2315,7 +2306,6 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64(<vscale x 7 x i64> %va, <vscale
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli a3, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a5), zero
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 3
@@ -2323,7 +2313,6 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64(<vscale x 7 x i64> %va, <vscale
; RV32-NEXT: addi a3, a3, 16
; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: lui a3, 4080
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vand.vx v24, v8, a3, v0.t
; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
; RV32-NEXT: addi a5, sp, 16
@@ -2528,9 +2517,7 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64_unmasked(<vscale x 7 x i64> %va
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v24, (a6), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v16, v8, 24
; RV32-NEXT: vand.vx v16, v16, a5
; RV32-NEXT: vsrl.vi v0, v8, 8
@@ -2704,7 +2691,6 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64(<vscale x 8 x i64> %va, <vscale
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli a3, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a5), zero
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 3
@@ -2712,7 +2698,6 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64(<vscale x 8 x i64> %va, <vscale
; RV32-NEXT: addi a3, a3, 16
; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: lui a3, 4080
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vand.vx v24, v8, a3, v0.t
; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
; RV32-NEXT: addi a5, sp, 16
@@ -2917,9 +2902,7 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64_unmasked(<vscale x 8 x i64> %va
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v24, (a6), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v16, v8, 24
; RV32-NEXT: vand.vx v16, v16, a5
; RV32-NEXT: vsrl.vi v0, v8, 8
diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
index 0dc1d0c..0c58cca 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
@@ -523,11 +523,9 @@ define <vscale x 1 x i64> @vp_bswap_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1
; RV32-NEXT: sw zero, 12(sp)
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT: vsll.vx v9, v8, a2, v0.t
-; RV32-NEXT: addi a1, a3, -256
-; RV32-NEXT: vand.vx v10, v8, a1, v0.t
-; RV32-NEXT: vsetvli a3, zero, e64, m1, ta, ma
+; RV32-NEXT: addi a0, a3, -256
+; RV32-NEXT: vand.vx v10, v8, a0, v0.t
; RV32-NEXT: vlse64.v v11, (a6), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT: vsll.vx v10, v10, a4, v0.t
; RV32-NEXT: vor.vv v9, v9, v10, v0.t
; RV32-NEXT: vand.vx v10, v8, a5, v0.t
@@ -538,7 +536,7 @@ define <vscale x 1 x i64> @vp_bswap_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1
; RV32-NEXT: vor.vv v9, v9, v10, v0.t
; RV32-NEXT: vsrl.vx v10, v8, a2, v0.t
; RV32-NEXT: vsrl.vx v12, v8, a4, v0.t
-; RV32-NEXT: vand.vx v12, v12, a1, v0.t
+; RV32-NEXT: vand.vx v12, v12, a0, v0.t
; RV32-NEXT: vor.vv v10, v12, v10, v0.t
; RV32-NEXT: vsrl.vi v12, v8, 24, v0.t
; RV32-NEXT: vand.vx v12, v12, a5, v0.t
@@ -609,15 +607,13 @@ define <vscale x 1 x i64> @vp_bswap_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32
; RV32-NEXT: sw a1, 8(sp)
; RV32-NEXT: sw zero, 12(sp)
; RV32-NEXT: vsll.vx v10, v8, a2
-; RV32-NEXT: addi a1, a3, -256
+; RV32-NEXT: addi a0, a3, -256
; RV32-NEXT: vsrl.vx v11, v8, a2
; RV32-NEXT: vsrl.vx v12, v8, a4
-; RV32-NEXT: vand.vx v13, v8, a1
-; RV32-NEXT: vand.vx v12, v12, a1
+; RV32-NEXT: vand.vx v13, v8, a0
+; RV32-NEXT: vand.vx v12, v12, a0
; RV32-NEXT: vor.vv v11, v12, v11
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v12, (a6), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT: vsll.vx v13, v13, a4
; RV32-NEXT: vor.vv v10, v10, v13
; RV32-NEXT: vsrl.vi v13, v8, 8
@@ -695,11 +691,9 @@ define <vscale x 2 x i64> @vp_bswap_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2
; RV32-NEXT: sw zero, 12(sp)
; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT: vsll.vx v10, v8, a2, v0.t
-; RV32-NEXT: addi a1, a3, -256
-; RV32-NEXT: vand.vx v12, v8, a1, v0.t
-; RV32-NEXT: vsetvli a3, zero, e64, m2, ta, ma
+; RV32-NEXT: addi a0, a3, -256
+; RV32-NEXT: vand.vx v12, v8, a0, v0.t
; RV32-NEXT: vlse64.v v14, (a6), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT: vsll.vx v12, v12, a4, v0.t
; RV32-NEXT: vor.vv v10, v10, v12, v0.t
; RV32-NEXT: vand.vx v12, v8, a5, v0.t
@@ -710,7 +704,7 @@ define <vscale x 2 x i64> @vp_bswap_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2
; RV32-NEXT: vor.vv v10, v10, v12, v0.t
; RV32-NEXT: vsrl.vx v12, v8, a2, v0.t
; RV32-NEXT: vsrl.vx v16, v8, a4, v0.t
-; RV32-NEXT: vand.vx v16, v16, a1, v0.t
+; RV32-NEXT: vand.vx v16, v16, a0, v0.t
; RV32-NEXT: vor.vv v12, v16, v12, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 24, v0.t
; RV32-NEXT: vand.vx v16, v16, a5, v0.t
@@ -781,15 +775,13 @@ define <vscale x 2 x i64> @vp_bswap_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32
; RV32-NEXT: sw a1, 8(sp)
; RV32-NEXT: sw zero, 12(sp)
; RV32-NEXT: vsll.vx v12, v8, a2
-; RV32-NEXT: addi a1, a3, -256
+; RV32-NEXT: addi a0, a3, -256
; RV32-NEXT: vsrl.vx v14, v8, a2
; RV32-NEXT: vsrl.vx v16, v8, a4
-; RV32-NEXT: vand.vx v18, v8, a1
-; RV32-NEXT: vand.vx v16, v16, a1
+; RV32-NEXT: vand.vx v18, v8, a0
+; RV32-NEXT: vand.vx v16, v16, a0
; RV32-NEXT: vor.vv v14, v16, v14
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v16, (a6), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT: vsll.vx v18, v18, a4
; RV32-NEXT: vor.vv v12, v12, v18
; RV32-NEXT: vsrl.vi v18, v8, 8
@@ -867,11 +859,9 @@ define <vscale x 4 x i64> @vp_bswap_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4
; RV32-NEXT: sw zero, 12(sp)
; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; RV32-NEXT: vsll.vx v16, v8, a2, v0.t
-; RV32-NEXT: addi a1, a3, -256
-; RV32-NEXT: vand.vx v20, v8, a1, v0.t
-; RV32-NEXT: vsetvli a3, zero, e64, m4, ta, ma
+; RV32-NEXT: addi a0, a3, -256
+; RV32-NEXT: vand.vx v20, v8, a0, v0.t
; RV32-NEXT: vlse64.v v12, (a6), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; RV32-NEXT: vsll.vx v20, v20, a4, v0.t
; RV32-NEXT: vor.vv v16, v16, v20, v0.t
; RV32-NEXT: vand.vx v20, v8, a5, v0.t
@@ -882,7 +872,7 @@ define <vscale x 4 x i64> @vp_bswap_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4
; RV32-NEXT: vor.vv v16, v16, v20, v0.t
; RV32-NEXT: vsrl.vx v20, v8, a2, v0.t
; RV32-NEXT: vsrl.vx v24, v8, a4, v0.t
-; RV32-NEXT: vand.vx v24, v24, a1, v0.t
+; RV32-NEXT: vand.vx v24, v24, a0, v0.t
; RV32-NEXT: vor.vv v20, v24, v20, v0.t
; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t
; RV32-NEXT: vand.vx v24, v24, a5, v0.t
@@ -953,15 +943,13 @@ define <vscale x 4 x i64> @vp_bswap_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
; RV32-NEXT: sw a1, 8(sp)
; RV32-NEXT: sw zero, 12(sp)
; RV32-NEXT: vsll.vx v16, v8, a2
-; RV32-NEXT: addi a1, a3, -256
+; RV32-NEXT: addi a0, a3, -256
; RV32-NEXT: vsrl.vx v20, v8, a2
; RV32-NEXT: vsrl.vx v24, v8, a4
-; RV32-NEXT: vand.vx v28, v8, a1
-; RV32-NEXT: vand.vx v24, v24, a1
+; RV32-NEXT: vand.vx v28, v8, a0
+; RV32-NEXT: vand.vx v24, v24, a0
; RV32-NEXT: vor.vv v20, v24, v20
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v24, (a6), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; RV32-NEXT: vsll.vx v28, v28, a4
; RV32-NEXT: vor.vv v16, v16, v28
; RV32-NEXT: vsrl.vi v28, v8, 8
@@ -1043,51 +1031,49 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7
; RV32-NEXT: sw zero, 12(sp)
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsll.vx v16, v8, a2, v0.t
-; RV32-NEXT: addi a1, a3, -256
-; RV32-NEXT: vand.vx v24, v8, a1, v0.t
+; RV32-NEXT: addi a0, a3, -256
+; RV32-NEXT: vand.vx v24, v8, a0, v0.t
; RV32-NEXT: vsll.vx v24, v24, a4, v0.t
; RV32-NEXT: vor.vv v16, v16, v24, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vlse64.v v16, (a5), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: lui a1, 4080
+; RV32-NEXT: vand.vx v24, v8, a1, v0.t
+; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
+; RV32-NEXT: addi a3, sp, 16
+; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsll.vi v16, v24, 8, v0.t
+; RV32-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli a3, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
+; RV32-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 3
+; RV32-NEXT: slli a3, a3, 4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: lui a3, 4080
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vx v24, v8, a3, v0.t
-; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vv v24, v8, v16, v0.t
-; RV32-NEXT: vsll.vi v16, v24, 8, v0.t
-; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vor.vv v16, v24, v16, v0.t
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vor.vv v16, v24, v16, v0.t
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vx v16, v8, a2, v0.t
; RV32-NEXT: vsrl.vx v24, v8, a4, v0.t
-; RV32-NEXT: vand.vx v24, v24, a1, v0.t
+; RV32-NEXT: vand.vx v24, v24, a0, v0.t
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t
-; RV32-NEXT: vand.vx v24, v24, a3, v0.t
+; RV32-NEXT: vand.vx v24, v24, a1, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
@@ -1193,24 +1179,22 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32
; RV32-NEXT: sw zero, 12(sp)
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsll.vx v24, v8, a2
-; RV32-NEXT: addi a1, a3, -256
+; RV32-NEXT: addi a0, a3, -256
; RV32-NEXT: vsrl.vx v16, v8, a2
; RV32-NEXT: vsrl.vx v0, v8, a4
-; RV32-NEXT: vand.vx v0, v0, a1
+; RV32-NEXT: vand.vx v0, v0, a0
; RV32-NEXT: vor.vv v16, v0, v16
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 3
-; RV32-NEXT: add a2, sp, a2
-; RV32-NEXT: addi a2, a2, 16
-; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vx v0, v8, a1
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vx v0, v8, a0
; RV32-NEXT: vsll.vx v0, v0, a4
; RV32-NEXT: vor.vv v16, v24, v0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vlse64.v v0, (a6), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v16, v8, 24
; RV32-NEXT: vand.vx v16, v16, a5
; RV32-NEXT: vsrl.vi v24, v8, 8
@@ -1221,7 +1205,6 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32
; RV32-NEXT: vsll.vi v8, v8, 24
; RV32-NEXT: vsll.vi v24, v24, 8
; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v8, v24, v8
; RV32-NEXT: csrr a0, vlenb
@@ -1318,51 +1301,49 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
; RV32-NEXT: sw zero, 12(sp)
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsll.vx v16, v8, a2, v0.t
-; RV32-NEXT: addi a1, a3, -256
-; RV32-NEXT: vand.vx v24, v8, a1, v0.t
+; RV32-NEXT: addi a0, a3, -256
+; RV32-NEXT: vand.vx v24, v8, a0, v0.t
; RV32-NEXT: vsll.vx v24, v24, a4, v0.t
; RV32-NEXT: vor.vv v16, v16, v24, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vlse64.v v16, (a5), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: lui a1, 4080
+; RV32-NEXT: vand.vx v24, v8, a1, v0.t
+; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
+; RV32-NEXT: addi a3, sp, 16
+; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsll.vi v16, v24, 8, v0.t
+; RV32-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli a3, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
+; RV32-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 3
+; RV32-NEXT: slli a3, a3, 4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: lui a3, 4080
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vx v24, v8, a3, v0.t
-; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vv v24, v8, v16, v0.t
-; RV32-NEXT: vsll.vi v16, v24, 8, v0.t
-; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vor.vv v16, v24, v16, v0.t
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vor.vv v16, v24, v16, v0.t
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vx v16, v8, a2, v0.t
; RV32-NEXT: vsrl.vx v24, v8, a4, v0.t
-; RV32-NEXT: vand.vx v24, v24, a1, v0.t
+; RV32-NEXT: vand.vx v24, v24, a0, v0.t
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t
-; RV32-NEXT: vand.vx v24, v24, a3, v0.t
+; RV32-NEXT: vand.vx v24, v24, a1, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
@@ -1468,24 +1449,22 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
; RV32-NEXT: sw zero, 12(sp)
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsll.vx v24, v8, a2
-; RV32-NEXT: addi a1, a3, -256
+; RV32-NEXT: addi a0, a3, -256
; RV32-NEXT: vsrl.vx v16, v8, a2
; RV32-NEXT: vsrl.vx v0, v8, a4
-; RV32-NEXT: vand.vx v0, v0, a1
+; RV32-NEXT: vand.vx v0, v0, a0
; RV32-NEXT: vor.vv v16, v0, v16
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 3
-; RV32-NEXT: add a2, sp, a2
-; RV32-NEXT: addi a2, a2, 16
-; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vx v0, v8, a1
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vx v0, v8, a0
; RV32-NEXT: vsll.vx v0, v0, a4
; RV32-NEXT: vor.vv v16, v24, v0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vlse64.v v0, (a6), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v16, v8, 24
; RV32-NEXT: vand.vx v16, v16, a5
; RV32-NEXT: vsrl.vi v24, v8, 8
@@ -1496,7 +1475,6 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
; RV32-NEXT: vsll.vi v8, v8, 24
; RV32-NEXT: vsll.vi v24, v24, 8
; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v8, v24, v8
; RV32-NEXT: csrr a0, vlenb
@@ -1716,11 +1694,9 @@ define <vscale x 1 x i48> @vp_bswap_nxv1i48(<vscale x 1 x i48> %va, <vscale x 1
; RV32-NEXT: sw zero, 12(sp)
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT: vsll.vx v9, v8, a2, v0.t
-; RV32-NEXT: addi a1, a3, -256
-; RV32-NEXT: vand.vx v10, v8, a1, v0.t
-; RV32-NEXT: vsetvli a3, zero, e64, m1, ta, ma
+; RV32-NEXT: addi a0, a3, -256
+; RV32-NEXT: vand.vx v10, v8, a0, v0.t
; RV32-NEXT: vlse64.v v11, (a6), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT: vsll.vx v10, v10, a4, v0.t
; RV32-NEXT: vor.vv v9, v9, v10, v0.t
; RV32-NEXT: vand.vx v10, v8, a5, v0.t
@@ -1731,7 +1707,7 @@ define <vscale x 1 x i48> @vp_bswap_nxv1i48(<vscale x 1 x i48> %va, <vscale x 1
; RV32-NEXT: vor.vv v9, v9, v10, v0.t
; RV32-NEXT: vsrl.vx v10, v8, a2, v0.t
; RV32-NEXT: vsrl.vx v12, v8, a4, v0.t
-; RV32-NEXT: vand.vx v12, v12, a1, v0.t
+; RV32-NEXT: vand.vx v12, v12, a0, v0.t
; RV32-NEXT: vor.vv v10, v12, v10, v0.t
; RV32-NEXT: vsrl.vi v12, v8, 24, v0.t
; RV32-NEXT: vand.vx v12, v12, a5, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
index 15793ea..66952ca 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
@@ -1254,12 +1254,10 @@ define void @copysign_neg_trunc_v4f16_v4f32(ptr %x, ptr %y) {
define void @copysign_neg_trunc_v3f16_v3f32(ptr %x, ptr %y) {
; ZVFH-LABEL: copysign_neg_trunc_v3f16_v3f32:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 3, e32, m1, ta, ma
+; ZVFH-NEXT: vsetivli zero, 3, e16, mf2, ta, ma
; ZVFH-NEXT: vle32.v v8, (a1)
; ZVFH-NEXT: vle16.v v9, (a0)
-; ZVFH-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFH-NEXT: vfncvt.f.f.w v10, v8
-; ZVFH-NEXT: vsetivli zero, 3, e16, mf2, ta, ma
; ZVFH-NEXT: vfsgnjn.vv v8, v9, v10
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
@@ -1272,9 +1270,7 @@ define void @copysign_neg_trunc_v3f16_v3f32(ptr %x, ptr %y) {
; ZVFHMIN-NEXT: lui a1, 8
; ZVFHMIN-NEXT: addi a2, a1, -1
; ZVFHMIN-NEXT: vand.vx v8, v8, a2
-; ZVFHMIN-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v10, v9
-; ZVFHMIN-NEXT: vsetivli zero, 3, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vxor.vx v9, v10, a1
; ZVFHMIN-NEXT: vand.vx v9, v9, a1
; ZVFHMIN-NEXT: vor.vv v8, v8, v9
@@ -4013,9 +4009,10 @@ define void @trunc_v6f16(ptr %x) {
; ZVFH-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
+; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, ma
; ZVFH-NEXT: vfcvt.rtz.x.f.v v9, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
@@ -4197,10 +4194,11 @@ define void @ceil_v6f16(ptr %x) {
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a1, 3
+; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT: fsrm a1
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
@@ -4388,10 +4386,11 @@ define void @floor_v6f16(ptr %x) {
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a1, 2
+; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT: fsrm a1
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
@@ -4579,10 +4578,11 @@ define void @round_v6f16(ptr %x) {
; ZVFH-NEXT: vfabs.v v9, v8
; ZVFH-NEXT: vmflt.vf v0, v9, fa5
; ZVFH-NEXT: fsrmi a1, 4
+; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v9, v8, v0.t
; ZVFH-NEXT: fsrm a1
; ZVFH-NEXT: vfcvt.f.x.v v9, v9, v0.t
-; ZVFH-NEXT: vsetivli zero, 6, e16, m1, ta, mu
+; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; ZVFH-NEXT: vfsgnj.vv v8, v9, v8, v0.t
; ZVFH-NEXT: vse16.v v8, (a0)
; ZVFH-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
index 59c7feb..80e462c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -1142,9 +1142,7 @@ define void @mulhu_v6i16(ptr %x) {
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: lui a1, %hi(.LCPI67_0)
; CHECK-NEXT: addi a1, a1, %lo(.LCPI67_0)
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v9, (a1)
-; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
index 4f0f5dd..bf8baaf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
@@ -530,7 +530,7 @@ define i32 @reduce_and_16xi32_prefix5(ptr %p) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 5, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 5, e32, m1, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.v.i v10, -1
; CHECK-NEXT: vsetivli zero, 5, e32, m2, ta, ma
; CHECK-NEXT: vredand.vs v8, v8, v10
@@ -725,7 +725,7 @@ define i32 @reduce_umin_16xi32_prefix5(ptr %p) {
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 5, e32, m2, ta, ma
; RV32-NEXT: vle32.v v8, (a0)
-; RV32-NEXT: vsetivli zero, 5, e32, m1, ta, ma
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vmv.v.i v10, -1
; RV32-NEXT: vsetivli zero, 5, e32, m2, ta, ma
; RV32-NEXT: vredminu.vs v8, v8, v10
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
index 5d407ca..05254e6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
@@ -473,6 +473,7 @@ define <32 x i64> @select_evl_v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c)
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a1)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
index 7649d60..33fe73a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
@@ -582,14 +582,14 @@ define <vscale x 1 x half> @vfmax_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmax_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v11, v11, v0.t
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v9, v11, v8, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -616,13 +616,13 @@ define <vscale x 1 x half> @vfmax_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
;
; ZVFHMIN-LABEL: vfmax_vv_nxv1f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v10, v10
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v9, v10, v8, v0
; ZVFHMIN-NEXT: vmfeq.vv v0, v8, v8
; ZVFHMIN-NEXT: vmerge.vvm v8, v8, v10, v0
@@ -652,14 +652,14 @@ define <vscale x 2 x half> @vfmax_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmax_vv_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v11, v11, v0.t
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v9, v11, v8, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -686,13 +686,13 @@ define <vscale x 2 x half> @vfmax_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
;
; ZVFHMIN-LABEL: vfmax_vv_nxv2f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v10, v10
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v9, v10, v8, v0
; ZVFHMIN-NEXT: vmfeq.vv v0, v8, v8
; ZVFHMIN-NEXT: vmerge.vvm v8, v8, v10, v0
@@ -722,15 +722,15 @@ define <vscale x 4 x half> @vfmax_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmax_vv_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v12, v0.t
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v9
; ZVFHMIN-NEXT: vmv1r.v v0, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v16, v12, v14, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vmfeq.vv v8, v14, v14, v0.t
@@ -758,13 +758,13 @@ define <vscale x 4 x half> @vfmax_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
;
; ZVFHMIN-LABEL: vfmax_vv_nxv4f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v10, v10
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v8, v10, v12, v0
; ZVFHMIN-NEXT: vmfeq.vv v0, v12, v12
; ZVFHMIN-NEXT: vmerge.vvm v10, v12, v10, v0
@@ -796,15 +796,15 @@ define <vscale x 8 x half> @vfmax_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmax_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v16, v0.t
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v10
; ZVFHMIN-NEXT: vmv1r.v v0, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v24, v16, v20, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v12
; ZVFHMIN-NEXT: vmfeq.vv v8, v20, v20, v0.t
@@ -832,13 +832,13 @@ define <vscale x 8 x half> @vfmax_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
;
; ZVFHMIN-LABEL: vfmax_vv_nxv8f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v12, v12
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v8, v12, v16, v0
; ZVFHMIN-NEXT: vmfeq.vv v0, v16, v16
; ZVFHMIN-NEXT: vmerge.vvm v12, v16, v12, v0
@@ -876,15 +876,15 @@ define <vscale x 16 x half> @vfmax_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v24, v24, v0.t
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vmv1r.v v0, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v16, v0
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
@@ -927,10 +927,10 @@ define <vscale x 16 x half> @vfmax_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v16, v16
; ZVFHMIN-NEXT: vmfeq.vv v7, v24, v24
; ZVFHMIN-NEXT: vmerge.vvm v8, v16, v24, v0
@@ -995,64 +995,62 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: add a1, a2, a1
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x21, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 33 * vlenb
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a3, 24
-; ZVFHMIN-NEXT: mul a1, a1, a3
+; ZVFHMIN-NEXT: li a2, 25
+; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vmv8r.v v16, v8
+; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
; ZVFHMIN-NEXT: csrr a4, vlenb
-; ZVFHMIN-NEXT: slli a4, a4, 5
+; ZVFHMIN-NEXT: li a5, 24
+; ZVFHMIN-NEXT: mul a4, a4, a5
; ZVFHMIN-NEXT: add a4, sp, a4
; ZVFHMIN-NEXT: addi a4, a4, 16
; ZVFHMIN-NEXT: vs1r.v v0, (a4) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v12, v0, a2
+; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vs1r.v v0, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: vmv1r.v v0, v12
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmfeq.vv v13, v24, v24, v0.t
-; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: csrr a3, vlenb
; ZVFHMIN-NEXT: slli a3, a3, 4
; ZVFHMIN-NEXT: add a3, sp, a3
; ZVFHMIN-NEXT: addi a3, a3, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
-; ZVFHMIN-NEXT: vmv1r.v v0, v13
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: li a4, 24
-; ZVFHMIN-NEXT: mul a3, a3, a4
-; ZVFHMIN-NEXT: add a3, sp, a3
-; ZVFHMIN-NEXT: addi a3, a3, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmerge.vvm v24, v24, v16, v0
+; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vmfeq.vv v12, v24, v24, v0.t
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
+; ZVFHMIN-NEXT: li a3, 25
+; ZVFHMIN-NEXT: mul a2, a2, a3
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vl8r.v v0, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
; ZVFHMIN-NEXT: vmv1r.v v0, v12
-; ZVFHMIN-NEXT: vmfeq.vv v13, v16, v16, v0.t
-; ZVFHMIN-NEXT: vmv1r.v v0, v13
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v16, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: li a3, 24
-; ZVFHMIN-NEXT: mul a2, a2, a3
+; ZVFHMIN-NEXT: slli a2, a2, 3
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vmerge.vvm v16, v16, v24, v0
+; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vmfeq.vv v12, v16, v16, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v12
+; ZVFHMIN-NEXT: vmerge.vvm v16, v16, v24, v0
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
; ZVFHMIN-NEXT: add a2, sp, a2
@@ -1070,35 +1068,43 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB22_2:
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 5
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmfeq.vv v24, v16, v16, v0.t
-; ZVFHMIN-NEXT: vmv8r.v v8, v16
-; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v0
-; ZVFHMIN-NEXT: vmv1r.v v0, v24
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmerge.vvm v24, v8, v16, v0
+; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl1r.v v0, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vmfeq.vv v8, v24, v24, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: li a1, 25
+; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v0
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vmerge.vvm v24, v24, v16, v0
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li a1, 25
+; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
@@ -1110,7 +1116,7 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vmerge.vvm v16, v16, v24, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v9
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: li a1, 25
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
@@ -1152,68 +1158,61 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 5
+; ZVFHMIN-NEXT: li a2, 25
+; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x19, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 25 * vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT: vmset.m v7
-; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a3, 24
-; ZVFHMIN-NEXT: mul a1, a1, a3
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vmv8r.v v0, v8
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v12, v7, a2
+; ZVFHMIN-NEXT: vslidedown.vx v8, v24, a2
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: vmv1r.v v0, v12
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmfeq.vv v13, v24, v24, v0.t
-; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: slli a3, a3, 4
; ZVFHMIN-NEXT: add a3, sp, a3
; ZVFHMIN-NEXT: addi a3, a3, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
-; ZVFHMIN-NEXT: vmv1r.v v0, v13
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: li a4, 24
-; ZVFHMIN-NEXT: mul a3, a3, a4
-; ZVFHMIN-NEXT: add a3, sp, a3
-; ZVFHMIN-NEXT: addi a3, a3, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmerge.vvm v24, v24, v16, v0
+; ZVFHMIN-NEXT: vs8r.v v0, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vmfeq.vv v12, v24, v24, v0.t
+; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
+; ZVFHMIN-NEXT: slli a3, a2, 4
+; ZVFHMIN-NEXT: add a2, a3, a2
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
; ZVFHMIN-NEXT: vmv1r.v v0, v12
-; ZVFHMIN-NEXT: vmfeq.vv v13, v16, v16, v0.t
-; ZVFHMIN-NEXT: vmv1r.v v0, v13
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v16, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: li a3, 24
-; ZVFHMIN-NEXT: mul a2, a2, a3
+; ZVFHMIN-NEXT: slli a3, a2, 3
+; ZVFHMIN-NEXT: add a2, a3, a2
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vmerge.vvm v16, v16, v24, v0
+; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vl1r.v v13, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vmv1r.v v0, v13
+; ZVFHMIN-NEXT: vmfeq.vv v12, v16, v16, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v12
+; ZVFHMIN-NEXT: vmerge.vvm v16, v16, v24, v0
+; ZVFHMIN-NEXT: vmv1r.v v0, v13
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
+; ZVFHMIN-NEXT: slli a3, a2, 3
+; ZVFHMIN-NEXT: add a2, a3, a2
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
@@ -1221,7 +1220,8 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
+; ZVFHMIN-NEXT: slli a3, a2, 3
+; ZVFHMIN-NEXT: add a2, a3, a2
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
@@ -1229,43 +1229,49 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB23_2:
-; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a1, a0, 4
+; ZVFHMIN-NEXT: add a0, a1, a0
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v0
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v8, v8
; ZVFHMIN-NEXT: vmfeq.vv v7, v16, v16
; ZVFHMIN-NEXT: vmerge.vvm v24, v8, v16, v0
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 24
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a1, a0, 4
+; ZVFHMIN-NEXT: add a0, a1, a0
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vmerge.vvm v16, v16, v8, v0
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 24
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a1, a0, 4
+; ZVFHMIN-NEXT: add a0, a1, a0
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfmax.vv v16, v16, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: slli a1, a0, 3
+; ZVFHMIN-NEXT: add a0, a1, a0
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: li a1, 25
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
index 8e448fc..c65712e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
@@ -582,14 +582,14 @@ define <vscale x 1 x half> @vfmin_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmin_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v11, v11, v0.t
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v9, v11, v8, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -616,13 +616,13 @@ define <vscale x 1 x half> @vfmin_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
;
; ZVFHMIN-LABEL: vfmin_vv_nxv1f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v10, v10
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v9, v10, v8, v0
; ZVFHMIN-NEXT: vmfeq.vv v0, v8, v8
; ZVFHMIN-NEXT: vmerge.vvm v8, v8, v10, v0
@@ -652,14 +652,14 @@ define <vscale x 2 x half> @vfmin_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmin_vv_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v11, v11, v0.t
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v9, v11, v8, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vmfeq.vv v0, v8, v8, v0.t
@@ -686,13 +686,13 @@ define <vscale x 2 x half> @vfmin_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
;
; ZVFHMIN-LABEL: vfmin_vv_nxv2f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v10, v10
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v9, v10, v8, v0
; ZVFHMIN-NEXT: vmfeq.vv v0, v8, v8
; ZVFHMIN-NEXT: vmerge.vvm v8, v8, v10, v0
@@ -722,15 +722,15 @@ define <vscale x 4 x half> @vfmin_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmin_vv_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v10, v0
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v12, v0.t
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v9
; ZVFHMIN-NEXT: vmv1r.v v0, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v16, v12, v14, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vmfeq.vv v8, v14, v14, v0.t
@@ -758,13 +758,13 @@ define <vscale x 4 x half> @vfmin_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
;
; ZVFHMIN-LABEL: vfmin_vv_nxv4f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v10, v10
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v8, v10, v12, v0
; ZVFHMIN-NEXT: vmfeq.vv v0, v12, v12
; ZVFHMIN-NEXT: vmerge.vvm v10, v12, v10, v0
@@ -796,15 +796,15 @@ define <vscale x 8 x half> @vfmin_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmin_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v12, v0
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v16, v0.t
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v10
; ZVFHMIN-NEXT: vmv1r.v v0, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v24, v16, v20, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v12
; ZVFHMIN-NEXT: vmfeq.vv v8, v20, v20, v0.t
@@ -832,13 +832,13 @@ define <vscale x 8 x half> @vfmin_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
;
; ZVFHMIN-LABEL: vfmin_vv_nxv8f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v12, v12
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v8, v12, v16, v0
; ZVFHMIN-NEXT: vmfeq.vv v0, v16, v16
; ZVFHMIN-NEXT: vmerge.vvm v12, v16, v12, v0
@@ -876,15 +876,15 @@ define <vscale x 16 x half> @vfmin_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v24, v24, v0.t
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vmv1r.v v0, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v16, v0
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
@@ -927,10 +927,10 @@ define <vscale x 16 x half> @vfmin_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v16, v16
; ZVFHMIN-NEXT: vmfeq.vv v7, v24, v24
; ZVFHMIN-NEXT: vmerge.vvm v8, v16, v24, v0
@@ -995,64 +995,62 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: add a1, a2, a1
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x21, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 33 * vlenb
-; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a3, 24
-; ZVFHMIN-NEXT: mul a1, a1, a3
+; ZVFHMIN-NEXT: li a2, 25
+; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vmv8r.v v16, v8
+; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
; ZVFHMIN-NEXT: csrr a4, vlenb
-; ZVFHMIN-NEXT: slli a4, a4, 5
+; ZVFHMIN-NEXT: li a5, 24
+; ZVFHMIN-NEXT: mul a4, a4, a5
; ZVFHMIN-NEXT: add a4, sp, a4
; ZVFHMIN-NEXT: addi a4, a4, 16
; ZVFHMIN-NEXT: vs1r.v v0, (a4) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v12, v0, a2
+; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vs1r.v v0, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: vmv1r.v v0, v12
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmfeq.vv v13, v24, v24, v0.t
-; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: csrr a3, vlenb
; ZVFHMIN-NEXT: slli a3, a3, 4
; ZVFHMIN-NEXT: add a3, sp, a3
; ZVFHMIN-NEXT: addi a3, a3, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
-; ZVFHMIN-NEXT: vmv1r.v v0, v13
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: li a4, 24
-; ZVFHMIN-NEXT: mul a3, a3, a4
-; ZVFHMIN-NEXT: add a3, sp, a3
-; ZVFHMIN-NEXT: addi a3, a3, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmerge.vvm v24, v24, v16, v0
+; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vmfeq.vv v12, v24, v24, v0.t
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
+; ZVFHMIN-NEXT: li a3, 25
+; ZVFHMIN-NEXT: mul a2, a2, a3
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vl8r.v v0, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
; ZVFHMIN-NEXT: vmv1r.v v0, v12
-; ZVFHMIN-NEXT: vmfeq.vv v13, v16, v16, v0.t
-; ZVFHMIN-NEXT: vmv1r.v v0, v13
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v16, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: li a3, 24
-; ZVFHMIN-NEXT: mul a2, a2, a3
+; ZVFHMIN-NEXT: slli a2, a2, 3
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vmerge.vvm v16, v16, v24, v0
+; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vmfeq.vv v12, v16, v16, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v12
+; ZVFHMIN-NEXT: vmerge.vvm v16, v16, v24, v0
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
; ZVFHMIN-NEXT: add a2, sp, a2
@@ -1070,35 +1068,43 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB22_2:
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 5
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmfeq.vv v24, v16, v16, v0.t
-; ZVFHMIN-NEXT: vmv8r.v v8, v16
-; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v0
-; ZVFHMIN-NEXT: vmv1r.v v0, v24
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmerge.vvm v24, v8, v16, v0
+; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl1r.v v0, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vmfeq.vv v8, v24, v24, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: li a1, 25
+; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v0
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vmerge.vvm v24, v24, v16, v0
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li a1, 25
+; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
@@ -1110,7 +1116,7 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vmerge.vvm v16, v16, v24, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v9
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: li a1, 25
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
@@ -1152,68 +1158,61 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 5
+; ZVFHMIN-NEXT: li a2, 25
+; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x19, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 25 * vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT: vmset.m v7
-; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a3, 24
-; ZVFHMIN-NEXT: mul a1, a1, a3
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vmv8r.v v0, v8
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v12, v7, a2
+; ZVFHMIN-NEXT: vslidedown.vx v8, v24, a2
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: vmv1r.v v0, v12
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmfeq.vv v13, v24, v24, v0.t
-; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: slli a3, a3, 4
; ZVFHMIN-NEXT: add a3, sp, a3
; ZVFHMIN-NEXT: addi a3, a3, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
-; ZVFHMIN-NEXT: vmv1r.v v0, v13
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: li a4, 24
-; ZVFHMIN-NEXT: mul a3, a3, a4
-; ZVFHMIN-NEXT: add a3, sp, a3
-; ZVFHMIN-NEXT: addi a3, a3, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmerge.vvm v24, v24, v16, v0
+; ZVFHMIN-NEXT: vs8r.v v0, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vmfeq.vv v12, v24, v24, v0.t
+; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
+; ZVFHMIN-NEXT: slli a3, a2, 4
+; ZVFHMIN-NEXT: add a2, a3, a2
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
; ZVFHMIN-NEXT: vmv1r.v v0, v12
-; ZVFHMIN-NEXT: vmfeq.vv v13, v16, v16, v0.t
-; ZVFHMIN-NEXT: vmv1r.v v0, v13
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v16, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: li a3, 24
-; ZVFHMIN-NEXT: mul a2, a2, a3
+; ZVFHMIN-NEXT: slli a3, a2, 3
+; ZVFHMIN-NEXT: add a2, a3, a2
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vmerge.vvm v16, v16, v24, v0
+; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vl1r.v v13, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vmv1r.v v0, v13
+; ZVFHMIN-NEXT: vmfeq.vv v12, v16, v16, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v12
+; ZVFHMIN-NEXT: vmerge.vvm v16, v16, v24, v0
+; ZVFHMIN-NEXT: vmv1r.v v0, v13
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
+; ZVFHMIN-NEXT: slli a3, a2, 3
+; ZVFHMIN-NEXT: add a2, a3, a2
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
@@ -1221,7 +1220,8 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: slli a2, a2, 3
+; ZVFHMIN-NEXT: slli a3, a2, 3
+; ZVFHMIN-NEXT: add a2, a3, a2
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
@@ -1229,43 +1229,49 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB23_2:
-; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a1, a0, 4
+; ZVFHMIN-NEXT: add a0, a1, a0
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v0
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v8, v8
; ZVFHMIN-NEXT: vmfeq.vv v7, v16, v16
; ZVFHMIN-NEXT: vmerge.vvm v24, v8, v16, v0
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 24
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a1, a0, 4
+; ZVFHMIN-NEXT: add a0, a1, a0
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vmerge.vvm v16, v16, v8, v0
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 24
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a1, a0, 4
+; ZVFHMIN-NEXT: add a0, a1, a0
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfmin.vv v16, v16, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: slli a1, a0, 3
+; ZVFHMIN-NEXT: add a0, a1, a0
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: li a1, 25
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll b/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll
index 2fda344..6787c8c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fold-binary-reduce.ll
@@ -18,7 +18,7 @@ entry:
define i64 @reduce_add2(<4 x i64> %v) {
; CHECK-LABEL: reduce_add2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 4, e64, m1, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT: vmv.v.i v10, 8
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT: vredsum.vs v8, v8, v10
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
index 70b5384..06f4876 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
@@ -1658,10 +1658,10 @@ define <vscale x 1 x i1> @fcmp_oeq_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_oeq_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v9, v10, v0.t
; ZVFHMIN-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"oeq", <vscale x 1 x i1> %m, i32 %evl)
@@ -1678,11 +1678,11 @@ define <vscale x 1 x i1> @fcmp_oeq_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_oeq_vf_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v10, v8, v0.t
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
@@ -1701,11 +1701,11 @@ define <vscale x 1 x i1> @fcmp_oeq_vf_swap_nxv1f16(<vscale x 1 x half> %va, half
; ZVFHMIN-LABEL: fcmp_oeq_vf_swap_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v8, v10, v0.t
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
@@ -1723,10 +1723,10 @@ define <vscale x 1 x i1> @fcmp_ogt_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_ogt_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v0, v8, v10, v0.t
; ZVFHMIN-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"ogt", <vscale x 1 x i1> %m, i32 %evl)
@@ -1743,11 +1743,11 @@ define <vscale x 1 x i1> @fcmp_ogt_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_ogt_vf_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v0, v8, v10, v0.t
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
@@ -1766,11 +1766,11 @@ define <vscale x 1 x i1> @fcmp_ogt_vf_swap_nxv1f16(<vscale x 1 x half> %va, half
; ZVFHMIN-LABEL: fcmp_ogt_vf_swap_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v0, v10, v8, v0.t
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
@@ -1788,10 +1788,10 @@ define <vscale x 1 x i1> @fcmp_oge_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_oge_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v0, v8, v10, v0.t
; ZVFHMIN-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"oge", <vscale x 1 x i1> %m, i32 %evl)
@@ -1808,11 +1808,11 @@ define <vscale x 1 x i1> @fcmp_oge_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_oge_vf_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v0, v8, v10, v0.t
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
@@ -1831,11 +1831,11 @@ define <vscale x 1 x i1> @fcmp_oge_vf_swap_nxv1f16(<vscale x 1 x half> %va, half
; ZVFHMIN-LABEL: fcmp_oge_vf_swap_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v0, v10, v8, v0.t
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
@@ -1853,10 +1853,10 @@ define <vscale x 1 x i1> @fcmp_olt_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_olt_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v0, v9, v10, v0.t
; ZVFHMIN-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"olt", <vscale x 1 x i1> %m, i32 %evl)
@@ -1873,11 +1873,11 @@ define <vscale x 1 x i1> @fcmp_olt_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_olt_vf_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v0, v10, v8, v0.t
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
@@ -1896,11 +1896,11 @@ define <vscale x 1 x i1> @fcmp_olt_vf_swap_nxv1f16(<vscale x 1 x half> %va, half
; ZVFHMIN-LABEL: fcmp_olt_vf_swap_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v0, v8, v10, v0.t
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
@@ -1918,10 +1918,10 @@ define <vscale x 1 x i1> @fcmp_ole_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_ole_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v0, v9, v10, v0.t
; ZVFHMIN-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"ole", <vscale x 1 x i1> %m, i32 %evl)
@@ -1938,11 +1938,11 @@ define <vscale x 1 x i1> @fcmp_ole_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_ole_vf_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v0, v10, v8, v0.t
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
@@ -1961,11 +1961,11 @@ define <vscale x 1 x i1> @fcmp_ole_vf_swap_nxv1f16(<vscale x 1 x half> %va, half
; ZVFHMIN-LABEL: fcmp_ole_vf_swap_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v0, v8, v10, v0.t
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
@@ -1985,10 +1985,10 @@ define <vscale x 1 x i1> @fcmp_one_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_one_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v9, v10, v0.t
; ZVFHMIN-NEXT: vmflt.vv v9, v10, v9, v0.t
; ZVFHMIN-NEXT: vmor.mm v0, v9, v8
@@ -2009,11 +2009,11 @@ define <vscale x 1 x i1> @fcmp_one_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_one_vf_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vmv.v.x v8, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v9, v10, v0.t
; ZVFHMIN-NEXT: vmflt.vv v9, v10, v9, v0.t
; ZVFHMIN-NEXT: vmor.mm v0, v9, v8
@@ -2036,11 +2036,11 @@ define <vscale x 1 x i1> @fcmp_one_vf_swap_nxv1f16(<vscale x 1 x half> %va, half
; ZVFHMIN-LABEL: fcmp_one_vf_swap_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vmv.v.x v8, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v10, v9, v0.t
; ZVFHMIN-NEXT: vmflt.vv v9, v9, v10, v0.t
; ZVFHMIN-NEXT: vmor.mm v0, v9, v8
@@ -2062,10 +2062,10 @@ define <vscale x 1 x i1> @fcmp_ord_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_ord_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v10, v10, v0.t
; ZVFHMIN-NEXT: vmfeq.vv v9, v9, v9, v0.t
; ZVFHMIN-NEXT: vmand.mm v0, v9, v8
@@ -2088,14 +2088,14 @@ define <vscale x 1 x i1> @fcmp_ord_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_ord_vf_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vmv.v.x v8, a1
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v9, v9, v9, v0.t
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v10, v10, v0.t
; ZVFHMIN-NEXT: vmand.mm v0, v9, v8
; ZVFHMIN-NEXT: ret
@@ -2119,14 +2119,14 @@ define <vscale x 1 x i1> @fcmp_ord_vf_swap_nxv1f16(<vscale x 1 x half> %va, half
; ZVFHMIN-LABEL: fcmp_ord_vf_swap_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vmv.v.x v8, a1
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v9, v9, v9, v0.t
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v10, v10, v0.t
; ZVFHMIN-NEXT: vmand.mm v0, v8, v9
; ZVFHMIN-NEXT: ret
@@ -2147,10 +2147,10 @@ define <vscale x 1 x i1> @fcmp_ueq_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_ueq_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v9, v10, v0.t
; ZVFHMIN-NEXT: vmflt.vv v9, v10, v9, v0.t
; ZVFHMIN-NEXT: vmnor.mm v0, v9, v8
@@ -2171,11 +2171,11 @@ define <vscale x 1 x i1> @fcmp_ueq_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_ueq_vf_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vmv.v.x v8, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v9, v10, v0.t
; ZVFHMIN-NEXT: vmflt.vv v9, v10, v9, v0.t
; ZVFHMIN-NEXT: vmnor.mm v0, v9, v8
@@ -2198,11 +2198,11 @@ define <vscale x 1 x i1> @fcmp_ueq_vf_swap_nxv1f16(<vscale x 1 x half> %va, half
; ZVFHMIN-LABEL: fcmp_ueq_vf_swap_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vmv.v.x v8, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v10, v9, v0.t
; ZVFHMIN-NEXT: vmflt.vv v9, v9, v10, v0.t
; ZVFHMIN-NEXT: vmnor.mm v0, v9, v8
@@ -2223,10 +2223,10 @@ define <vscale x 1 x i1> @fcmp_ugt_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_ugt_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v8, v9, v10, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -2245,11 +2245,11 @@ define <vscale x 1 x i1> @fcmp_ugt_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_ugt_vf_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v8, v10, v8, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -2270,11 +2270,11 @@ define <vscale x 1 x i1> @fcmp_ugt_vf_swap_nxv1f16(<vscale x 1 x half> %va, half
; ZVFHMIN-LABEL: fcmp_ugt_vf_swap_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v8, v8, v10, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -2294,10 +2294,10 @@ define <vscale x 1 x i1> @fcmp_uge_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_uge_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v9, v10, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -2316,11 +2316,11 @@ define <vscale x 1 x i1> @fcmp_uge_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_uge_vf_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v10, v8, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -2341,11 +2341,11 @@ define <vscale x 1 x i1> @fcmp_uge_vf_swap_nxv1f16(<vscale x 1 x half> %va, half
; ZVFHMIN-LABEL: fcmp_uge_vf_swap_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v8, v10, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -2365,10 +2365,10 @@ define <vscale x 1 x i1> @fcmp_ult_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_ult_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v8, v8, v10, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -2387,11 +2387,11 @@ define <vscale x 1 x i1> @fcmp_ult_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_ult_vf_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v8, v8, v10, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -2412,11 +2412,11 @@ define <vscale x 1 x i1> @fcmp_ult_vf_swap_nxv1f16(<vscale x 1 x half> %va, half
; ZVFHMIN-LABEL: fcmp_ult_vf_swap_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v8, v10, v8, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -2436,10 +2436,10 @@ define <vscale x 1 x i1> @fcmp_ule_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_ule_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v8, v10, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -2458,11 +2458,11 @@ define <vscale x 1 x i1> @fcmp_ule_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_ule_vf_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v8, v10, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -2483,11 +2483,11 @@ define <vscale x 1 x i1> @fcmp_ule_vf_swap_nxv1f16(<vscale x 1 x half> %va, half
; ZVFHMIN-LABEL: fcmp_ule_vf_swap_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v10, v8, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -2506,10 +2506,10 @@ define <vscale x 1 x i1> @fcmp_une_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_une_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfne.vv v0, v9, v10, v0.t
; ZVFHMIN-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"une", <vscale x 1 x i1> %m, i32 %evl)
@@ -2526,11 +2526,11 @@ define <vscale x 1 x i1> @fcmp_une_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_une_vf_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfne.vv v0, v10, v8, v0.t
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
@@ -2549,11 +2549,11 @@ define <vscale x 1 x i1> @fcmp_une_vf_swap_nxv1f16(<vscale x 1 x half> %va, half
; ZVFHMIN-LABEL: fcmp_une_vf_swap_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfne.vv v0, v8, v10, v0.t
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
@@ -2573,10 +2573,10 @@ define <vscale x 1 x i1> @fcmp_uno_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_uno_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfne.vv v8, v10, v10, v0.t
; ZVFHMIN-NEXT: vmfne.vv v9, v9, v9, v0.t
; ZVFHMIN-NEXT: vmor.mm v0, v9, v8
@@ -2599,14 +2599,14 @@ define <vscale x 1 x i1> @fcmp_uno_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_uno_vf_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vmv.v.x v8, a1
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfne.vv v9, v9, v9, v0.t
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfne.vv v8, v10, v10, v0.t
; ZVFHMIN-NEXT: vmor.mm v0, v9, v8
; ZVFHMIN-NEXT: ret
@@ -2630,14 +2630,14 @@ define <vscale x 1 x i1> @fcmp_uno_vf_swap_nxv1f16(<vscale x 1 x half> %va, half
; ZVFHMIN-LABEL: fcmp_uno_vf_swap_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vmv.v.x v8, a1
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfne.vv v9, v9, v9, v0.t
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vmfne.vv v8, v10, v10, v0.t
; ZVFHMIN-NEXT: vmor.mm v0, v8, v9
; ZVFHMIN-NEXT: ret
@@ -2658,10 +2658,10 @@ define <vscale x 3 x i1> @fcmp_oeq_vv_nxv3f16(<vscale x 3 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_oeq_vv_nxv3f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v10, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: ret
@@ -2681,10 +2681,10 @@ define <vscale x 8 x i1> @fcmp_oeq_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_oeq_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: ret
@@ -2703,11 +2703,11 @@ define <vscale x 8 x i1> @fcmp_oeq_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_oeq_vf_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v16, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: ret
@@ -2728,11 +2728,11 @@ define <vscale x 8 x i1> @fcmp_oeq_vf_swap_nxv8f16(<vscale x 8 x half> %va, half
; ZVFHMIN-LABEL: fcmp_oeq_vf_swap_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: ret
@@ -2752,10 +2752,10 @@ define <vscale x 8 x i1> @fcmp_ogt_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_ogt_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: ret
@@ -2774,11 +2774,11 @@ define <vscale x 8 x i1> @fcmp_ogt_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_ogt_vf_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: ret
@@ -2799,11 +2799,11 @@ define <vscale x 8 x i1> @fcmp_ogt_vf_swap_nxv8f16(<vscale x 8 x half> %va, half
; ZVFHMIN-LABEL: fcmp_ogt_vf_swap_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v12, v16, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: ret
@@ -2823,10 +2823,10 @@ define <vscale x 8 x i1> @fcmp_oge_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_oge_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: ret
@@ -2845,11 +2845,11 @@ define <vscale x 8 x i1> @fcmp_oge_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_oge_vf_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: ret
@@ -2870,11 +2870,11 @@ define <vscale x 8 x i1> @fcmp_oge_vf_swap_nxv8f16(<vscale x 8 x half> %va, half
; ZVFHMIN-LABEL: fcmp_oge_vf_swap_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v8, v12, v16, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: ret
@@ -2894,10 +2894,10 @@ define <vscale x 8 x i1> @fcmp_olt_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_olt_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: ret
@@ -2916,11 +2916,11 @@ define <vscale x 8 x i1> @fcmp_olt_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_olt_vf_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v12, v16, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: ret
@@ -2941,11 +2941,11 @@ define <vscale x 8 x i1> @fcmp_olt_vf_swap_nxv8f16(<vscale x 8 x half> %va, half
; ZVFHMIN-LABEL: fcmp_olt_vf_swap_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: ret
@@ -2965,10 +2965,10 @@ define <vscale x 8 x i1> @fcmp_ole_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_ole_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: ret
@@ -2987,11 +2987,11 @@ define <vscale x 8 x i1> @fcmp_ole_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_ole_vf_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v8, v12, v16, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: ret
@@ -3012,11 +3012,11 @@ define <vscale x 8 x i1> @fcmp_ole_vf_swap_nxv8f16(<vscale x 8 x half> %va, half
; ZVFHMIN-LABEL: fcmp_ole_vf_swap_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: ret
@@ -3037,10 +3037,10 @@ define <vscale x 8 x i1> @fcmp_one_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_one_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmflt.vv v9, v12, v16, v0.t
; ZVFHMIN-NEXT: vmor.mm v0, v9, v8
@@ -3061,11 +3061,11 @@ define <vscale x 8 x i1> @fcmp_one_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_one_vf_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vmv.v.x v8, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v12, v16, v0.t
; ZVFHMIN-NEXT: vmflt.vv v9, v16, v12, v0.t
; ZVFHMIN-NEXT: vmor.mm v0, v9, v8
@@ -3088,11 +3088,11 @@ define <vscale x 8 x i1> @fcmp_one_vf_swap_nxv8f16(<vscale x 8 x half> %va, half
; ZVFHMIN-LABEL: fcmp_one_vf_swap_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vmv.v.x v8, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmflt.vv v9, v12, v16, v0.t
; ZVFHMIN-NEXT: vmor.mm v0, v9, v8
@@ -3114,13 +3114,13 @@ define <vscale x 8 x i1> @fcmp_ord_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_ord_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v10, v12, v12, v0.t
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v12, v0.t
; ZVFHMIN-NEXT: vmand.mm v0, v8, v10
; ZVFHMIN-NEXT: ret
@@ -3142,14 +3142,14 @@ define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_ord_vf_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vmv.v.x v8, a1
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v10, v12, v12, v0.t
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v12, v0.t
; ZVFHMIN-NEXT: vmand.mm v0, v10, v8
; ZVFHMIN-NEXT: ret
@@ -3173,14 +3173,14 @@ define <vscale x 8 x i1> @fcmp_ord_vf_swap_nxv8f16(<vscale x 8 x half> %va, half
; ZVFHMIN-LABEL: fcmp_ord_vf_swap_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vmv.v.x v8, a1
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v10, v12, v12, v0.t
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v12, v0.t
; ZVFHMIN-NEXT: vmand.mm v0, v8, v10
; ZVFHMIN-NEXT: ret
@@ -3201,10 +3201,10 @@ define <vscale x 8 x i1> @fcmp_ueq_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_ueq_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmflt.vv v9, v12, v16, v0.t
; ZVFHMIN-NEXT: vmnor.mm v0, v9, v8
@@ -3225,11 +3225,11 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_ueq_vf_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vmv.v.x v8, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v12, v16, v0.t
; ZVFHMIN-NEXT: vmflt.vv v9, v16, v12, v0.t
; ZVFHMIN-NEXT: vmnor.mm v0, v9, v8
@@ -3252,11 +3252,11 @@ define <vscale x 8 x i1> @fcmp_ueq_vf_swap_nxv8f16(<vscale x 8 x half> %va, half
; ZVFHMIN-LABEL: fcmp_ueq_vf_swap_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vmv.v.x v8, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmflt.vv v9, v12, v16, v0.t
; ZVFHMIN-NEXT: vmnor.mm v0, v9, v8
@@ -3277,10 +3277,10 @@ define <vscale x 8 x i1> @fcmp_ugt_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_ugt_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -3299,11 +3299,11 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_ugt_vf_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v8, v12, v16, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -3324,11 +3324,11 @@ define <vscale x 8 x i1> @fcmp_ugt_vf_swap_nxv8f16(<vscale x 8 x half> %va, half
; ZVFHMIN-LABEL: fcmp_ugt_vf_swap_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -3348,10 +3348,10 @@ define <vscale x 8 x i1> @fcmp_uge_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_uge_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -3370,11 +3370,11 @@ define <vscale x 8 x i1> @fcmp_uge_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_uge_vf_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v12, v16, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -3395,11 +3395,11 @@ define <vscale x 8 x i1> @fcmp_uge_vf_swap_nxv8f16(<vscale x 8 x half> %va, half
; ZVFHMIN-LABEL: fcmp_uge_vf_swap_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -3419,10 +3419,10 @@ define <vscale x 8 x i1> @fcmp_ult_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_ult_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -3441,11 +3441,11 @@ define <vscale x 8 x i1> @fcmp_ult_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_ult_vf_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -3466,11 +3466,11 @@ define <vscale x 8 x i1> @fcmp_ult_vf_swap_nxv8f16(<vscale x 8 x half> %va, half
; ZVFHMIN-LABEL: fcmp_ult_vf_swap_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfle.vv v8, v12, v16, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -3490,10 +3490,10 @@ define <vscale x 8 x i1> @fcmp_ule_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_ule_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -3512,11 +3512,11 @@ define <vscale x 8 x i1> @fcmp_ule_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_ule_vf_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -3537,11 +3537,11 @@ define <vscale x 8 x i1> @fcmp_ule_vf_swap_nxv8f16(<vscale x 8 x half> %va, half
; ZVFHMIN-LABEL: fcmp_ule_vf_swap_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmflt.vv v8, v12, v16, v0.t
; ZVFHMIN-NEXT: vmnot.m v0, v8
; ZVFHMIN-NEXT: ret
@@ -3561,10 +3561,10 @@ define <vscale x 8 x i1> @fcmp_une_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_une_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfne.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: ret
@@ -3583,11 +3583,11 @@ define <vscale x 8 x i1> @fcmp_une_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_une_vf_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfne.vv v8, v12, v16, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: ret
@@ -3608,11 +3608,11 @@ define <vscale x 8 x i1> @fcmp_une_vf_swap_nxv8f16(<vscale x 8 x half> %va, half
; ZVFHMIN-LABEL: fcmp_une_vf_swap_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfne.vv v8, v16, v12, v0.t
; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: ret
@@ -3633,13 +3633,13 @@ define <vscale x 8 x i1> @fcmp_uno_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: fcmp_uno_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfne.vv v10, v12, v12, v0.t
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfne.vv v8, v12, v12, v0.t
; ZVFHMIN-NEXT: vmor.mm v0, v8, v10
; ZVFHMIN-NEXT: ret
@@ -3661,14 +3661,14 @@ define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
; ZVFHMIN-LABEL: fcmp_uno_vf_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vmv.v.x v8, a1
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfne.vv v10, v12, v12, v0.t
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfne.vv v8, v12, v12, v0.t
; ZVFHMIN-NEXT: vmor.mm v0, v10, v8
; ZVFHMIN-NEXT: ret
@@ -3692,14 +3692,14 @@ define <vscale x 8 x i1> @fcmp_uno_vf_swap_nxv8f16(<vscale x 8 x half> %va, half
; ZVFHMIN-LABEL: fcmp_uno_vf_swap_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vmv.v.x v8, a1
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfne.vv v10, v12, v12, v0.t
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmfne.vv v8, v12, v12, v0.t
; ZVFHMIN-NEXT: vmor.mm v0, v8, v10
; ZVFHMIN-NEXT: ret
@@ -3829,14 +3829,14 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a7, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vmv8r.v v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli t0, a0, 4
-; ZVFHMIN-NEXT: add a0, t0, a0
+; ZVFHMIN-NEXT: slli a7, a0, 4
+; ZVFHMIN-NEXT: add a0, a7, a0
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
@@ -3844,7 +3844,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: vmv1r.v v0, v24
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a7, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v6, v16, v8, v0.t
; ZVFHMIN-NEXT: bltu a6, a4, .LBB171_2
; ZVFHMIN-NEXT: # %bb.1:
@@ -3857,16 +3857,16 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a6, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a7, a0, 4
-; ZVFHMIN-NEXT: add a0, a7, a0
+; ZVFHMIN-NEXT: slli a6, a0, 4
+; ZVFHMIN-NEXT: add a0, a6, a0
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
-; ZVFHMIN-NEXT: vsetvli zero, a6, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v5, v24, v8, v0.t
; ZVFHMIN-NEXT: add a0, a3, a3
; ZVFHMIN-NEXT: bltu a2, a5, .LBB171_4
@@ -3881,6 +3881,9 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: vl1r.v v7, (a6) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli a6, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a3
+; ZVFHMIN-NEXT: sltu a6, a2, a5
+; ZVFHMIN-NEXT: addi a6, a6, -1
+; ZVFHMIN-NEXT: and a5, a6, a5
; ZVFHMIN-NEXT: csrr a6, vlenb
; ZVFHMIN-NEXT: mv a7, a6
; ZVFHMIN-NEXT: slli a6, a6, 3
@@ -3890,31 +3893,28 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: add a6, sp, a6
; ZVFHMIN-NEXT: addi a6, a6, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a6) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli a6, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a5, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
-; ZVFHMIN-NEXT: csrr a6, vlenb
-; ZVFHMIN-NEXT: slli a7, a6, 4
-; ZVFHMIN-NEXT: add a6, a7, a6
-; ZVFHMIN-NEXT: add a6, sp, a6
-; ZVFHMIN-NEXT: addi a6, a6, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a6) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: csrr a6, vlenb
-; ZVFHMIN-NEXT: slli a7, a6, 5
-; ZVFHMIN-NEXT: add a6, a7, a6
-; ZVFHMIN-NEXT: add a6, sp, a6
-; ZVFHMIN-NEXT: addi a6, a6, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a6) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: csrr a5, vlenb
+; ZVFHMIN-NEXT: slli a6, a5, 4
+; ZVFHMIN-NEXT: add a5, a6, a5
+; ZVFHMIN-NEXT: add a5, sp, a5
+; ZVFHMIN-NEXT: addi a5, a5, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: csrr a5, vlenb
+; ZVFHMIN-NEXT: slli a6, a5, 5
+; ZVFHMIN-NEXT: add a5, a6, a5
+; ZVFHMIN-NEXT: add a5, sp, a5
+; ZVFHMIN-NEXT: addi a5, a5, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28
-; ZVFHMIN-NEXT: sltu a6, a2, a5
-; ZVFHMIN-NEXT: addi a6, a6, -1
-; ZVFHMIN-NEXT: and a5, a6, a5
-; ZVFHMIN-NEXT: csrr a6, vlenb
-; ZVFHMIN-NEXT: slli a7, a6, 4
-; ZVFHMIN-NEXT: add a6, a7, a6
-; ZVFHMIN-NEXT: add a6, sp, a6
-; ZVFHMIN-NEXT: addi a6, a6, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a6) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a5, e32, m8, ta, ma
+; ZVFHMIN-NEXT: csrr a5, vlenb
+; ZVFHMIN-NEXT: slli a6, a5, 4
+; ZVFHMIN-NEXT: add a5, a6, a5
+; ZVFHMIN-NEXT: add a5, sp, a5
+; ZVFHMIN-NEXT: addi a5, a5, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v4, v24, v8, v0.t
; ZVFHMIN-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslideup.vx v5, v6, a3
@@ -3923,16 +3923,16 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: mv a2, a4
; ZVFHMIN-NEXT: .LBB171_6:
; ZVFHMIN-NEXT: vmv1r.v v0, v7
-; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
-; ZVFHMIN-NEXT: csrr a4, vlenb
-; ZVFHMIN-NEXT: slli a5, a4, 5
-; ZVFHMIN-NEXT: add a4, a5, a4
-; ZVFHMIN-NEXT: add a4, sp, a4
-; ZVFHMIN-NEXT: addi a4, a4, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: slli a4, a2, 5
+; ZVFHMIN-NEXT: add a2, a4, a2
+; ZVFHMIN-NEXT: add a2, sp, a2
+; ZVFHMIN-NEXT: addi a2, a2, 16
+; ZVFHMIN-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslideup.vx v8, v4, a3
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
index 61cc754..9c733b1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
@@ -2367,9 +2367,8 @@ define <vscale x 1 x i1> @icmp_eq_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vs
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmseq.vv v0, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -2394,9 +2393,8 @@ define <vscale x 1 x i1> @icmp_eq_vx_swap_nxv1i64(<vscale x 1 x i64> %va, i64 %b
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmseq.vv v0, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -2451,9 +2449,8 @@ define <vscale x 1 x i1> @icmp_ne_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vs
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmsne.vv v0, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -2478,9 +2475,8 @@ define <vscale x 1 x i1> @icmp_ne_vx_swap_nxv1i64(<vscale x 1 x i64> %va, i64 %b
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmsne.vv v0, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -2535,9 +2531,8 @@ define <vscale x 1 x i1> @icmp_ugt_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <v
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmsltu.vv v0, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -2562,9 +2557,8 @@ define <vscale x 1 x i1> @icmp_ugt_vx_swap_nxv1i64(<vscale x 1 x i64> %va, i64 %
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmsltu.vv v0, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -2619,9 +2613,8 @@ define <vscale x 1 x i1> @icmp_uge_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <v
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmsleu.vv v0, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -2647,9 +2640,8 @@ define <vscale x 1 x i1> @icmp_uge_vx_swap_nxv1i64(<vscale x 1 x i64> %va, i64 %
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmsleu.vv v0, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -2704,9 +2696,8 @@ define <vscale x 1 x i1> @icmp_ult_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <v
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmsltu.vv v0, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -2731,9 +2722,8 @@ define <vscale x 1 x i1> @icmp_ult_vx_swap_nxv1i64(<vscale x 1 x i64> %va, i64 %
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmsltu.vv v0, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -2788,9 +2778,8 @@ define <vscale x 1 x i1> @icmp_sgt_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <v
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmslt.vv v0, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -2815,9 +2804,8 @@ define <vscale x 1 x i1> @icmp_sgt_vx_swap_nxv1i64(<vscale x 1 x i64> %va, i64 %
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmslt.vv v0, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -2872,9 +2860,8 @@ define <vscale x 1 x i1> @icmp_sge_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <v
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmsle.vv v0, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -2900,9 +2887,8 @@ define <vscale x 1 x i1> @icmp_sge_vx_swap_nxv1i64(<vscale x 1 x i64> %va, i64 %
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmsle.vv v0, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -2957,9 +2943,8 @@ define <vscale x 1 x i1> @icmp_slt_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <v
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmslt.vv v0, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -2984,9 +2969,8 @@ define <vscale x 1 x i1> @icmp_slt_vx_swap_nxv1i64(<vscale x 1 x i64> %va, i64 %
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmslt.vv v0, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -3041,9 +3025,8 @@ define <vscale x 1 x i1> @icmp_sle_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <v
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmsle.vv v0, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -3068,9 +3051,8 @@ define <vscale x 1 x i1> @icmp_sle_vx_swap_nxv1i64(<vscale x 1 x i64> %va, i64 %
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmsle.vv v0, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -3129,9 +3111,8 @@ define <vscale x 8 x i1> @icmp_eq_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vs
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vmseq.vv v16, v8, v24, v0.t
; RV32-NEXT: vmv1r.v v0, v16
; RV32-NEXT: addi sp, sp, 16
@@ -3158,9 +3139,8 @@ define <vscale x 8 x i1> @icmp_eq_vx_swap_nxv8i64(<vscale x 8 x i64> %va, i64 %b
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vmseq.vv v16, v24, v8, v0.t
; RV32-NEXT: vmv1r.v v0, v16
; RV32-NEXT: addi sp, sp, 16
@@ -3220,9 +3200,8 @@ define <vscale x 8 x i1> @icmp_ne_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vs
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vmsne.vv v16, v8, v24, v0.t
; RV32-NEXT: vmv1r.v v0, v16
; RV32-NEXT: addi sp, sp, 16
@@ -3249,9 +3228,8 @@ define <vscale x 8 x i1> @icmp_ne_vx_swap_nxv8i64(<vscale x 8 x i64> %va, i64 %b
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vmsne.vv v16, v24, v8, v0.t
; RV32-NEXT: vmv1r.v v0, v16
; RV32-NEXT: addi sp, sp, 16
@@ -3311,9 +3289,8 @@ define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <v
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vmsltu.vv v16, v24, v8, v0.t
; RV32-NEXT: vmv1r.v v0, v16
; RV32-NEXT: addi sp, sp, 16
@@ -3340,9 +3317,8 @@ define <vscale x 8 x i1> @icmp_ugt_vx_swap_nxv8i64(<vscale x 8 x i64> %va, i64 %
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vmsltu.vv v16, v8, v24, v0.t
; RV32-NEXT: vmv1r.v v0, v16
; RV32-NEXT: addi sp, sp, 16
@@ -3402,9 +3378,8 @@ define <vscale x 8 x i1> @icmp_uge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <v
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vmsleu.vv v16, v24, v8, v0.t
; RV32-NEXT: vmv1r.v v0, v16
; RV32-NEXT: addi sp, sp, 16
@@ -3432,9 +3407,8 @@ define <vscale x 8 x i1> @icmp_uge_vx_swap_nxv8i64(<vscale x 8 x i64> %va, i64 %
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vmsleu.vv v16, v8, v24, v0.t
; RV32-NEXT: vmv1r.v v0, v16
; RV32-NEXT: addi sp, sp, 16
@@ -3494,9 +3468,8 @@ define <vscale x 8 x i1> @icmp_ult_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <v
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vmsltu.vv v16, v8, v24, v0.t
; RV32-NEXT: vmv1r.v v0, v16
; RV32-NEXT: addi sp, sp, 16
@@ -3523,9 +3496,8 @@ define <vscale x 8 x i1> @icmp_ult_vx_swap_nxv8i64(<vscale x 8 x i64> %va, i64 %
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vmsltu.vv v16, v24, v8, v0.t
; RV32-NEXT: vmv1r.v v0, v16
; RV32-NEXT: addi sp, sp, 16
@@ -3585,9 +3557,8 @@ define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <v
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vmslt.vv v16, v24, v8, v0.t
; RV32-NEXT: vmv1r.v v0, v16
; RV32-NEXT: addi sp, sp, 16
@@ -3614,9 +3585,8 @@ define <vscale x 8 x i1> @icmp_sgt_vx_swap_nxv8i64(<vscale x 8 x i64> %va, i64 %
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vmslt.vv v16, v8, v24, v0.t
; RV32-NEXT: vmv1r.v v0, v16
; RV32-NEXT: addi sp, sp, 16
@@ -3676,9 +3646,8 @@ define <vscale x 8 x i1> @icmp_sge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <v
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vmsle.vv v16, v24, v8, v0.t
; RV32-NEXT: vmv1r.v v0, v16
; RV32-NEXT: addi sp, sp, 16
@@ -3706,9 +3675,8 @@ define <vscale x 8 x i1> @icmp_sge_vx_swap_nxv8i64(<vscale x 8 x i64> %va, i64 %
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vmsle.vv v16, v8, v24, v0.t
; RV32-NEXT: vmv1r.v v0, v16
; RV32-NEXT: addi sp, sp, 16
@@ -3768,9 +3736,8 @@ define <vscale x 8 x i1> @icmp_slt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <v
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vmslt.vv v16, v8, v24, v0.t
; RV32-NEXT: vmv1r.v v0, v16
; RV32-NEXT: addi sp, sp, 16
@@ -3797,9 +3764,8 @@ define <vscale x 8 x i1> @icmp_slt_vx_swap_nxv8i64(<vscale x 8 x i64> %va, i64 %
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vmslt.vv v16, v24, v8, v0.t
; RV32-NEXT: vmv1r.v v0, v16
; RV32-NEXT: addi sp, sp, 16
@@ -3859,9 +3825,8 @@ define <vscale x 8 x i1> @icmp_sle_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <v
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vmsle.vv v16, v8, v24, v0.t
; RV32-NEXT: vmv1r.v v0, v16
; RV32-NEXT: addi sp, sp, 16
@@ -3888,9 +3853,8 @@ define <vscale x 8 x i1> @icmp_sle_vx_swap_nxv8i64(<vscale x 8 x i64> %va, i64 %
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vmsle.vv v16, v24, v8, v0.t
; RV32-NEXT: vmv1r.v v0, v16
; RV32-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
index fee6799..77f3cf3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
@@ -1487,9 +1487,8 @@ define <vscale x 1 x i64> @vadd_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1514,9 +1513,8 @@ define <vscale x 1 x i64> @vadd_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vadd.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1583,9 +1581,8 @@ define <vscale x 2 x i64> @vadd_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vadd.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1610,9 +1607,8 @@ define <vscale x 2 x i64> @vadd_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vadd.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1679,9 +1675,8 @@ define <vscale x 4 x i64> @vadd_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vadd.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1706,9 +1701,8 @@ define <vscale x 4 x i64> @vadd_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vadd.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1775,9 +1769,8 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1802,9 +1795,8 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vadd.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll
index b0c5a72..4866bb0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll
@@ -1314,9 +1314,8 @@ define <vscale x 1 x i64> @vand_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vand.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1341,9 +1340,8 @@ define <vscale x 1 x i64> @vand_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vand.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1410,9 +1408,8 @@ define <vscale x 2 x i64> @vand_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vand.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1437,9 +1434,8 @@ define <vscale x 2 x i64> @vand_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vand.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1506,9 +1502,8 @@ define <vscale x 4 x i64> @vand_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vand.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1533,9 +1528,8 @@ define <vscale x 4 x i64> @vand_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vand.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1602,9 +1596,8 @@ define <vscale x 8 x i64> @vand_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1629,9 +1622,8 @@ define <vscale x 8 x i64> @vand_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll
index 32992301..763b290 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vandn-vp.ll
@@ -1115,9 +1115,8 @@ define <vscale x 1 x i64> @vandn_vx_vp_nxv1i64(i64 %a, <vscale x 1 x i64> %b, <v
; CHECK-RV32-NEXT: sw a0, 8(sp)
; CHECK-RV32-NEXT: sw a1, 12(sp)
; CHECK-RV32-NEXT: addi a0, sp, 8
-; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-RV32-NEXT: vlse64.v v9, (a0), zero
; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; CHECK-RV32-NEXT: vlse64.v v9, (a0), zero
; CHECK-RV32-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-RV32-NEXT: addi sp, sp, 16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1139,9 +1138,8 @@ define <vscale x 1 x i64> @vandn_vx_vp_nxv1i64(i64 %a, <vscale x 1 x i64> %b, <v
; CHECK-ZVKB32-NEXT: sw a0, 8(sp)
; CHECK-ZVKB32-NEXT: sw a1, 12(sp)
; CHECK-ZVKB32-NEXT: addi a0, sp, 8
-; CHECK-ZVKB32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-ZVKB32-NEXT: vlse64.v v9, (a0), zero
; CHECK-ZVKB32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; CHECK-ZVKB32-NEXT: vlse64.v v9, (a0), zero
; CHECK-ZVKB32-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-ZVKB32-NEXT: addi sp, sp, 16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 0
@@ -1208,9 +1206,8 @@ define <vscale x 2 x i64> @vandn_vx_vp_nxv2i64(i64 %a, <vscale x 2 x i64> %b, <v
; CHECK-RV32-NEXT: sw a0, 8(sp)
; CHECK-RV32-NEXT: sw a1, 12(sp)
; CHECK-RV32-NEXT: addi a0, sp, 8
-; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-RV32-NEXT: vlse64.v v10, (a0), zero
; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; CHECK-RV32-NEXT: vlse64.v v10, (a0), zero
; CHECK-RV32-NEXT: vand.vv v8, v8, v10, v0.t
; CHECK-RV32-NEXT: addi sp, sp, 16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1232,9 +1229,8 @@ define <vscale x 2 x i64> @vandn_vx_vp_nxv2i64(i64 %a, <vscale x 2 x i64> %b, <v
; CHECK-ZVKB32-NEXT: sw a0, 8(sp)
; CHECK-ZVKB32-NEXT: sw a1, 12(sp)
; CHECK-ZVKB32-NEXT: addi a0, sp, 8
-; CHECK-ZVKB32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-ZVKB32-NEXT: vlse64.v v10, (a0), zero
; CHECK-ZVKB32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; CHECK-ZVKB32-NEXT: vlse64.v v10, (a0), zero
; CHECK-ZVKB32-NEXT: vand.vv v8, v8, v10, v0.t
; CHECK-ZVKB32-NEXT: addi sp, sp, 16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 0
@@ -1301,9 +1297,8 @@ define <vscale x 4 x i64> @vandn_vx_vp_nxv4i64(i64 %a, <vscale x 4 x i64> %b, <v
; CHECK-RV32-NEXT: sw a0, 8(sp)
; CHECK-RV32-NEXT: sw a1, 12(sp)
; CHECK-RV32-NEXT: addi a0, sp, 8
-; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; CHECK-RV32-NEXT: vlse64.v v12, (a0), zero
; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; CHECK-RV32-NEXT: vlse64.v v12, (a0), zero
; CHECK-RV32-NEXT: vand.vv v8, v8, v12, v0.t
; CHECK-RV32-NEXT: addi sp, sp, 16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1325,9 +1320,8 @@ define <vscale x 4 x i64> @vandn_vx_vp_nxv4i64(i64 %a, <vscale x 4 x i64> %b, <v
; CHECK-ZVKB32-NEXT: sw a0, 8(sp)
; CHECK-ZVKB32-NEXT: sw a1, 12(sp)
; CHECK-ZVKB32-NEXT: addi a0, sp, 8
-; CHECK-ZVKB32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; CHECK-ZVKB32-NEXT: vlse64.v v12, (a0), zero
; CHECK-ZVKB32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; CHECK-ZVKB32-NEXT: vlse64.v v12, (a0), zero
; CHECK-ZVKB32-NEXT: vand.vv v8, v8, v12, v0.t
; CHECK-ZVKB32-NEXT: addi sp, sp, 16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 0
@@ -1394,9 +1388,8 @@ define <vscale x 8 x i64> @vandn_vx_vp_nxv8i64(i64 %a, <vscale x 8 x i64> %b, <v
; CHECK-RV32-NEXT: sw a0, 8(sp)
; CHECK-RV32-NEXT: sw a1, 12(sp)
; CHECK-RV32-NEXT: addi a0, sp, 8
-; CHECK-RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-RV32-NEXT: vlse64.v v16, (a0), zero
; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-RV32-NEXT: vlse64.v v16, (a0), zero
; CHECK-RV32-NEXT: vand.vv v8, v8, v16, v0.t
; CHECK-RV32-NEXT: addi sp, sp, 16
; CHECK-RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1418,9 +1411,8 @@ define <vscale x 8 x i64> @vandn_vx_vp_nxv8i64(i64 %a, <vscale x 8 x i64> %b, <v
; CHECK-ZVKB32-NEXT: sw a0, 8(sp)
; CHECK-ZVKB32-NEXT: sw a1, 12(sp)
; CHECK-ZVKB32-NEXT: addi a0, sp, 8
-; CHECK-ZVKB32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; CHECK-ZVKB32-NEXT: vlse64.v v16, (a0), zero
; CHECK-ZVKB32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-ZVKB32-NEXT: vlse64.v v16, (a0), zero
; CHECK-ZVKB32-NEXT: vand.vv v8, v8, v16, v0.t
; CHECK-ZVKB32-NEXT: addi sp, sp, 16
; CHECK-ZVKB32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
index 2814be2..03e4e1f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
@@ -893,9 +893,8 @@ define <vscale x 1 x i64> @vdiv_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vdiv.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -920,9 +919,8 @@ define <vscale x 1 x i64> @vdiv_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vdiv.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -969,9 +967,8 @@ define <vscale x 2 x i64> @vdiv_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vdiv.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -996,9 +993,8 @@ define <vscale x 2 x i64> @vdiv_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vdiv.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1045,9 +1041,8 @@ define <vscale x 4 x i64> @vdiv_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vdiv.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1072,9 +1067,8 @@ define <vscale x 4 x i64> @vdiv_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vdiv.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1121,9 +1115,8 @@ define <vscale x 8 x i64> @vdiv_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vdiv.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1148,9 +1141,8 @@ define <vscale x 8 x i64> @vdiv_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vdiv.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
index 3e913d4..2f35f91 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
@@ -892,9 +892,8 @@ define <vscale x 1 x i64> @vdivu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vdivu.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -919,9 +918,8 @@ define <vscale x 1 x i64> @vdivu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vdivu.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -968,9 +966,8 @@ define <vscale x 2 x i64> @vdivu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vdivu.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -995,9 +992,8 @@ define <vscale x 2 x i64> @vdivu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vdivu.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1044,9 +1040,8 @@ define <vscale x 4 x i64> @vdivu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vdivu.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1071,9 +1066,8 @@ define <vscale x 4 x i64> @vdivu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vdivu.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1120,9 +1114,8 @@ define <vscale x 8 x i64> @vdivu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vdivu.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1147,9 +1140,8 @@ define <vscale x 8 x i64> @vdivu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vdivu.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
index 87bc9f2..31359c3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
@@ -679,10 +679,10 @@ define <vscale x 1 x half> @vfadd_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfadd_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -700,10 +700,10 @@ define <vscale x 1 x half> @vfadd_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
;
; ZVFHMIN-LABEL: vfadd_vv_nxv1f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -722,11 +722,11 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <
; ZVFHMIN-LABEL: vfadd_vf_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v9, v10, v8, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -747,11 +747,11 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16_commute(<vscale x 1 x half> %va, ha
; ZVFHMIN-LABEL: vfadd_vf_nxv1f16_commute:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v9, v8, v10, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -772,11 +772,11 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, h
; ZVFHMIN-LABEL: vfadd_vf_nxv1f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v9, v10, v8
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -797,11 +797,11 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16_unmasked_commute(<vscale x 1 x half
; ZVFHMIN-LABEL: vfadd_vf_nxv1f16_unmasked_commute:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v9, v8, v10
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -823,10 +823,10 @@ define <vscale x 2 x half> @vfadd_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfadd_vv_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -844,10 +844,10 @@ define <vscale x 2 x half> @vfadd_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
;
; ZVFHMIN-LABEL: vfadd_vv_nxv2f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v9, v9, v10
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -866,11 +866,11 @@ define <vscale x 2 x half> @vfadd_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <
; ZVFHMIN-LABEL: vfadd_vf_nxv2f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v9, v10, v8, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -891,11 +891,11 @@ define <vscale x 2 x half> @vfadd_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, h
; ZVFHMIN-LABEL: vfadd_vf_nxv2f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v9, v10, v8
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -917,10 +917,10 @@ define <vscale x 4 x half> @vfadd_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfadd_vv_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v10, v12, v10, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -938,10 +938,10 @@ define <vscale x 4 x half> @vfadd_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
;
; ZVFHMIN-LABEL: vfadd_vv_nxv4f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v10, v12, v10
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -960,11 +960,11 @@ define <vscale x 4 x half> @vfadd_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <
; ZVFHMIN-LABEL: vfadd_vf_nxv4f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v10, v10, v12, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -985,11 +985,11 @@ define <vscale x 4 x half> @vfadd_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, h
; ZVFHMIN-LABEL: vfadd_vf_nxv4f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v10, v10, v12
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -1011,10 +1011,10 @@ define <vscale x 8 x half> @vfadd_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfadd_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v12, v16, v12, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -1032,10 +1032,10 @@ define <vscale x 8 x half> @vfadd_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
;
; ZVFHMIN-LABEL: vfadd_vv_nxv8f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v12, v16, v12
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -1054,11 +1054,11 @@ define <vscale x 8 x half> @vfadd_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <
; ZVFHMIN-LABEL: vfadd_vf_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v12, v12, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -1079,11 +1079,11 @@ define <vscale x 8 x half> @vfadd_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, h
; ZVFHMIN-LABEL: vfadd_vf_nxv8f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v12, v12, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -1105,10 +1105,10 @@ define <vscale x 16 x half> @vfadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vfadd_vv_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -1126,10 +1126,10 @@ define <vscale x 16 x half> @vfadd_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
;
; ZVFHMIN-LABEL: vfadd_vv_nxv16f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -1148,11 +1148,11 @@ define <vscale x 16 x half> @vfadd_vf_nxv16f16(<vscale x 16 x half> %va, half %b
; ZVFHMIN-LABEL: vfadd_vf_nxv16f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v12, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -1173,11 +1173,11 @@ define <vscale x 16 x half> @vfadd_vf_nxv16f16_unmasked(<vscale x 16 x half> %va
; ZVFHMIN-LABEL: vfadd_vf_nxv16f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v12, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -1205,23 +1205,22 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
-; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -1231,10 +1230,11 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: .LBB48_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vmv1r.v v0, v7
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -1266,22 +1266,21 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT: vmset.m v7
-; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
+; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2
+; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -1291,9 +1290,10 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: .LBB49_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -1324,14 +1324,10 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: add a1, a2, a1
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vmv8r.v v16, v8
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmv8r.v v24, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: addi a3, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m8, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v16, a1
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a3, a1, 3
@@ -1352,15 +1348,18 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: slli a4, a3, 3
-; ZVFHMIN-NEXT: add a3, a4, a3
-; ZVFHMIN-NEXT: add a3, sp, a3
-; ZVFHMIN-NEXT: addi a3, a3, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: slli a3, a2, 3
+; ZVFHMIN-NEXT: add a2, a3, a2
+; ZVFHMIN-NEXT: add a2, sp, a2
+; ZVFHMIN-NEXT: addi a2, a2, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v16, v8, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -1370,20 +1369,21 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: .LBB50_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a2, a1, 3
-; ZVFHMIN-NEXT: add a1, a2, a1
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a1, a0, 3
+; ZVFHMIN-NEXT: add a0, a1, a0
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl1r.v v0, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -1416,16 +1416,10 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: vmset.m v7
-; ZVFHMIN-NEXT: addi a3, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vmv.v.x v16, a1
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a1, a1, 3
@@ -1436,18 +1430,22 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2
+; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: slli a3, a3, 3
-; ZVFHMIN-NEXT: add a3, sp, a3
-; ZVFHMIN-NEXT: addi a3, a3, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vmv8r.v v16, v8
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: slli a2, a2, 3
+; ZVFHMIN-NEXT: add a2, sp, a2
+; ZVFHMIN-NEXT: addi a2, a2, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v16, v8, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -1457,14 +1455,15 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: .LBB51_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
index 061af45..2205769 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
@@ -641,10 +641,10 @@ define <vscale x 1 x half> @vfdiv_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfdiv_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v9, v9, v10, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -662,10 +662,10 @@ define <vscale x 1 x half> @vfdiv_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
;
; ZVFHMIN-LABEL: vfdiv_vv_nxv1f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v9, v9, v10
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -684,11 +684,11 @@ define <vscale x 1 x half> @vfdiv_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <
; ZVFHMIN-LABEL: vfdiv_vf_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v9, v10, v8, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -709,11 +709,11 @@ define <vscale x 1 x half> @vfdiv_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, h
; ZVFHMIN-LABEL: vfdiv_vf_nxv1f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v9, v10, v8
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -735,10 +735,10 @@ define <vscale x 2 x half> @vfdiv_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfdiv_vv_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v9, v9, v10, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -756,10 +756,10 @@ define <vscale x 2 x half> @vfdiv_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
;
; ZVFHMIN-LABEL: vfdiv_vv_nxv2f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v9, v9, v10
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -778,11 +778,11 @@ define <vscale x 2 x half> @vfdiv_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <
; ZVFHMIN-LABEL: vfdiv_vf_nxv2f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v9, v10, v8, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -803,11 +803,11 @@ define <vscale x 2 x half> @vfdiv_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, h
; ZVFHMIN-LABEL: vfdiv_vf_nxv2f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v9, v10, v8
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -829,10 +829,10 @@ define <vscale x 4 x half> @vfdiv_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfdiv_vv_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v10, v12, v10, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -850,10 +850,10 @@ define <vscale x 4 x half> @vfdiv_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
;
; ZVFHMIN-LABEL: vfdiv_vv_nxv4f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v10, v12, v10
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -872,11 +872,11 @@ define <vscale x 4 x half> @vfdiv_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <
; ZVFHMIN-LABEL: vfdiv_vf_nxv4f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v10, v10, v12, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -897,11 +897,11 @@ define <vscale x 4 x half> @vfdiv_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, h
; ZVFHMIN-LABEL: vfdiv_vf_nxv4f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v10, v10, v12
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -923,10 +923,10 @@ define <vscale x 8 x half> @vfdiv_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfdiv_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v12, v16, v12, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -944,10 +944,10 @@ define <vscale x 8 x half> @vfdiv_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
;
; ZVFHMIN-LABEL: vfdiv_vv_nxv8f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v12, v16, v12
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -966,11 +966,11 @@ define <vscale x 8 x half> @vfdiv_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <
; ZVFHMIN-LABEL: vfdiv_vf_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v12, v12, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -991,11 +991,11 @@ define <vscale x 8 x half> @vfdiv_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, h
; ZVFHMIN-LABEL: vfdiv_vf_nxv8f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v12, v12, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -1017,10 +1017,10 @@ define <vscale x 16 x half> @vfdiv_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vfdiv_vv_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -1038,10 +1038,10 @@ define <vscale x 16 x half> @vfdiv_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
;
; ZVFHMIN-LABEL: vfdiv_vv_nxv16f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -1060,11 +1060,11 @@ define <vscale x 16 x half> @vfdiv_vf_nxv16f16(<vscale x 16 x half> %va, half %b
; ZVFHMIN-LABEL: vfdiv_vf_nxv16f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v12, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -1085,11 +1085,11 @@ define <vscale x 16 x half> @vfdiv_vf_nxv16f16_unmasked(<vscale x 16 x half> %va
; ZVFHMIN-LABEL: vfdiv_vf_nxv16f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v12, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v16, v16, v24
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -1117,23 +1117,22 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
-; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -1143,10 +1142,11 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: .LBB44_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vmv1r.v v0, v7
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -1178,22 +1178,21 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT: vmset.m v7
-; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
+; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2
+; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -1203,9 +1202,10 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: .LBB45_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -1236,14 +1236,10 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: add a1, a2, a1
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vmv8r.v v16, v8
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmv8r.v v24, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: addi a3, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m8, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v16, a1
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a3, a1, 3
@@ -1264,15 +1260,18 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: slli a4, a3, 3
-; ZVFHMIN-NEXT: add a3, a4, a3
-; ZVFHMIN-NEXT: add a3, sp, a3
-; ZVFHMIN-NEXT: addi a3, a3, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: slli a3, a2, 3
+; ZVFHMIN-NEXT: add a2, a3, a2
+; ZVFHMIN-NEXT: add a2, sp, a2
+; ZVFHMIN-NEXT: addi a2, a2, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v16, v8, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -1282,20 +1281,21 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: .LBB46_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a2, a1, 3
-; ZVFHMIN-NEXT: add a1, a2, a1
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a1, a0, 3
+; ZVFHMIN-NEXT: add a0, a1, a0
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl1r.v v0, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -1328,16 +1328,10 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: vmset.m v7
-; ZVFHMIN-NEXT: addi a3, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vmv.v.x v16, a1
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a1, a1, 3
@@ -1348,18 +1342,22 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2
+; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: slli a3, a3, 3
-; ZVFHMIN-NEXT: add a3, sp, a3
-; ZVFHMIN-NEXT: addi a3, a3, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vmv8r.v v16, v8
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: slli a2, a2, 3
+; ZVFHMIN-NEXT: add a2, sp, a2
+; ZVFHMIN-NEXT: addi a2, a2, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v16, v8, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -1369,14 +1367,15 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: .LBB47_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v16, v16, v24
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
index 02d6229..5d998c4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
@@ -290,10 +290,10 @@ define <vscale x 1 x half> @vfmax_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmax_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfmax.vv v9, v9, v10, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -311,10 +311,10 @@ define <vscale x 1 x half> @vfmax_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
;
; ZVFHMIN-LABEL: vfmax_vv_nxv1f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfmax.vv v9, v9, v10
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -334,10 +334,10 @@ define <vscale x 2 x half> @vfmax_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmax_vv_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfmax.vv v9, v9, v10, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -355,10 +355,10 @@ define <vscale x 2 x half> @vfmax_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
;
; ZVFHMIN-LABEL: vfmax_vv_nxv2f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfmax.vv v9, v9, v10
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -378,10 +378,10 @@ define <vscale x 4 x half> @vfmax_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmax_vv_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfmax.vv v10, v12, v10, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -399,10 +399,10 @@ define <vscale x 4 x half> @vfmax_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
;
; ZVFHMIN-LABEL: vfmax_vv_nxv4f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfmax.vv v10, v12, v10
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -422,10 +422,10 @@ define <vscale x 8 x half> @vfmax_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmax_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfmax.vv v12, v16, v12, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -443,10 +443,10 @@ define <vscale x 8 x half> @vfmax_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
;
; ZVFHMIN-LABEL: vfmax_vv_nxv8f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfmax.vv v12, v16, v12
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -466,10 +466,10 @@ define <vscale x 16 x half> @vfmax_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vfmax_vv_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmax.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -487,10 +487,10 @@ define <vscale x 16 x half> @vfmax_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
;
; ZVFHMIN-LABEL: vfmax_vv_nxv16f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmax.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -516,23 +516,22 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
-; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmax.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -542,10 +541,11 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: .LBB22_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vmv1r.v v0, v7
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmax.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -577,22 +577,21 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT: vmset.m v7
-; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
+; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2
+; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmax.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -602,9 +601,10 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: .LBB23_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmax.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
index f7f8029..48a4c13 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
@@ -290,10 +290,10 @@ define <vscale x 1 x half> @vfmin_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmin_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfmin.vv v9, v9, v10, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -311,10 +311,10 @@ define <vscale x 1 x half> @vfmin_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
;
; ZVFHMIN-LABEL: vfmin_vv_nxv1f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfmin.vv v9, v9, v10
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -334,10 +334,10 @@ define <vscale x 2 x half> @vfmin_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmin_vv_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfmin.vv v9, v9, v10, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -355,10 +355,10 @@ define <vscale x 2 x half> @vfmin_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
;
; ZVFHMIN-LABEL: vfmin_vv_nxv2f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfmin.vv v9, v9, v10
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -378,10 +378,10 @@ define <vscale x 4 x half> @vfmin_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmin_vv_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfmin.vv v10, v12, v10, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -399,10 +399,10 @@ define <vscale x 4 x half> @vfmin_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
;
; ZVFHMIN-LABEL: vfmin_vv_nxv4f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfmin.vv v10, v12, v10
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -422,10 +422,10 @@ define <vscale x 8 x half> @vfmin_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmin_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfmin.vv v12, v16, v12, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -443,10 +443,10 @@ define <vscale x 8 x half> @vfmin_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
;
; ZVFHMIN-LABEL: vfmin_vv_nxv8f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfmin.vv v12, v16, v12
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -466,10 +466,10 @@ define <vscale x 16 x half> @vfmin_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vfmin_vv_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmin.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -487,10 +487,10 @@ define <vscale x 16 x half> @vfmin_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
;
; ZVFHMIN-LABEL: vfmin_vv_nxv16f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmin.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -516,23 +516,22 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
-; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmin.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -542,10 +541,11 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: .LBB22_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vmv1r.v v0, v7
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmin.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -577,22 +577,21 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT: vmset.m v7
-; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
+; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2
+; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmin.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -602,9 +601,10 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: .LBB23_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmin.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
index 7e552304..06f74dd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
@@ -19,10 +19,10 @@ define <vscale x 1 x half> @vfmul_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmul_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v9, v9, v10, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -40,10 +40,10 @@ define <vscale x 1 x half> @vfmul_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
;
; ZVFHMIN-LABEL: vfmul_vv_nxv1f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v9, v9, v10
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -62,11 +62,11 @@ define <vscale x 1 x half> @vfmul_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <
; ZVFHMIN-LABEL: vfmul_vf_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v9, v10, v8, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -87,11 +87,11 @@ define <vscale x 1 x half> @vfmul_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, h
; ZVFHMIN-LABEL: vfmul_vf_nxv1f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v9, v10, v8
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -113,10 +113,10 @@ define <vscale x 2 x half> @vfmul_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmul_vv_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v9, v9, v10, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -134,10 +134,10 @@ define <vscale x 2 x half> @vfmul_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
;
; ZVFHMIN-LABEL: vfmul_vv_nxv2f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v9, v9, v10
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -156,11 +156,11 @@ define <vscale x 2 x half> @vfmul_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <
; ZVFHMIN-LABEL: vfmul_vf_nxv2f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v9, v10, v8, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -181,11 +181,11 @@ define <vscale x 2 x half> @vfmul_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, h
; ZVFHMIN-LABEL: vfmul_vf_nxv2f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v9, v10, v8
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -207,10 +207,10 @@ define <vscale x 4 x half> @vfmul_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmul_vv_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v10, v12, v10, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -228,10 +228,10 @@ define <vscale x 4 x half> @vfmul_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
;
; ZVFHMIN-LABEL: vfmul_vv_nxv4f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v10, v12, v10
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -250,11 +250,11 @@ define <vscale x 4 x half> @vfmul_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <
; ZVFHMIN-LABEL: vfmul_vf_nxv4f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v10, v10, v12, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -275,11 +275,11 @@ define <vscale x 4 x half> @vfmul_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, h
; ZVFHMIN-LABEL: vfmul_vf_nxv4f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v10, v10, v12
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -301,10 +301,10 @@ define <vscale x 8 x half> @vfmul_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfmul_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v12, v16, v12, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -322,10 +322,10 @@ define <vscale x 8 x half> @vfmul_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
;
; ZVFHMIN-LABEL: vfmul_vv_nxv8f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v12, v16, v12
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -344,11 +344,11 @@ define <vscale x 8 x half> @vfmul_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <
; ZVFHMIN-LABEL: vfmul_vf_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v12, v12, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -369,11 +369,11 @@ define <vscale x 8 x half> @vfmul_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, h
; ZVFHMIN-LABEL: vfmul_vf_nxv8f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v12, v12, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -395,10 +395,10 @@ define <vscale x 16 x half> @vfmul_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vfmul_vv_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -416,10 +416,10 @@ define <vscale x 16 x half> @vfmul_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
;
; ZVFHMIN-LABEL: vfmul_vv_nxv16f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -438,11 +438,11 @@ define <vscale x 16 x half> @vfmul_vf_nxv16f16(<vscale x 16 x half> %va, half %b
; ZVFHMIN-LABEL: vfmul_vf_nxv16f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v12, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -463,11 +463,11 @@ define <vscale x 16 x half> @vfmul_vf_nxv16f16_unmasked(<vscale x 16 x half> %va
; ZVFHMIN-LABEL: vfmul_vf_nxv16f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v12, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v16, v16, v24
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -495,23 +495,22 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
-; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -521,10 +520,11 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: .LBB20_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vmv1r.v v0, v7
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -556,22 +556,21 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT: vmset.m v7
-; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
+; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2
+; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -581,9 +580,10 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: .LBB21_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -614,14 +614,10 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: add a1, a2, a1
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vmv8r.v v16, v8
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmv8r.v v24, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: addi a3, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m8, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v16, a1
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a3, a1, 3
@@ -642,15 +638,18 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: slli a4, a3, 3
-; ZVFHMIN-NEXT: add a3, a4, a3
-; ZVFHMIN-NEXT: add a3, sp, a3
-; ZVFHMIN-NEXT: addi a3, a3, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: slli a3, a2, 3
+; ZVFHMIN-NEXT: add a2, a3, a2
+; ZVFHMIN-NEXT: add a2, sp, a2
+; ZVFHMIN-NEXT: addi a2, a2, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v16, v8, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -660,20 +659,21 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: .LBB22_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a2, a1, 3
-; ZVFHMIN-NEXT: add a1, a2, a1
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a1, a0, 3
+; ZVFHMIN-NEXT: add a0, a1, a0
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl1r.v v0, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -706,16 +706,10 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: vmset.m v7
-; ZVFHMIN-NEXT: addi a3, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vmv.v.x v16, a1
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a1, a1, 3
@@ -726,18 +720,22 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2
+; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: slli a3, a3, 3
-; ZVFHMIN-NEXT: add a3, sp, a3
-; ZVFHMIN-NEXT: addi a3, a3, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vmv8r.v v16, v8
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: slli a2, a2, 3
+; ZVFHMIN-NEXT: add a2, sp, a2
+; ZVFHMIN-NEXT: addi a2, a2, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v16, v8, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -747,14 +745,15 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: .LBB23_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v16, v16, v24
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll
index b7f2133..575d50d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp-mask.ll
@@ -42,9 +42,9 @@ define <vscale x 2 x i1> @vfptosi_nxv2i1_nxv2f16(<vscale x 2 x half> %va, <vscal
;
; ZVFHMIN-LABEL: vfptosi_nxv2i1_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t
; ZVFHMIN-NEXT: vmsne.vi v0, v8, 0, v0.t
; ZVFHMIN-NEXT: ret
@@ -62,9 +62,9 @@ define <vscale x 2 x i1> @vfptosi_nxv2i1_nxv2f16_unmasked(<vscale x 2 x half> %v
;
; ZVFHMIN-LABEL: vfptosi_nxv2i1_nxv2f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v8, v9
; ZVFHMIN-NEXT: vmsne.vi v0, v8, 0
; ZVFHMIN-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
index d990c74..e33ab98 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
@@ -130,9 +130,8 @@ define <vscale x 2 x i7> @vfptosi_v4i7_v4f16(<vscale x 2 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfptosi_v4i7_v4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; ZVFHMIN-NEXT: vnsrl.wi v8, v8, 0, v0.t
@@ -153,9 +152,8 @@ define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f16(<vscale x 2 x half> %va, <vscal
;
; ZVFHMIN-LABEL: vfptosi_nxv2i8_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; ZVFHMIN-NEXT: vnsrl.wi v8, v8, 0, v0.t
@@ -174,9 +172,8 @@ define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f16_unmasked(<vscale x 2 x half> %v
;
; ZVFHMIN-LABEL: vfptosi_nxv2i8_nxv2f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vfncvt.rtz.x.f.w v8, v9
; ZVFHMIN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; ZVFHMIN-NEXT: vnsrl.wi v8, v8, 0
@@ -196,9 +193,8 @@ define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f16(<vscale x 2 x half> %va, <vsc
;
; ZVFHMIN-LABEL: vfptosi_nxv2i16_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
@@ -214,9 +210,8 @@ define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f16_unmasked(<vscale x 2 x half>
;
; ZVFHMIN-LABEL: vfptosi_nxv2i16_nxv2f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vfncvt.rtz.x.f.w v8, v9
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
@@ -235,9 +230,9 @@ define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f16(<vscale x 2 x half> %va, <vsc
;
; ZVFHMIN-LABEL: vfptosi_nxv2i32_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v8, v9, v0.t
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
@@ -254,9 +249,9 @@ define <vscale x 2 x i32> @vfptosi_nxv2i32_nxv2f16_unmasked(<vscale x 2 x half>
;
; ZVFHMIN-LABEL: vfptosi_nxv2i32_nxv2f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfcvt.rtz.x.f.v v8, v9
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.fptosi.nxv2i32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
@@ -276,9 +271,9 @@ define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f16(<vscale x 2 x half> %va, <vsc
;
; ZVFHMIN-LABEL: vfptosi_nxv2i64_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.rtz.x.f.v v8, v10, v0.t
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
@@ -286,21 +281,13 @@ define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f16(<vscale x 2 x half> %va, <vsc
}
define <vscale x 2 x i64> @vfptosi_nxv2i64_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
-; ZVFH-LABEL: vfptosi_nxv2i64_nxv2f16_unmasked:
-; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFH-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFH-NEXT: vfwcvt.rtz.x.f.v v8, v10
-; ZVFH-NEXT: ret
-;
-; ZVFHMIN-LABEL: vfptosi_nxv2i64_nxv2f16_unmasked:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.rtz.x.f.v v8, v10
-; ZVFHMIN-NEXT: ret
+; CHECK-LABEL: vfptosi_nxv2i64_nxv2f16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.x.f.v v8, v10
+; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.fptosi.nxv2i64.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i64> %v
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll
index 8ac5992..e1d0ad4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp-mask.ll
@@ -42,9 +42,9 @@ define <vscale x 2 x i1> @vfptoui_nxv2i1_nxv2f16(<vscale x 2 x half> %va, <vscal
;
; ZVFHMIN-LABEL: vfptoui_nxv2i1_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t
; ZVFHMIN-NEXT: vmsne.vi v0, v8, 0, v0.t
; ZVFHMIN-NEXT: ret
@@ -62,9 +62,9 @@ define <vscale x 2 x i1> @vfptoui_nxv2i1_nxv2f16_unmasked(<vscale x 2 x half> %v
;
; ZVFHMIN-LABEL: vfptoui_nxv2i1_nxv2f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfcvt.rtz.xu.f.v v8, v9
; ZVFHMIN-NEXT: vmsne.vi v0, v8, 0
; ZVFHMIN-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
index 3b24a64..86222ec 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
@@ -130,9 +130,8 @@ define <vscale x 2 x i7> @vfptoui_v4i7_v4f16(<vscale x 2 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfptoui_v4i7_v4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; ZVFHMIN-NEXT: vnsrl.wi v8, v8, 0, v0.t
@@ -153,9 +152,8 @@ define <vscale x 2 x i8> @vfptoui_nxv2i8_nxv2f16(<vscale x 2 x half> %va, <vscal
;
; ZVFHMIN-LABEL: vfptoui_nxv2i8_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; ZVFHMIN-NEXT: vnsrl.wi v8, v8, 0, v0.t
@@ -174,9 +172,8 @@ define <vscale x 2 x i8> @vfptoui_nxv2i8_nxv2f16_unmasked(<vscale x 2 x half> %v
;
; ZVFHMIN-LABEL: vfptoui_nxv2i8_nxv2f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vfncvt.rtz.xu.f.w v8, v9
; ZVFHMIN-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; ZVFHMIN-NEXT: vnsrl.wi v8, v8, 0
@@ -196,9 +193,8 @@ define <vscale x 2 x i16> @vfptoui_nxv2i16_nxv2f16(<vscale x 2 x half> %va, <vsc
;
; ZVFHMIN-LABEL: vfptoui_nxv2i16_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
@@ -214,9 +210,8 @@ define <vscale x 2 x i16> @vfptoui_nxv2i16_nxv2f16_unmasked(<vscale x 2 x half>
;
; ZVFHMIN-LABEL: vfptoui_nxv2i16_nxv2f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vfncvt.rtz.xu.f.w v8, v9
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
@@ -235,9 +230,9 @@ define <vscale x 2 x i32> @vfptoui_nxv2i32_nxv2f16(<vscale x 2 x half> %va, <vsc
;
; ZVFHMIN-LABEL: vfptoui_nxv2i32_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfcvt.rtz.xu.f.v v8, v9, v0.t
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
@@ -254,9 +249,9 @@ define <vscale x 2 x i32> @vfptoui_nxv2i32_nxv2f16_unmasked(<vscale x 2 x half>
;
; ZVFHMIN-LABEL: vfptoui_nxv2i32_nxv2f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfcvt.rtz.xu.f.v v8, v9
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.fptoui.nxv2i32.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
@@ -276,9 +271,9 @@ define <vscale x 2 x i64> @vfptoui_nxv2i64_nxv2f16(<vscale x 2 x half> %va, <vsc
;
; ZVFHMIN-LABEL: vfptoui_nxv2i64_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.rtz.xu.f.v v8, v10, v0.t
; ZVFHMIN-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> %m, i32 %evl)
@@ -286,21 +281,13 @@ define <vscale x 2 x i64> @vfptoui_nxv2i64_nxv2f16(<vscale x 2 x half> %va, <vsc
}
define <vscale x 2 x i64> @vfptoui_nxv2i64_nxv2f16_unmasked(<vscale x 2 x half> %va, i32 zeroext %evl) {
-; ZVFH-LABEL: vfptoui_nxv2i64_nxv2f16_unmasked:
-; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; ZVFH-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFH-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; ZVFH-NEXT: vfwcvt.rtz.xu.f.v v8, v10
-; ZVFH-NEXT: ret
-;
-; ZVFHMIN-LABEL: vfptoui_nxv2i64_nxv2f16_unmasked:
-; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.rtz.xu.f.v v8, v10
-; ZVFHMIN-NEXT: ret
+; CHECK-LABEL: vfptoui_nxv2i64_nxv2f16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vfwcvt.f.f.v v10, v8
+; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfwcvt.rtz.xu.f.v v8, v10
+; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.fptoui.nxv2i64.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
ret <vscale x 2 x i64> %v
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
index 8e57be1..e94d0a6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
@@ -242,9 +242,9 @@ define <vscale x 1 x half> @vfsqrt_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfsqrt_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfsqrt.v v9, v9, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -262,9 +262,9 @@ define <vscale x 1 x half> @vfsqrt_vv_nxv1f16_unmasked(<vscale x 1 x half> %va,
;
; ZVFHMIN-LABEL: vfsqrt_vv_nxv1f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfsqrt.v v9, v9
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -284,9 +284,9 @@ define <vscale x 2 x half> @vfsqrt_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfsqrt_vv_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfsqrt.v v9, v9, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -304,9 +304,9 @@ define <vscale x 2 x half> @vfsqrt_vv_nxv2f16_unmasked(<vscale x 2 x half> %va,
;
; ZVFHMIN-LABEL: vfsqrt_vv_nxv2f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfsqrt.v v9, v9
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -326,9 +326,9 @@ define <vscale x 4 x half> @vfsqrt_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfsqrt_vv_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfsqrt.v v10, v10, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -346,9 +346,9 @@ define <vscale x 4 x half> @vfsqrt_vv_nxv4f16_unmasked(<vscale x 4 x half> %va,
;
; ZVFHMIN-LABEL: vfsqrt_vv_nxv4f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfsqrt.v v10, v10
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -368,9 +368,9 @@ define <vscale x 8 x half> @vfsqrt_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfsqrt_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfsqrt.v v12, v12, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -388,9 +388,9 @@ define <vscale x 8 x half> @vfsqrt_vv_nxv8f16_unmasked(<vscale x 8 x half> %va,
;
; ZVFHMIN-LABEL: vfsqrt_vv_nxv8f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfsqrt.v v12, v12
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -410,9 +410,9 @@ define <vscale x 16 x half> @vfsqrt_vv_nxv16f16(<vscale x 16 x half> %va, <vscal
;
; ZVFHMIN-LABEL: vfsqrt_vv_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsqrt.v v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -430,9 +430,9 @@ define <vscale x 16 x half> @vfsqrt_vv_nxv16f16_unmasked(<vscale x 16 x half> %v
;
; ZVFHMIN-LABEL: vfsqrt_vv_nxv16f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsqrt.v v16, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -458,13 +458,13 @@ define <vscale x 32 x half> @vfsqrt_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
+; ZVFHMIN-NEXT: sltu a4, a0, a3
+; ZVFHMIN-NEXT: addi a4, a4, -1
+; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
-; ZVFHMIN-NEXT: sltu a2, a0, a3
-; ZVFHMIN-NEXT: addi a2, a2, -1
-; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsqrt.v v24, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
@@ -472,9 +472,10 @@ define <vscale x 32 x half> @vfsqrt_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB22_2:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsqrt.v v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -498,14 +499,14 @@ define <vscale x 32 x half> @vfsqrt_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
+; ZVFHMIN-NEXT: sltu a4, a0, a3
+; ZVFHMIN-NEXT: addi a4, a4, -1
+; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v16, a2
-; ZVFHMIN-NEXT: sltu a2, a0, a3
-; ZVFHMIN-NEXT: addi a2, a2, -1
-; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a3, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsqrt.v v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -513,8 +514,9 @@ define <vscale x 32 x half> @vfsqrt_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB23_2:
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsqrt.v v16, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
index d034f65..56ed560 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
@@ -641,10 +641,10 @@ define <vscale x 1 x half> @vfsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfsub_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v9, v9, v10, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -662,10 +662,10 @@ define <vscale x 1 x half> @vfsub_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, <
;
; ZVFHMIN-LABEL: vfsub_vv_nxv1f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v9, v9, v10
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -684,11 +684,11 @@ define <vscale x 1 x half> @vfsub_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <
; ZVFHMIN-LABEL: vfsub_vf_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v9, v10, v8, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -709,11 +709,11 @@ define <vscale x 1 x half> @vfsub_vf_nxv1f16_unmasked(<vscale x 1 x half> %va, h
; ZVFHMIN-LABEL: vfsub_vf_nxv1f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v9, v10, v8
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -735,10 +735,10 @@ define <vscale x 2 x half> @vfsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfsub_vv_nxv2f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v9, v9, v10, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -756,10 +756,10 @@ define <vscale x 2 x half> @vfsub_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, <
;
; ZVFHMIN-LABEL: vfsub_vv_nxv2f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v9, v9, v10
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -778,11 +778,11 @@ define <vscale x 2 x half> @vfsub_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <
; ZVFHMIN-LABEL: vfsub_vf_nxv2f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v9, v10, v8, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -803,11 +803,11 @@ define <vscale x 2 x half> @vfsub_vf_nxv2f16_unmasked(<vscale x 2 x half> %va, h
; ZVFHMIN-LABEL: vfsub_vf_nxv2f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v9, v10, v8
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
@@ -829,10 +829,10 @@ define <vscale x 4 x half> @vfsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfsub_vv_nxv4f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v10, v12, v10, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -850,10 +850,10 @@ define <vscale x 4 x half> @vfsub_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, <
;
; ZVFHMIN-LABEL: vfsub_vv_nxv4f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v10, v12, v10
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -872,11 +872,11 @@ define <vscale x 4 x half> @vfsub_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <
; ZVFHMIN-LABEL: vfsub_vf_nxv4f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v10, v10, v12, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -897,11 +897,11 @@ define <vscale x 4 x half> @vfsub_vf_nxv4f16_unmasked(<vscale x 4 x half> %va, h
; ZVFHMIN-LABEL: vfsub_vf_nxv4f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v9, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v10, v10, v12
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
@@ -923,10 +923,10 @@ define <vscale x 8 x half> @vfsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
;
; ZVFHMIN-LABEL: vfsub_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v12, v16, v12, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -944,10 +944,10 @@ define <vscale x 8 x half> @vfsub_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, <
;
; ZVFHMIN-LABEL: vfsub_vv_nxv8f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v12, v16, v12
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -966,11 +966,11 @@ define <vscale x 8 x half> @vfsub_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <
; ZVFHMIN-LABEL: vfsub_vf_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v12, v12, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -991,11 +991,11 @@ define <vscale x 8 x half> @vfsub_vf_nxv8f16_unmasked(<vscale x 8 x half> %va, h
; ZVFHMIN-LABEL: vfsub_vf_nxv8f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v10, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v12, v12, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
@@ -1017,10 +1017,10 @@ define <vscale x 16 x half> @vfsub_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
;
; ZVFHMIN-LABEL: vfsub_vv_nxv16f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -1038,10 +1038,10 @@ define <vscale x 16 x half> @vfsub_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
;
; ZVFHMIN-LABEL: vfsub_vv_nxv16f16_unmasked:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -1060,11 +1060,11 @@ define <vscale x 16 x half> @vfsub_vf_nxv16f16(<vscale x 16 x half> %va, half %b
; ZVFHMIN-LABEL: vfsub_vf_nxv16f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v12, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -1085,11 +1085,11 @@ define <vscale x 16 x half> @vfsub_vf_nxv16f16_unmasked(<vscale x 16 x half> %va
; ZVFHMIN-LABEL: vfsub_vf_nxv16f16_unmasked:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v12, a1
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v16, v16, v24
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -1117,23 +1117,22 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
-; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -1143,10 +1142,11 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: .LBB44_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vmv1r.v v0, v7
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -1178,22 +1178,21 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT: vmset.m v7
-; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
+; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2
+; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -1203,9 +1202,10 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: .LBB45_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -1236,14 +1236,10 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: add a1, a2, a1
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x11, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 17 * vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vmv8r.v v16, v8
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmv8r.v v24, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: addi a3, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m8, ta, ma
; ZVFHMIN-NEXT: vmv.v.x v16, a1
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a3, a1, 3
@@ -1264,15 +1260,18 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: slli a4, a3, 3
-; ZVFHMIN-NEXT: add a3, a4, a3
-; ZVFHMIN-NEXT: add a3, sp, a3
-; ZVFHMIN-NEXT: addi a3, a3, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: slli a3, a2, 3
+; ZVFHMIN-NEXT: add a2, a3, a2
+; ZVFHMIN-NEXT: add a2, sp, a2
+; ZVFHMIN-NEXT: addi a2, a2, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v16, v8, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -1282,20 +1281,21 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: .LBB46_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a2, a1, 3
-; ZVFHMIN-NEXT: add a1, a2, a1
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a1, a0, 3
+; ZVFHMIN-NEXT: add a0, a1, a0
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl1r.v v0, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -1328,16 +1328,10 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
-; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fmv.x.h a1, fa0
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: vmset.m v7
-; ZVFHMIN-NEXT: addi a3, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vmv.v.x v16, a1
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a1, a1, 3
@@ -1348,18 +1342,22 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: sub a3, a0, a1
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v0, v7, a2
+; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
; ZVFHMIN-NEXT: sltu a2, a0, a3
; ZVFHMIN-NEXT: addi a2, a2, -1
; ZVFHMIN-NEXT: and a2, a2, a3
-; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: slli a3, a3, 3
-; ZVFHMIN-NEXT: add a3, sp, a3
-; ZVFHMIN-NEXT: addi a3, a3, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vmv8r.v v16, v8
+; ZVFHMIN-NEXT: addi a3, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
+; ZVFHMIN-NEXT: csrr a2, vlenb
+; ZVFHMIN-NEXT: slli a2, a2, 3
+; ZVFHMIN-NEXT: add a2, sp, a2
+; ZVFHMIN-NEXT: addi a2, a2, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v16, v8, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -1369,14 +1367,15 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: .LBB47_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v16, v16, v24
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
index f9b8186..c4a3834 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-op-info.mir
@@ -573,6 +573,86 @@ body: |
PseudoVSE8_V_MF2 %x, $noreg, 1, 3 /* e8 */
...
---
+name: vsm_v
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vsm_v
+ ; CHECK: %x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, 1, 0 /* e8 */
+ ; CHECK-NEXT: PseudoVSM_V_B8 %x, $noreg, 1, 0 /* e8 */
+ %x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0
+ PseudoVSM_V_B8 %x, $noreg, 1, 0
+...
+---
+name: vsm_v_incompatible_emul
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vsm_v_incompatible_emul
+ ; CHECK: %x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0 /* e8 */
+ ; CHECK-NEXT: PseudoVSM_V_B16 %x, $noreg, 1, 0 /* e8 */
+ %x:vr = PseudoVMAND_MM_B8 $noreg, $noreg, -1, 0
+ PseudoVSM_V_B16 %x, $noreg, 1, 0
+...
+---
+name: vleN_v
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vleN_v
+ ; CHECK: %x:vr = PseudoVLE8_V_M1 $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVLE8_V_M1 $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+...
+---
+name: vleN_v_incompatible_eew
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vleN_v_incompatible_eew
+ ; CHECK: %x:vr = PseudoVLE8_V_M1 $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ %x:vr = PseudoVLE8_V_M1 $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0
+...
+---
+name: vleN_v_incompatible_emul
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vleN_v_incompatible_emul
+ ; CHECK: %x:vr = PseudoVLE8_V_M1 $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %x:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVLE8_V_M1 $noreg, $noreg, -1, 3 /* e8 */, 0
+ %x:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+...
+---
+name: vlm_v
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vlm_v
+ ; CHECK: %x:vr = PseudoVLM_V_B8 $noreg, $noreg, 1, 0 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_B8 $noreg, %x, 1, 0 /* e8 */
+ %x:vr = PseudoVLM_V_B8 $noreg, $noreg, -1, 0, 0
+ %y:vr = PseudoVMAND_MM_B8 $noreg, %x, 1, 0
+...
+---
+name: vlm_v_incompatible_eew
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vlm_v_incompatible_eew
+ ; CHECK: %x:vr = PseudoVLM_V_B8 $noreg, $noreg, -1, 0 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, $noreg, %x, 1, 4 /* e16 */, 0 /* tu, mu */
+ %x:vr = PseudoVLM_V_B8 $noreg, $noreg, -1, 0, 0
+ %y:vr = PseudoVADD_VV_M1 $noreg, $noreg, %x, 1, 4 /* e16 */, 0
+...
+---
+name: vlm_v_incompatible_emul
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vlm_v_incompatible_emul
+ ; CHECK: %x:vr = PseudoVLM_V_B8 $noreg, $noreg, -1, 0 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVMAND_MM_B16 $noreg, %x, 1, 0 /* e8 */
+ %x:vr = PseudoVLM_V_B8 $noreg, $noreg, -1, 0, 0
+ %y:vr = PseudoVMAND_MM_B16 $noreg, %x, 1, 0
+...
+---
name: vsseN_v
body: |
bb.0:
@@ -705,6 +785,56 @@ body: |
%y:vr = PseudoVLUXEI8_V_MF2_M1 $noreg, $noreg, %x, 1, 4 /* e16 */, 0
...
---
+name: vluxeiN_v_idx_incompatible_eew
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vluxeiN_v_idx_incompatible_eew
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0
+ %y:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+...
+---
+name: vluxeiN_v_idx_incompatible_emul
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vluxeiN_v_idx_incompatible_emul
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVLUXEI8_V_MF2_MF2 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVLUXEI8_V_MF2_MF2 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+...
+---
+name: vluxeiN_v_vd
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vluxeiN_v_vd
+ ; CHECK: %x:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+...
+---
+name: vluxeiN_v_vd_incompatible_eew
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vluxeiN_v_vd_incompatible_eew
+ ; CHECK: %x:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0 /* tu, mu */
+ %x:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 4 /* e16 */, 0
+...
+---
+name: vluxeiN_vd_incompatible_emul
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vluxeiN_vd_incompatible_emul
+ ; CHECK: %x:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVLUXEI8_V_M1_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVADD_VV_MF2 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+...
+---
name: vmop_mm
body: |
bb.0:
@@ -1094,3 +1224,116 @@ body: |
%x:vr = PseudoVMAND_MM_B1 $noreg, $noreg, -1, 0
%y:vr = PseudoVIOTA_M_MF2 $noreg, %x, 1, 3 /* e8 */, 0
...
+name: vred_vs2
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vs2
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+...
+---
+name: vred_vs1
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vs1
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+...
+---
+name: vred_vs1_vs2
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vs1_vs2
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0
+...
+---
+name: vred_vs1_vs2_incompatible_eew
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vs1_vs2_incompatible_eew
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 4 /* e16 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDAND_VS_M1_E8 $noreg, %x, %x, 1, 4 /* e16 */, 0
+...
+---
+name: vred_vs1_vs2_incompatible_emul
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vs1_vs2_incompatible_emul
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDAND_VS_MF2_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDAND_VS_MF2_E8 $noreg, %x, %x, 1, 3 /* e8 */, 0
+...
+---
+name: vred_other_user_is_vl0
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_other_user_is_vl0
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0
+...
+---
+name: vred_both_vl0
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_both_vl0
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 0, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 0, 3 /* e8 */, 0
+ %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0
+...
+---
+name: vred_vl0_and_vlreg
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vl0_and_vlreg
+ ; CHECK: %vl:gprnox0 = COPY $x1
+ ; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, %vl, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0 /* tu, mu */
+ %vl:gprnox0 = COPY $x1
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, %vl, 3 /* e8 */, 0
+ %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 0, 3 /* e8 */, 0
+...
+---
+name: vred_vlreg_and_vl0
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_vlreg_and_vl0
+ ; CHECK: %vl:gprnox0 = COPY $x1
+ ; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 0, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 3 /* e8 */, 0 /* tu, mu */
+ %vl:gprnox0 = COPY $x1
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 0, 3 /* e8 */, 0
+ %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, %vl, 3 /* e8 */, 0
+...
+---
+name: vred_other_user_is_vl2
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vred_other_user_is_vl2
+ ; CHECK: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, 2, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 2, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 3 /* e8 */, 0
+ %y:vr = PseudoVREDSUM_VS_M1_E8 $noreg, $noreg, %x, 1, 3 /* e8 */, 0
+ %z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 2, 3 /* e8 */, 0
+...
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
index 3f966b0..0a366f4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
@@ -110,4 +110,24 @@ body: |
%y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
%z:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, -1, 3 /* e8 */, 0
...
+---
+name: vfcvt_x_f_v_nofpexcept
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vfcvt_x_f_v_nofpexcept
+ ; CHECK: %x:vr = nofpexcept PseudoVFCVT_X_F_V_M1 $noreg, $noreg, 0, 1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = nofpexcept PseudoVFCVT_X_F_V_M1 $noreg, $noreg, 0, -1, 3 /* e32 */, 0
+ %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+...
+---
+name: vfcvt_x_f_v_fpexcept
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vfcvt_x_f_v_fpexcept
+ ; CHECK: %x:vr = PseudoVFCVT_X_F_V_M1 $noreg, $noreg, 0, -1, 3 /* e8 */, 0 /* tu, mu */
+ ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVFCVT_X_F_V_M1 $noreg, $noreg, 0, -1, 3 /* e32 */, 0
+ %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+...
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlopt-volatile-ld.mir b/llvm/test/CodeGen/RISCV/rvv/vlopt-volatile-ld.mir
new file mode 100644
index 0000000..e8f7957
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlopt-volatile-ld.mir
@@ -0,0 +1,13 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc %s -o - -mtriple=riscv64 -mattr=+v -run-pass=riscv-vl-optimizer -verify-machineinstrs | FileCheck %s
+
+---
+name: vleN_v_volatile
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: vleN_v
+ ; CHECK: %x:vr = PseudoVLE8_V_M1 $noreg, $noreg, -1, 3 /* e8 */, 0 /* tu, mu */ :: (volatile load (<vscale x 1 x s64>))
+ ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0 /* tu, mu */
+ %x:vr = PseudoVLE8_V_M1 $noreg, $noreg, -1, 3 /* e8 */, 0 :: (volatile load (<vscale x 1 x s64>))
+ %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 3 /* e8 */, 0
+...
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll
index 333117c..c334e70 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-vp.ll
@@ -1654,9 +1654,9 @@ define <vscale x 1 x i64> @vmacc_vx_nxv1i64(<vscale x 1 x i64> %a, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu
+; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu
; RV32-NEXT: vmacc.vv v9, v8, v10, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: addi sp, sp, 16
@@ -1685,9 +1685,9 @@ define <vscale x 1 x i64> @vmacc_vx_nxv1i64_unmasked(<vscale x 1 x i64> %a, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma
; RV32-NEXT: vmacc.vv v9, v8, v10
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: addi sp, sp, 16
@@ -1729,9 +1729,8 @@ define <vscale x 1 x i64> @vmacc_vx_nxv1i64_ta(<vscale x 1 x i64> %a, i64 %b, <v
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmacc.vv v9, v8, v10, v0.t
; RV32-NEXT: vmv.v.v v8, v9
; RV32-NEXT: addi sp, sp, 16
@@ -1791,9 +1790,9 @@ define <vscale x 2 x i64> @vmacc_vx_nxv2i64(<vscale x 2 x i64> %a, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu
+; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, mu
; RV32-NEXT: vmacc.vv v10, v8, v12, v0.t
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: addi sp, sp, 16
@@ -1822,9 +1821,9 @@ define <vscale x 2 x i64> @vmacc_vx_nxv2i64_unmasked(<vscale x 2 x i64> %a, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, ma
; RV32-NEXT: vmacc.vv v10, v8, v12
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: addi sp, sp, 16
@@ -1866,9 +1865,8 @@ define <vscale x 2 x i64> @vmacc_vx_nxv2i64_ta(<vscale x 2 x i64> %a, i64 %b, <v
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmacc.vv v10, v8, v12, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: addi sp, sp, 16
@@ -1928,9 +1926,9 @@ define <vscale x 4 x i64> @vmacc_vx_nxv4i64(<vscale x 4 x i64> %a, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu
+; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu
; RV32-NEXT: vmacc.vv v12, v8, v16, v0.t
; RV32-NEXT: vmv4r.v v8, v12
; RV32-NEXT: addi sp, sp, 16
@@ -1959,9 +1957,9 @@ define <vscale x 4 x i64> @vmacc_vx_nxv4i64_unmasked(<vscale x 4 x i64> %a, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma
+; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, ma
; RV32-NEXT: vmacc.vv v12, v8, v16
; RV32-NEXT: vmv4r.v v8, v12
; RV32-NEXT: addi sp, sp, 16
@@ -2003,9 +2001,8 @@ define <vscale x 4 x i64> @vmacc_vx_nxv4i64_ta(<vscale x 4 x i64> %a, i64 %b, <v
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmacc.vv v12, v8, v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: addi sp, sp, 16
@@ -2067,9 +2064,9 @@ define <vscale x 8 x i64> @vmacc_vx_nxv8i64(<vscale x 8 x i64> %a, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v24, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, tu, mu
+; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu
; RV32-NEXT: vmacc.vv v16, v8, v24, v0.t
; RV32-NEXT: vmv8r.v v8, v16
; RV32-NEXT: addi sp, sp, 16
@@ -2098,9 +2095,9 @@ define <vscale x 8 x i64> @vmacc_vx_nxv8i64_unmasked(<vscale x 8 x i64> %a, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v24, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, tu, ma
+; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, ma
; RV32-NEXT: vmacc.vv v16, v8, v24
; RV32-NEXT: vmv8r.v v8, v16
; RV32-NEXT: addi sp, sp, 16
@@ -2143,9 +2140,8 @@ define <vscale x 8 x i64> @vmacc_vx_nxv8i64_ta(<vscale x 8 x i64> %a, i64 %b, <v
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vmacc.vv v16, v8, v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
index 7818e99..3df0763 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
@@ -1124,9 +1124,8 @@ define <vscale x 1 x i64> @vmax_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmax.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1151,9 +1150,8 @@ define <vscale x 1 x i64> @vmax_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmax.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1200,9 +1198,8 @@ define <vscale x 2 x i64> @vmax_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmax.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1227,9 +1224,8 @@ define <vscale x 2 x i64> @vmax_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmax.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1276,9 +1272,8 @@ define <vscale x 4 x i64> @vmax_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmax.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1303,9 +1298,8 @@ define <vscale x 4 x i64> @vmax_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmax.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1352,9 +1346,8 @@ define <vscale x 8 x i64> @vmax_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmax.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1379,9 +1372,8 @@ define <vscale x 8 x i64> @vmax_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmax.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
index 674b0b8..8147d46 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
@@ -1123,9 +1123,8 @@ define <vscale x 1 x i64> @vmaxu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmaxu.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1150,9 +1149,8 @@ define <vscale x 1 x i64> @vmaxu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmaxu.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1199,9 +1197,8 @@ define <vscale x 2 x i64> @vmaxu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmaxu.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1226,9 +1223,8 @@ define <vscale x 2 x i64> @vmaxu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmaxu.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1275,9 +1271,8 @@ define <vscale x 4 x i64> @vmaxu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmaxu.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1302,9 +1297,8 @@ define <vscale x 4 x i64> @vmaxu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmaxu.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1351,9 +1345,8 @@ define <vscale x 8 x i64> @vmaxu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmaxu.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1378,9 +1371,8 @@ define <vscale x 8 x i64> @vmaxu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmaxu.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
index 79631cd..614bd4cb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
@@ -1124,9 +1124,8 @@ define <vscale x 1 x i64> @vmin_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmin.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1151,9 +1150,8 @@ define <vscale x 1 x i64> @vmin_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmin.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1200,9 +1198,8 @@ define <vscale x 2 x i64> @vmin_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmin.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1227,9 +1224,8 @@ define <vscale x 2 x i64> @vmin_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmin.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1276,9 +1272,8 @@ define <vscale x 4 x i64> @vmin_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmin.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1303,9 +1298,8 @@ define <vscale x 4 x i64> @vmin_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmin.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1352,9 +1346,8 @@ define <vscale x 8 x i64> @vmin_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmin.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1379,9 +1372,8 @@ define <vscale x 8 x i64> @vmin_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmin.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
index bc93b62..21160553a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
@@ -1123,9 +1123,8 @@ define <vscale x 1 x i64> @vminu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vminu.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1150,9 +1149,8 @@ define <vscale x 1 x i64> @vminu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vminu.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1199,9 +1197,8 @@ define <vscale x 2 x i64> @vminu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vminu.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1226,9 +1223,8 @@ define <vscale x 2 x i64> @vminu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vminu.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1275,9 +1271,8 @@ define <vscale x 4 x i64> @vminu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vminu.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1302,9 +1297,8 @@ define <vscale x 4 x i64> @vminu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vminu.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1351,9 +1345,8 @@ define <vscale x 8 x i64> @vminu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vminu.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1378,9 +1371,8 @@ define <vscale x 8 x i64> @vminu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vminu.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
index b63098b..f0907e4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
@@ -934,9 +934,8 @@ define <vscale x 1 x i64> @vmul_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmul.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -961,9 +960,8 @@ define <vscale x 1 x i64> @vmul_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vmul.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1010,9 +1008,8 @@ define <vscale x 2 x i64> @vmul_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmul.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1037,9 +1034,8 @@ define <vscale x 2 x i64> @vmul_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmul.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1086,9 +1082,8 @@ define <vscale x 4 x i64> @vmul_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmul.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1113,9 +1108,8 @@ define <vscale x 4 x i64> @vmul_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vmul.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1162,9 +1156,8 @@ define <vscale x 8 x i64> @vmul_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1189,9 +1182,8 @@ define <vscale x 8 x i64> @vmul_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll
index 2e0daa6..3484d28 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-vp.ll
@@ -1654,9 +1654,9 @@ define <vscale x 1 x i64> @vnmsac_vx_nxv1i64(<vscale x 1 x i64> %a, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu
+; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu
; RV32-NEXT: vnmsac.vv v9, v8, v10, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: addi sp, sp, 16
@@ -1685,9 +1685,9 @@ define <vscale x 1 x i64> @vnmsac_vx_nxv1i64_unmasked(<vscale x 1 x i64> %a, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, ma
+; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma
; RV32-NEXT: vnmsac.vv v9, v8, v10
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: addi sp, sp, 16
@@ -1729,9 +1729,8 @@ define <vscale x 1 x i64> @vnmsac_vx_nxv1i64_ta(<vscale x 1 x i64> %a, i64 %b, <
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vnmsac.vv v9, v8, v10, v0.t
; RV32-NEXT: vmv.v.v v8, v9
; RV32-NEXT: addi sp, sp, 16
@@ -1791,9 +1790,9 @@ define <vscale x 2 x i64> @vnmsac_vx_nxv2i64(<vscale x 2 x i64> %a, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu
+; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, mu
; RV32-NEXT: vnmsac.vv v10, v8, v12, v0.t
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: addi sp, sp, 16
@@ -1822,9 +1821,9 @@ define <vscale x 2 x i64> @vnmsac_vx_nxv2i64_unmasked(<vscale x 2 x i64> %a, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, ma
+; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, ma
; RV32-NEXT: vnmsac.vv v10, v8, v12
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: addi sp, sp, 16
@@ -1866,9 +1865,8 @@ define <vscale x 2 x i64> @vnmsac_vx_nxv2i64_ta(<vscale x 2 x i64> %a, i64 %b, <
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vnmsac.vv v10, v8, v12, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: addi sp, sp, 16
@@ -1928,9 +1926,9 @@ define <vscale x 4 x i64> @vnmsac_vx_nxv4i64(<vscale x 4 x i64> %a, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu
+; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, mu
; RV32-NEXT: vnmsac.vv v12, v8, v16, v0.t
; RV32-NEXT: vmv4r.v v8, v12
; RV32-NEXT: addi sp, sp, 16
@@ -1959,9 +1957,9 @@ define <vscale x 4 x i64> @vnmsac_vx_nxv4i64_unmasked(<vscale x 4 x i64> %a, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, ma
+; RV32-NEXT: vsetvli zero, zero, e64, m4, tu, ma
; RV32-NEXT: vnmsac.vv v12, v8, v16
; RV32-NEXT: vmv4r.v v8, v12
; RV32-NEXT: addi sp, sp, 16
@@ -2003,9 +2001,8 @@ define <vscale x 4 x i64> @vnmsac_vx_nxv4i64_ta(<vscale x 4 x i64> %a, i64 %b, <
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vnmsac.vv v12, v8, v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: addi sp, sp, 16
@@ -2067,9 +2064,9 @@ define <vscale x 8 x i64> @vnmsac_vx_nxv8i64(<vscale x 8 x i64> %a, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v24, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, tu, mu
+; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu
; RV32-NEXT: vnmsac.vv v16, v8, v24, v0.t
; RV32-NEXT: vmv8r.v v8, v16
; RV32-NEXT: addi sp, sp, 16
@@ -2098,9 +2095,9 @@ define <vscale x 8 x i64> @vnmsac_vx_nxv8i64_unmasked(<vscale x 8 x i64> %a, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v24, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, tu, ma
+; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, ma
; RV32-NEXT: vnmsac.vv v16, v8, v24
; RV32-NEXT: vmv8r.v v8, v16
; RV32-NEXT: addi sp, sp, 16
@@ -2143,9 +2140,8 @@ define <vscale x 8 x i64> @vnmsac_vx_nxv8i64_ta(<vscale x 8 x i64> %a, i64 %b, <
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vnmsac.vv v16, v8, v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll
index ef281c5..e864d71 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll
@@ -1326,9 +1326,8 @@ define <vscale x 1 x i64> @vor_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscal
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vor.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1353,9 +1352,8 @@ define <vscale x 1 x i64> @vor_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64 %
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vor.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1422,9 +1420,8 @@ define <vscale x 2 x i64> @vor_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscal
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vor.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1449,9 +1446,8 @@ define <vscale x 2 x i64> @vor_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64 %
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vor.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1518,9 +1514,8 @@ define <vscale x 4 x i64> @vor_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscal
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vor.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1545,9 +1540,8 @@ define <vscale x 4 x i64> @vor_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64 %
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vor.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1614,9 +1608,8 @@ define <vscale x 8 x i64> @vor_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscal
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1641,9 +1634,8 @@ define <vscale x 8 x i64> @vor_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64 %
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vor.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
index 3273274..66ba269 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
@@ -893,9 +893,8 @@ define <vscale x 1 x i64> @vrem_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vrem.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -920,9 +919,8 @@ define <vscale x 1 x i64> @vrem_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vrem.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -969,9 +967,8 @@ define <vscale x 2 x i64> @vrem_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vrem.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -996,9 +993,8 @@ define <vscale x 2 x i64> @vrem_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vrem.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1045,9 +1041,8 @@ define <vscale x 4 x i64> @vrem_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vrem.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1072,9 +1067,8 @@ define <vscale x 4 x i64> @vrem_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vrem.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1121,9 +1115,8 @@ define <vscale x 8 x i64> @vrem_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vrem.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1148,9 +1141,8 @@ define <vscale x 8 x i64> @vrem_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vrem.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
index 6b588d0..4608661 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
@@ -892,9 +892,8 @@ define <vscale x 1 x i64> @vremu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vremu.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -919,9 +918,8 @@ define <vscale x 1 x i64> @vremu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vremu.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -968,9 +966,8 @@ define <vscale x 2 x i64> @vremu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vremu.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -995,9 +992,8 @@ define <vscale x 2 x i64> @vremu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vremu.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1044,9 +1040,8 @@ define <vscale x 4 x i64> @vremu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vremu.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1071,9 +1066,8 @@ define <vscale x 4 x i64> @vremu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vremu.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1120,9 +1114,8 @@ define <vscale x 8 x i64> @vremu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vremu.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1147,9 +1140,8 @@ define <vscale x 8 x i64> @vremu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vremu.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll
index 0f38e94..c41139c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-vp.ll
@@ -842,9 +842,8 @@ define <vscale x 1 x i64> @vrsub_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsub.vv v8, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -869,9 +868,8 @@ define <vscale x 1 x i64> @vrsub_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsub.vv v8, v9, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -918,9 +916,8 @@ define <vscale x 2 x i64> @vrsub_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsub.vv v8, v10, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -945,9 +942,8 @@ define <vscale x 2 x i64> @vrsub_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsub.vv v8, v10, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -994,9 +990,8 @@ define <vscale x 4 x i64> @vrsub_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsub.vv v8, v12, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1021,9 +1016,8 @@ define <vscale x 4 x i64> @vrsub_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsub.vv v8, v12, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1070,9 +1064,8 @@ define <vscale x 8 x i64> @vrsub_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsub.vv v8, v16, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1097,9 +1090,8 @@ define <vscale x 8 x i64> @vrsub_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsub.vv v8, v16, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
index 575d041..e471f4b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
@@ -1425,9 +1425,8 @@ define <vscale x 1 x i64> @vsadd_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsadd.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1452,9 +1451,8 @@ define <vscale x 1 x i64> @vsadd_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsadd.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1521,9 +1519,8 @@ define <vscale x 2 x i64> @vsadd_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsadd.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1548,9 +1545,8 @@ define <vscale x 2 x i64> @vsadd_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsadd.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1617,9 +1613,8 @@ define <vscale x 4 x i64> @vsadd_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsadd.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1644,9 +1639,8 @@ define <vscale x 4 x i64> @vsadd_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsadd.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1713,9 +1707,8 @@ define <vscale x 8 x i64> @vsadd_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsadd.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1740,9 +1733,8 @@ define <vscale x 8 x i64> @vsadd_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsadd.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
index c9ed72b..f76a2b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
@@ -1424,9 +1424,8 @@ define <vscale x 1 x i64> @vsaddu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vs
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsaddu.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1451,9 +1450,8 @@ define <vscale x 1 x i64> @vsaddu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i6
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsaddu.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1520,9 +1518,8 @@ define <vscale x 2 x i64> @vsaddu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vs
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsaddu.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1547,9 +1544,8 @@ define <vscale x 2 x i64> @vsaddu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i6
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsaddu.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1616,9 +1612,8 @@ define <vscale x 4 x i64> @vsaddu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vs
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsaddu.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1643,9 +1638,8 @@ define <vscale x 4 x i64> @vsaddu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i6
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsaddu.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1712,9 +1706,8 @@ define <vscale x 8 x i64> @vsaddu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vs
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsaddu.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1739,9 +1732,8 @@ define <vscale x 8 x i64> @vsaddu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i6
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsaddu.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
index c0da928..ebf8d5e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
@@ -1468,9 +1468,8 @@ define <vscale x 1 x i64> @vssub_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vssub.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1495,9 +1494,8 @@ define <vscale x 1 x i64> @vssub_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vssub.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1566,9 +1564,8 @@ define <vscale x 2 x i64> @vssub_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vssub.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1593,9 +1590,8 @@ define <vscale x 2 x i64> @vssub_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vssub.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1664,9 +1660,8 @@ define <vscale x 4 x i64> @vssub_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vssub.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1691,9 +1686,8 @@ define <vscale x 4 x i64> @vssub_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vssub.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1762,9 +1756,8 @@ define <vscale x 8 x i64> @vssub_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsc
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vssub.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1789,9 +1782,8 @@ define <vscale x 8 x i64> @vssub_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vssub.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
index b602f11..d54901c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
@@ -1466,9 +1466,8 @@ define <vscale x 1 x i64> @vssubu_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vs
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vssubu.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1493,9 +1492,8 @@ define <vscale x 1 x i64> @vssubu_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i6
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vssubu.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1564,9 +1562,8 @@ define <vscale x 2 x i64> @vssubu_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vs
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vssubu.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1591,9 +1588,8 @@ define <vscale x 2 x i64> @vssubu_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i6
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vssubu.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1662,9 +1658,8 @@ define <vscale x 4 x i64> @vssubu_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vs
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vssubu.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1689,9 +1684,8 @@ define <vscale x 4 x i64> @vssubu_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i6
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vssubu.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1760,9 +1754,8 @@ define <vscale x 8 x i64> @vssubu_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vs
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vssubu.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1787,9 +1780,8 @@ define <vscale x 8 x i64> @vssubu_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i6
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vssubu.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll
index 65ba791..e28da6b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll
@@ -922,9 +922,8 @@ define <vscale x 1 x i64> @vsub_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -949,9 +948,8 @@ define <vscale x 1 x i64> @vsub_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsub.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -998,9 +996,8 @@ define <vscale x 2 x i64> @vsub_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsub.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1025,9 +1022,8 @@ define <vscale x 2 x i64> @vsub_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsub.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1074,9 +1070,8 @@ define <vscale x 4 x i64> @vsub_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsub.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1101,9 +1096,8 @@ define <vscale x 4 x i64> @vsub_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsub.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1150,9 +1144,8 @@ define <vscale x 8 x i64> @vsub_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1177,9 +1170,8 @@ define <vscale x 8 x i64> @vsub_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsub.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
index f3dd7ec..1694a7a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll
@@ -1694,9 +1694,8 @@ define <vscale x 1 x i64> @vxor_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vxor.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1721,9 +1720,8 @@ define <vscale x 1 x i64> @vxor_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vxor.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1810,9 +1808,8 @@ define <vscale x 2 x i64> @vxor_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vxor.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1837,9 +1834,8 @@ define <vscale x 2 x i64> @vxor_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vxor.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1926,9 +1922,8 @@ define <vscale x 4 x i64> @vxor_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vxor.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -1953,9 +1948,8 @@ define <vscale x 4 x i64> @vxor_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vxor.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -2042,9 +2036,8 @@ define <vscale x 8 x i64> @vxor_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsca
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vxor.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
@@ -2069,9 +2062,8 @@ define <vscale x 8 x i64> @vxor_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: addi a0, sp, 8
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vxor.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
diff --git a/llvm/test/CodeGen/SPIRV/AtomicCompareExchange.ll b/llvm/test/CodeGen/SPIRV/AtomicCompareExchange.ll
index f8207c56..5ce4a19 100644
--- a/llvm/test/CodeGen/SPIRV/AtomicCompareExchange.ll
+++ b/llvm/test/CodeGen/SPIRV/AtomicCompareExchange.ll
@@ -1,6 +1,6 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
-; CHECK-SPIRV: %[[#Int:]] = OpTypeInt 32 0
+; CHECK-SPIRV-DAG: %[[#Int:]] = OpTypeInt 32 0
; CHECK-SPIRV-DAG: %[[#MemScope_CrossDevice:]] = OpConstant %[[#Int]] 0
; CHECK-SPIRV-DAG: %[[#MemSemEqual_SeqCst:]] = OpConstant %[[#Int]] 16
; CHECK-SPIRV-DAG: %[[#MemSemUnequal_Acquire:]] = OpConstant %[[#Int]] 2
diff --git a/llvm/test/CodeGen/SPIRV/event-zero-const.ll b/llvm/test/CodeGen/SPIRV/event-zero-const.ll
index f3f20a0..523d2ad 100644
--- a/llvm/test/CodeGen/SPIRV/event-zero-const.ll
+++ b/llvm/test/CodeGen/SPIRV/event-zero-const.ll
@@ -4,10 +4,10 @@
; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
-; CHECK: %[[#LongTy:]] = OpTypeInt 64 0
-; CHECK: %[[#EventTy:]] = OpTypeEvent
-; CHECK: %[[#LongNull:]] = OpConstantNull %[[#LongTy]]
-; CHECK: %[[#EventNull:]] = OpConstantNull %[[#EventTy]]
+; CHECK-DAG: %[[#LongTy:]] = OpTypeInt 64 0
+; CHECK-DAG: %[[#EventTy:]] = OpTypeEvent
+; CHECK-DAG: %[[#LongNull:]] = OpConstantNull %[[#LongTy]]
+; CHECK-DAG: %[[#EventNull:]] = OpConstantNull %[[#EventTy]]
; CHECK: OpFunction
; CHECK: OpINotEqual %[[#]] %[[#]] %[[#LongNull]]
; CHECK: OpGroupAsyncCopy %[[#EventTy]] %[[#]] %[[#]] %[[#]] %[[#]] %[[#]] %[[#EventNull]]
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp-simple-hierarchy.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp-simple-hierarchy.ll
index 368c5d4..80309e9 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp-simple-hierarchy.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp-simple-hierarchy.ll
@@ -1,16 +1,17 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
-; TODO: This test currently fails with LLVM_ENABLE_EXPENSIVE_CHECKS enabled
-; XFAIL: expensive_checks
-
; CHECK-DAG: OpName %[[I9:.*]] "_ZN13BaseIncrement9incrementEPi"
; CHECK-DAG: OpName %[[I29:.*]] "_ZN12IncrementBy29incrementEPi"
; CHECK-DAG: OpName %[[I49:.*]] "_ZN12IncrementBy49incrementEPi"
; CHECK-DAG: OpName %[[I89:.*]] "_ZN12IncrementBy89incrementEPi"
+; CHECK-DAG: OpName %[[Foo:.*]] "foo"
; CHECK-DAG: %[[TyVoid:.*]] = OpTypeVoid
-; CHECK-DAG: %[[TyArr:.*]] = OpTypeArray
+; CHECK-DAG: %[[TyInt32:.*]] = OpTypeInt 32 0
+; CHECK-DAG: %[[TyInt8:.*]] = OpTypeInt 8 0
+; CHECK-DAG: %[[Const8:.*]] = OpConstant %[[TyInt32]] 8
+; CHECK-DAG: %[[TyArr:.*]] = OpTypeArray %[[TyInt8]] %[[Const8]]
; CHECK-DAG: %[[TyStruct1:.*]] = OpTypeStruct %[[TyArr]]
; CHECK-DAG: %[[TyStruct2:.*]] = OpTypeStruct %[[TyStruct1]]
; CHECK-DAG: %[[TyPtrStruct2:.*]] = OpTypePointer Generic %[[TyStruct2]]
@@ -18,16 +19,21 @@
; CHECK-DAG: %[[TyPtrFun:.*]] = OpTypePointer Generic %[[TyFun]]
; CHECK-DAG: %[[TyPtrPtrFun:.*]] = OpTypePointer Generic %[[TyPtrFun]]
-; CHECK: %[[I9]] = OpFunction
-; CHECK: %[[I29]] = OpFunction
-; CHECK: %[[I49]] = OpFunction
-; CHECK: %[[I89]] = OpFunction
+; CHECK-DAG: %[[I9]] = OpFunction
+; CHECK-DAG: %[[I29]] = OpFunction
+; CHECK-DAG: %[[I49]] = OpFunction
+; CHECK-DAG: %[[I89]] = OpFunction
+
+; CHECK: %[[Foo]] = OpFunction
+; CHECK-4: OpFunctionParameter
; CHECK: %[[Arg1:.*]] = OpPhi %[[TyPtrStruct2]]
; CHECK: %[[VTbl:.*]] = OpBitcast %[[TyPtrPtrFun]] %[[#]]
; CHECK: %[[FP:.*]] = OpLoad %[[TyPtrFun]] %[[VTbl]]
; CHECK: %[[#]] = OpFunctionPointerCallINTEL %[[TyVoid]] %[[FP]] %[[Arg1]] %[[#]]
+; CHECK-NO: OpFunction
+
%"cls::id" = type { %"cls::detail::array" }
%"cls::detail::array" = type { [1 x i64] }
%struct.obj_storage_t = type { %"struct.aligned_storage<BaseIncrement, IncrementBy2, IncrementBy4, IncrementBy8>::type" }
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_const.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_const.ll
index 75ad382..b96da63 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_const.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fp_const.ll
@@ -1,9 +1,6 @@
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown --spirv-ext=+SPV_INTEL_function_pointers %s -o - | FileCheck %s
; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
-; TODO: This test currently fails with LLVM_ENABLE_EXPENSIVE_CHECKS enabled
-; XFAIL: expensive_checks
-
; CHECK-DAG: OpCapability FunctionPointersINTEL
; CHECK-DAG: OpCapability Int64
; CHECK: OpExtension "SPV_INTEL_function_pointers"
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fun-ptr-addrcast.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fun-ptr-addrcast.ll
index d38de21..8edecc1 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fun-ptr-addrcast.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_function_pointers/fun-ptr-addrcast.ll
@@ -2,15 +2,9 @@
; pointers/PtrCast-null-in-OpSpecConstantOp.ll (that is OpSpecConstantOp with ptr-cast operation) correctly
; work also for function pointers.
-; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - --spirv-ext=+SPV_INTEL_function_pointers | FileCheck %s
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - --spirv-ext=+SPV_INTEL_function_pointers | FileCheck %s
; TODO: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
-; TODO: This test currently fails with LLVM_ENABLE_EXPENSIVE_CHECKS enabled
-; XFAIL: expensive_checks
-
-; Running with -verify-machineinstrs would lead to "Reading virtual register without a def"
-; error, because OpConstantFunctionPointerINTEL forward-refers to a function definition.
-
; CHECK-COUNT-3: %[[#]] = OpSpecConstantOp %[[#]] 121 %[[#]]
; CHECK-COUNT-3: OpPtrCastToGeneric
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_inline_assembly/inline_asm.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_inline_assembly/inline_asm.ll
index e006651..91286d5 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_inline_assembly/inline_asm.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_INTEL_inline_assembly/inline_asm.ll
@@ -31,20 +31,20 @@
; CHECK-DAG: %[[#Const123:]] = OpConstant %[[#Int32Ty]] 123
; CHECK-DAG: %[[#Const42:]] = OpConstant %[[#DoubleTy:]] 42
-; CHECK: %[[#Dialect:]] = OpAsmTargetINTEL "spirv64-unknown-unknown"
+; CHECK-DAG: %[[#Dialect:]] = OpAsmTargetINTEL "spirv64-unknown-unknown"
; CHECK-NO: OpAsmTargetINTEL
-; CHECK: %[[#Asm1:]] = OpAsmINTEL %[[#VoidTy]] %[[#Fun1Ty]] %[[#Dialect]] "" ""
-; CHECK: %[[#Asm2:]] = OpAsmINTEL %[[#VoidTy]] %[[#Fun1Ty]] %[[#Dialect]] "nop" ""
-; CHECK: %[[#Asm3:]] = OpAsmINTEL %[[#VoidTy]] %[[#Fun1Ty]] %[[#Dialect]] "" "~{cc},~{memory}"
-; CHECK: %[[#Asm4:]] = OpAsmINTEL %[[#Int32Ty]] %[[#Fun2Ty:]] %[[#Dialect]] "clobber_out $0" "=&r"
-; CHECK: %[[#Asm5:]] = OpAsmINTEL %[[#Int32Ty]] %[[#Fun3Ty]] %[[#Dialect]] "icmd $0 $1" "=r,r"
-; CHECK: %[[#Asm6:]] = OpAsmINTEL %[[#FloatTy]] %[[#Fun4Ty]] %[[#Dialect]] "fcmd $0 $1" "=r,r"
-; CHECK: %[[#Asm7:]] = OpAsmINTEL %[[#HalfTy]] %[[#Fun5Ty]] %[[#Dialect]] "fcmdext $0 $1 $2" "=r,r,r"
-; CHECK: %[[#Asm8:]] = OpAsmINTEL %[[#Int8Ty]] %[[#Fun6Ty]] %[[#Dialect]] "cmdext $0 $3 $1 $2" "=r,r,r,r"
-; CHECK: %[[#Asm9:]] = OpAsmINTEL %[[#Int64Ty]] %[[#Fun7Ty]] %[[#Dialect]] "icmdext $0 $3 $1 $2" "=r,r,r,r"
-; CHECK: %[[#Asm10:]] = OpAsmINTEL %[[#VoidTy]] %[[#Fun8Ty]] %[[#Dialect]] "constcmd $0 $1" "r,r"
-; CHECK: %[[#Asm11:]] = OpAsmINTEL %[[#VoidTy]] %[[#Fun8Ty]] %[[#Dialect]] "constcmd $0 $1" "i,i"
+; CHECK-DAG: %[[#Asm1:]] = OpAsmINTEL %[[#VoidTy]] %[[#Fun1Ty]] %[[#Dialect]] "" ""
+; CHECK-DAG: %[[#Asm2:]] = OpAsmINTEL %[[#VoidTy]] %[[#Fun1Ty]] %[[#Dialect]] "nop" ""
+; CHECK-DAG: %[[#Asm3:]] = OpAsmINTEL %[[#VoidTy]] %[[#Fun1Ty]] %[[#Dialect]] "" "~{cc},~{memory}"
+; CHECK-DAG: %[[#Asm4:]] = OpAsmINTEL %[[#Int32Ty]] %[[#Fun2Ty:]] %[[#Dialect]] "clobber_out $0" "=&r"
+; CHECK-DAG: %[[#Asm5:]] = OpAsmINTEL %[[#Int32Ty]] %[[#Fun3Ty]] %[[#Dialect]] "icmd $0 $1" "=r,r"
+; CHECK-DAG: %[[#Asm6:]] = OpAsmINTEL %[[#FloatTy]] %[[#Fun4Ty]] %[[#Dialect]] "fcmd $0 $1" "=r,r"
+; CHECK-DAG: %[[#Asm7:]] = OpAsmINTEL %[[#HalfTy]] %[[#Fun5Ty]] %[[#Dialect]] "fcmdext $0 $1 $2" "=r,r,r"
+; CHECK-DAG: %[[#Asm8:]] = OpAsmINTEL %[[#Int8Ty]] %[[#Fun6Ty]] %[[#Dialect]] "cmdext $0 $3 $1 $2" "=r,r,r,r"
+; CHECK-DAG: %[[#Asm9:]] = OpAsmINTEL %[[#Int64Ty]] %[[#Fun7Ty]] %[[#Dialect]] "icmdext $0 $3 $1 $2" "=r,r,r,r"
+; CHECK-DAG: %[[#Asm10:]] = OpAsmINTEL %[[#VoidTy]] %[[#Fun8Ty]] %[[#Dialect]] "constcmd $0 $1" "r,r"
+; CHECK-DAG: %[[#Asm11:]] = OpAsmINTEL %[[#VoidTy]] %[[#Fun8Ty]] %[[#Dialect]] "constcmd $0 $1" "i,i"
; CHECK-NO: OpAsmINTEL
; CHECK: OpFunction
diff --git a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_shader_clock/shader_clock.ll b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_shader_clock/shader_clock.ll
index 8ecd0a2..bd07ba1 100644
--- a/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_shader_clock/shader_clock.ll
+++ b/llvm/test/CodeGen/SPIRV/extensions/SPV_KHR_shader_clock/shader_clock.ll
@@ -6,9 +6,11 @@
; CHECK: OpCapability ShaderClockKHR
; CHECK: OpExtension "SPV_KHR_shader_clock"
-; CHECK-DAG: [[uint:%[a-z0-9_]+]] = OpTypeInt 32
+; CHECK-DAG: [[uint:%[a-z0-9_]+]] = OpTypeInt 32 0
; CHECK-DAG: [[ulong:%[a-z0-9_]+]] = OpTypeInt 64
; CHECK-DAG: [[v2uint:%[a-z0-9_]+]] = OpTypeVector [[uint]] 2
+; CHECK-DAG: OpConstant [[uint]] 8
+; CHECK-DAG: OpConstant [[uint]] 16
; CHECK-DAG: [[uint_1:%[a-z0-9_]+]] = OpConstant [[uint]] 1
; CHECK-DAG: [[uint_2:%[a-z0-9_]+]] = OpConstant [[uint]] 2
; CHECK-DAG: [[uint_3:%[a-z0-9_]+]] = OpConstant [[uint]] 3
diff --git a/llvm/test/CodeGen/SPIRV/global-var-name-linkage.ll b/llvm/test/CodeGen/SPIRV/global-var-name-linkage.ll
new file mode 100644
index 0000000..4501819
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/global-var-name-linkage.ll
@@ -0,0 +1,59 @@
+; Check names and decoration of global variables.
+
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: OpName %[[#id18:]] "G1"
+; CHECK-DAG: OpName %[[#id22:]] "g1"
+; CHECK-DAG: OpName %[[#id23:]] "g2"
+; CHECK-DAG: OpName %[[#id27:]] "g4"
+; CHECK-DAG: OpName %[[#id30:]] "c1"
+; CHECK-DAG: OpName %[[#id31:]] "n_t"
+; CHECK-DAG: OpName %[[#id32:]] "w"
+; CHECK-DAG: OpName %[[#id34:]] "a.b"
+; CHECK-DAG: OpName %[[#id35:]] "e"
+; CHECK-DAG: OpName %[[#id36:]] "y.z"
+; CHECK-DAG: OpName %[[#id38:]] "x"
+
+; CHECK-NOT: OpDecorate %[[#id18]] LinkageAttributes
+; CHECK-DAG: OpDecorate %[[#id18]] Constant
+; CHECK-DAG: OpDecorate %[[#id22]] Alignment 4
+; CHECK-DAG: OpDecorate %[[#id22]] LinkageAttributes "g1" Export
+; CHECK-DAG: OpDecorate %[[#id23]] Alignment 4
+; CHECK-DAG: OpDecorate %[[#id27]] Alignment 4
+; CHECK-DAG: OpDecorate %[[#id27]] LinkageAttributes "g4" Export
+; CHECK-DAG: OpDecorate %[[#id30]] Constant
+; CHECK-DAG: OpDecorate %[[#id30]] Alignment 4
+; CHECK-DAG: OpDecorate %[[#id30]] LinkageAttributes "c1" Export
+; CHECK-DAG: OpDecorate %[[#id31]] Constant
+; CHECK-DAG: OpDecorate %[[#id31]] LinkageAttributes "n_t" Import
+; CHECK-DAG: OpDecorate %[[#id32]] Constant
+; CHECK-DAG: OpDecorate %[[#id32]] Alignment 4
+; CHECK-DAG: OpDecorate %[[#id32]] LinkageAttributes "w" Export
+; CHECK-DAG: OpDecorate %[[#id34]] Constant
+; CHECK-DAG: OpDecorate %[[#id34]] Alignment 4
+; CHECK-DAG: OpDecorate %[[#id35]] LinkageAttributes "e" Import
+; CHECK-DAG: OpDecorate %[[#id36]] Alignment 4
+; CHECK-DAG: OpDecorate %[[#id38]] Constant
+; CHECK-DAG: OpDecorate %[[#id38]] Alignment 4
+
+%"class.sycl::_V1::nd_item" = type { i8 }
+
+@G1 = private unnamed_addr addrspace(1) constant %"class.sycl::_V1::nd_item" poison, align 1
+@g1 = addrspace(1) global i32 1, align 4
+@g2 = internal addrspace(1) global i32 2, align 4
+@g4 = common addrspace(1) global i32 0, align 4
+@c1 = addrspace(2) constant [2 x i32] [i32 0, i32 1], align 4
+@n_t = external addrspace(2) constant [256 x i32]
+@w = addrspace(1) constant i32 0, align 4
+@a.b = internal addrspace(2) constant [2 x i32] [i32 2, i32 3], align 4
+@e = external addrspace(1) global i32
+@y.z = internal addrspace(1) global i32 0, align 4
+@x = internal addrspace(2) constant float 1.000000e+00, align 4
+
+define internal spir_func void @foo(ptr addrspace(4) align 1 %arg) {
+ ret void
+}
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/cross.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/cross.ll
index 2e0eb8c..b1625c0 100644
--- a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/cross.ll
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/cross.ll
@@ -15,7 +15,7 @@ entry:
; CHECK: %[[#arg0:]] = OpFunctionParameter %[[#vec3_float_16]]
; CHECK: %[[#arg1:]] = OpFunctionParameter %[[#vec3_float_16]]
; CHECK: %[[#]] = OpExtInst %[[#vec3_float_16]] %[[#op_ext_glsl]] Cross %[[#arg0]] %[[#arg1]]
- %hlsl.cross = call <3 x half> @llvm.spv.cross.v4f16(<3 x half> %a, <3 x half> %b)
+ %hlsl.cross = call <3 x half> @llvm.spv.cross.v3f16(<3 x half> %a, <3 x half> %b)
ret <3 x half> %hlsl.cross
}
@@ -25,9 +25,9 @@ entry:
; CHECK: %[[#arg0:]] = OpFunctionParameter %[[#vec3_float_32]]
; CHECK: %[[#arg1:]] = OpFunctionParameter %[[#vec3_float_32]]
; CHECK: %[[#]] = OpExtInst %[[#vec3_float_32]] %[[#op_ext_glsl]] Cross %[[#arg0]] %[[#arg1]]
- %hlsl.cross = call <3 x float> @llvm.spv.cross.v4f32(<3 x float> %a, <3 x float> %b)
+ %hlsl.cross = call <3 x float> @llvm.spv.cross.v3f32(<3 x float> %a, <3 x float> %b)
ret <3 x float> %hlsl.cross
}
-declare <3 x half> @llvm.spv.cross.v4f16(<3 x half>, <3 x half>)
-declare <3 x float> @llvm.spv.cross.v4f32(<3 x float>, <3 x float>)
+declare <3 x half> @llvm.spv.cross.v3f16(<3 x half>, <3 x half>)
+declare <3 x float> @llvm.spv.cross.v3f32(<3 x float>, <3 x float>)
diff --git a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/length.ll b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/length.ll
index b4a9d8e0..1ac862b 100644
--- a/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/length.ll
+++ b/llvm/test/CodeGen/SPIRV/hlsl-intrinsics/length.ll
@@ -11,19 +11,21 @@
define noundef half @length_half4(<4 x half> noundef %a) {
entry:
- ; CHECK: %[[#arg0:]] = OpFunctionParameter %[[#]]
+ ; CHECK: %[[#]] = OpFunction %[[#float_16]] None %[[#]]
+ ; CHECK: %[[#arg0:]] = OpFunctionParameter %[[#vec4_float_16]]
; CHECK: %[[#]] = OpExtInst %[[#float_16]] %[[#op_ext_glsl]] Length %[[#arg0]]
- %hlsl.length = call half @llvm.spv.length.v4f16(<4 x half> %a)
+ %hlsl.length = call half @llvm.spv.length.f16(<4 x half> %a)
ret half %hlsl.length
}
define noundef float @length_float4(<4 x float> noundef %a) {
entry:
- ; CHECK: %[[#arg0:]] = OpFunctionParameter %[[#]]
+ ; CHECK: %[[#]] = OpFunction %[[#float_32]] None %[[#]]
+ ; CHECK: %[[#arg0:]] = OpFunctionParameter %[[#vec4_float_32]]
; CHECK: %[[#]] = OpExtInst %[[#float_32]] %[[#op_ext_glsl]] Length %[[#arg0]]
- %hlsl.length = call float @llvm.spv.length.v4f32(<4 x float> %a)
+ %hlsl.length = call float @llvm.spv.length.f32(<4 x float> %a)
ret float %hlsl.length
}
-declare half @llvm.spv.length.v4f16(<4 x half>)
-declare float @llvm.spv.length.v4f32(<4 x float>)
+declare half @llvm.spv.length.f16(<4 x half>)
+declare float @llvm.spv.length.f32(<4 x float>)
diff --git a/llvm/test/CodeGen/SPIRV/iaddcarry-builtin.ll b/llvm/test/CodeGen/SPIRV/iaddcarry-builtin.ll
index 8f14eba2..49aaa45 100644
--- a/llvm/test/CodeGen/SPIRV/iaddcarry-builtin.ll
+++ b/llvm/test/CodeGen/SPIRV/iaddcarry-builtin.ll
@@ -25,9 +25,7 @@
; CHECK-SPIRV-DAG: [[v4uint:%[a-z0-9_]+]] = OpTypeVector [[uint]] 4
; CHECK-SPIRV-DAG: [[vecstruct:%[a-z0-9_]+]] = OpTypeStruct [[v4uint]] [[v4uint]]
; CHECK-SPIRV-DAG: [[_ptr_Function_vecstruct:%[a-z0-9_]+]] = OpTypePointer Function [[vecstruct]]
-; CHECK-SPIRV-DAG: [[struct_anon:%[a-z0-9_.]+]] = OpTypeStruct [[uint]] [[uint]]
-; CHECK-SPIRV-DAG: [[_ptr_Function_struct_anon:%[a-z0-9_]+]] = OpTypePointer Function [[struct_anon]]
-; CHECK-SPIRV-DAG: [[_ptr_Generic_struct_anon:%[a-z0-9_]+]] = OpTypePointer Generic [[struct_anon]]
+; CHECK-SPIRV-DAG: [[_ptr_Generic_i32struct:%[a-z0-9_]+]] = OpTypePointer Generic [[i32struct]]
define spir_func void @test_builtin_iaddcarrycc(i8 %a, i8 %b) {
entry:
@@ -116,9 +114,9 @@ define spir_func void @test_builtin_iaddcarry_anon(i32 %a, i32 %b) {
; CHECK-SPIRV: [[a_4:%[a-z0-9_]+]] = OpFunctionParameter [[uint]]
; CHECK-SPIRV: [[b_4:%[a-z0-9_]+]] = OpFunctionParameter [[uint]]
; CHECK-SPIRV: [[entry_4:%[a-z0-9_]+]] = OpLabel
-; CHECK-SPIRV: [[var_59:%[a-z0-9_]+]] = OpVariable [[_ptr_Function_struct_anon]] Function
-; CHECK-SPIRV: [[var_61:%[a-z0-9_]+]] = OpPtrCastToGeneric [[_ptr_Generic_struct_anon]] [[var_59]]
-; CHECK-SPIRV: [[var_62:%[a-z0-9_]+]] = OpIAddCarry [[struct_anon]] [[a_4]] [[b_4]]
+; CHECK-SPIRV: [[var_59:%[a-z0-9_]+]] = OpVariable [[_ptr_Function_i32struct]] Function
+; CHECK-SPIRV: [[var_61:%[a-z0-9_]+]] = OpPtrCastToGeneric [[_ptr_Generic_i32struct]] [[var_59]]
+; CHECK-SPIRV: [[var_62:%[a-z0-9_]+]] = OpIAddCarry [[i32struct]] [[a_4]] [[b_4]]
; CHECK-SPIRV: OpStore [[var_61]] [[var_62]]
declare void @_Z17__spirv_IAddCarryIiiE4anonIT_T0_ES1_S2_(ptr addrspace(4) sret(%struct.anon) align 4, i32, i32)
diff --git a/llvm/test/CodeGen/SPIRV/image-unoptimized.ll b/llvm/test/CodeGen/SPIRV/image-unoptimized.ll
index 0ce9c73..d7d5b1d 100644
--- a/llvm/test/CodeGen/SPIRV/image-unoptimized.ll
+++ b/llvm/test/CodeGen/SPIRV/image-unoptimized.ll
@@ -1,7 +1,7 @@
; RUN: llc -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
-; CHECK: %[[#TypeImage:]] = OpTypeImage
-; CHECK: %[[#TypeSampler:]] = OpTypeSampler
+; CHECK-DAG: %[[#TypeImage:]] = OpTypeImage
+; CHECK-DAG: %[[#TypeSampler:]] = OpTypeSampler
; CHECK-DAG: %[[#TypeImagePtr:]] = OpTypePointer {{.*}} %[[#TypeImage]]
; CHECK-DAG: %[[#TypeSamplerPtr:]] = OpTypePointer {{.*}} %[[#TypeSampler]]
diff --git a/llvm/test/CodeGen/SPIRV/isubborrow-builtin.ll b/llvm/test/CodeGen/SPIRV/isubborrow-builtin.ll
index 08b4d2a..ca842d2f 100644
--- a/llvm/test/CodeGen/SPIRV/isubborrow-builtin.ll
+++ b/llvm/test/CodeGen/SPIRV/isubborrow-builtin.ll
@@ -23,9 +23,7 @@
; CHECK-SPIRV-DAG: [[v4uint:%[a-z0-9_]+]] = OpTypeVector [[uint]] 4
; CHECK-SPIRV-DAG: [[vecstruct:%[a-z0-9_]+]] = OpTypeStruct [[v4uint]] [[v4uint]]
; CHECK-SPIRV-DAG: [[_ptr_Function_vecstruct:%[a-z0-9_]+]] = OpTypePointer Function [[vecstruct]]
-; CHECK-SPIRV-DAG: [[struct_anon:%[a-z0-9_.]+]] = OpTypeStruct [[uint]] [[uint]]
-; CHECK-SPIRV-DAG: [[_ptr_Function_struct_anon:%[a-z0-9_]+]] = OpTypePointer Function [[struct_anon]]
-; CHECK-SPIRV-DAG: [[_ptr_Generic_struct_anon:%[a-z0-9_]+]] = OpTypePointer Generic [[struct_anon]]
+; CHECK-SPIRV-DAG: [[_ptr_Generic_i32struct:%[a-z0-9_]+]] = OpTypePointer Generic [[i32struct]]
define spir_func void @test_builtin_isubborrowcc(i8 %a, i8 %b) {
entry:
@@ -114,9 +112,9 @@ define spir_func void @test_builtin_isubborrow_anon(i32 %a, i32 %b) {
; CHECK-SPIRV: [[a_4:%[a-z0-9_]+]] = OpFunctionParameter [[uint]]
; CHECK-SPIRV: [[b_4:%[a-z0-9_]+]] = OpFunctionParameter [[uint]]
; CHECK-SPIRV: [[entry_4:%[a-z0-9_]+]] = OpLabel
-; CHECK-SPIRV: [[var_59:%[a-z0-9_]+]] = OpVariable [[_ptr_Function_struct_anon]] Function
-; CHECK-SPIRV: [[var_61:%[a-z0-9_]+]] = OpPtrCastToGeneric [[_ptr_Generic_struct_anon]] [[var_59]]
-; CHECK-SPIRV: [[var_62:%[a-z0-9_]+]] = OpISubBorrow [[struct_anon]] [[a_4]] [[b_4]]
+; CHECK-SPIRV: [[var_59:%[a-z0-9_]+]] = OpVariable [[_ptr_Function_i32struct]] Function
+; CHECK-SPIRV: [[var_61:%[a-z0-9_]+]] = OpPtrCastToGeneric [[_ptr_Generic_i32struct]] [[var_59]]
+; CHECK-SPIRV: [[var_62:%[a-z0-9_]+]] = OpISubBorrow [[i32struct]] [[a_4]] [[b_4]]
; CHECK-SPIRV: OpStore [[var_61]] [[var_62]]
declare void @_Z18__spirv_ISubBorrowIiiE4anonIT_T0_ES1_S2_(ptr addrspace(4) sret(%struct.anon) align 4, i32, i32)
diff --git a/llvm/test/CodeGen/SPIRV/keep-tracked-const.ll b/llvm/test/CodeGen/SPIRV/keep-tracked-const.ll
index 0dc8623..61d06fe 100644
--- a/llvm/test/CodeGen/SPIRV/keep-tracked-const.ll
+++ b/llvm/test/CodeGen/SPIRV/keep-tracked-const.ll
@@ -3,9 +3,9 @@
; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
-; CHECK-SPIRV: %[[#Int:]] = OpTypeInt 32 0
-; CHECK-SPIRV: %[[#C0:]] = OpConstant %[[#Int]] 0
-; CHECK-SPIRV: %[[#C1:]] = OpConstant %[[#Int]] 1
+; CHECK-SPIRV-DAG: %[[#Int:]] = OpTypeInt 32 0
+; CHECK-SPIRV-DAG: %[[#C0:]] = OpConstant %[[#Int]] 0
+; CHECK-SPIRV-DAG: %[[#C1:]] = OpConstant %[[#Int]] 1
; CHECK-SPIRV: OpSelect %[[#Int]] %[[#]] %[[#C1]] %[[#C0]]
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/fshl.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/fshl.ll
index 2d5b309..25b5304 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/fshl.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/fshl.ll
@@ -1,21 +1,21 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
-; CHECK-SPIRV: OpName %[[#NAME_FSHL_FUNC_32:]] "spirv.llvm_fshl_i32"
-; CHECK-SPIRV: OpName %[[#NAME_FSHL_FUNC_16:]] "spirv.llvm_fshl_i16"
-; CHECK-SPIRV: OpName %[[#NAME_FSHL_FUNC_VEC_INT_16:]] "spirv.llvm_fshl_v2i16"
-; CHECK-SPIRV: %[[#TYPE_INT_32:]] = OpTypeInt 32 0
-; CHECK-SPIRV: %[[#TYPE_ORIG_FUNC_32:]] = OpTypeFunction %[[#TYPE_INT_32]] %[[#TYPE_INT_32]] %[[#TYPE_INT_32]]
-; CHECK-SPIRV: %[[#TYPE_INT_16:]] = OpTypeInt 16 0
-; CHECK-SPIRV: %[[#TYPE_ORIG_FUNC_16:]] = OpTypeFunction %[[#TYPE_INT_16]] %[[#TYPE_INT_16]] %[[#TYPE_INT_16]]
-; CHECK-SPIRV: %[[#TYPE_VEC_INT_16:]] = OpTypeVector %[[#TYPE_INT_16]] 2
-; CHECK-SPIRV: %[[#TYPE_ORIG_FUNC_VEC_INT_16:]] = OpTypeFunction %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]]
-; CHECK-SPIRV: %[[#TYPE_FSHL_FUNC_32:]] = OpTypeFunction %[[#TYPE_INT_32]] %[[#TYPE_INT_32]] %[[#TYPE_INT_32]] %[[#TYPE_INT_32]]
-; CHECK-SPIRV: %[[#TYPE_FSHL_FUNC_16:]] = OpTypeFunction %[[#TYPE_INT_16]] %[[#TYPE_INT_16]] %[[#TYPE_INT_16]] %[[#TYPE_INT_16]]
-; CHECK-SPIRV: %[[#TYPE_FSHL_FUNC_VEC_INT_16:]] = OpTypeFunction %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]]
-; CHECK-SPIRV-DAG: %[[#CONST_ROTATE_32:]] = OpConstant %[[#TYPE_INT_32]] 8
-; CHECK-SPIRV-DAG: %[[#CONST_ROTATE_16:]] = OpConstant %[[#TYPE_INT_16]] 8
-; CHECK-SPIRV: %[[#CONST_ROTATE_VEC_INT_16:]] = OpConstantComposite %[[#TYPE_VEC_INT_16]] %[[#CONST_ROTATE_16]] %[[#CONST_ROTATE_16]]
-; CHECK-SPIRV-DAG: %[[#CONST_TYPE_SIZE_32:]] = OpConstant %[[#TYPE_INT_32]] 32
+; CHECK-SPIRV-DAG: OpName %[[#NAME_FSHL_FUNC_32:]] "spirv.llvm_fshl_i32"
+; CHECK-SPIRV-DAG: OpName %[[#NAME_FSHL_FUNC_16:]] "spirv.llvm_fshl_i16"
+; CHECK-SPIRV-DAG: OpName %[[#NAME_FSHL_FUNC_VEC_INT_16:]] "spirv.llvm_fshl_v2i16"
+; CHECK-SPIRV-DAG: %[[#TYPE_INT_32:]] = OpTypeInt 32 0
+; CHECK-SPIRV-DAG: %[[#TYPE_ORIG_FUNC_32:]] = OpTypeFunction %[[#TYPE_INT_32]] %[[#TYPE_INT_32]] %[[#TYPE_INT_32]]
+; CHECK-SPIRV-DAG: %[[#TYPE_INT_16:]] = OpTypeInt 16 0
+; CHECK-SPIRV-DAG: %[[#TYPE_ORIG_FUNC_16:]] = OpTypeFunction %[[#TYPE_INT_16]] %[[#TYPE_INT_16]] %[[#TYPE_INT_16]]
+; CHECK-SPIRV-DAG: %[[#TYPE_VEC_INT_16:]] = OpTypeVector %[[#TYPE_INT_16]] 2
+; CHECK-SPIRV-DAG: %[[#TYPE_ORIG_FUNC_VEC_INT_16:]] = OpTypeFunction %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]]
+; CHECK-SPIRV-DAG: %[[#TYPE_FSHL_FUNC_32:]] = OpTypeFunction %[[#TYPE_INT_32]] %[[#TYPE_INT_32]] %[[#TYPE_INT_32]] %[[#TYPE_INT_32]]
+; CHECK-SPIRV-DAG: %[[#TYPE_FSHL_FUNC_16:]] = OpTypeFunction %[[#TYPE_INT_16]] %[[#TYPE_INT_16]] %[[#TYPE_INT_16]] %[[#TYPE_INT_16]]
+; CHECK-SPIRV-DAG: %[[#TYPE_FSHL_FUNC_VEC_INT_16:]] = OpTypeFunction %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]]
+; CHECK-SPIRV-DAG: %[[#CONST_ROTATE_32:]] = OpConstant %[[#TYPE_INT_32]] 8
+; CHECK-SPIRV-DAG: %[[#CONST_ROTATE_16:]] = OpConstant %[[#TYPE_INT_16]] 8
+; CHECK-SPIRV-DAG: %[[#CONST_ROTATE_VEC_INT_16:]] = OpConstantComposite %[[#TYPE_VEC_INT_16]] %[[#CONST_ROTATE_16]] %[[#CONST_ROTATE_16]]
+; CHECK-SPIRV-DAG: %[[#CONST_TYPE_SIZE_32:]] = OpConstant %[[#TYPE_INT_32]] 32
; CHECK-SPIRV: %[[#]] = OpFunction %[[#TYPE_INT_32]] {{.*}} %[[#TYPE_ORIG_FUNC_32]]
; CHECK-SPIRV: %[[#X:]] = OpFunctionParameter %[[#TYPE_INT_32]]
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/fshr.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/fshr.ll
index 4cf5ca5..55fb2d9 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/fshr.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/fshr.ll
@@ -1,20 +1,20 @@
; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s --check-prefix=CHECK-SPIRV
-; CHECK-SPIRV: OpName %[[#NAME_FSHR_FUNC_32:]] "spirv.llvm_fshr_i32"
-; CHECK-SPIRV: OpName %[[#NAME_FSHR_FUNC_16:]] "spirv.llvm_fshr_i16"
-; CHECK-SPIRV: OpName %[[#NAME_FSHR_FUNC_VEC_INT_16:]] "spirv.llvm_fshr_v2i16"
-; CHECK-SPIRV: %[[#TYPE_INT_32:]] = OpTypeInt 32 0
-; CHECK-SPIRV: %[[#TYPE_ORIG_FUNC_32:]] = OpTypeFunction %[[#TYPE_INT_32]] %[[#TYPE_INT_32]] %[[#TYPE_INT_32]]
-; CHECK-SPIRV: %[[#TYPE_INT_16:]] = OpTypeInt 16 0
-; CHECK-SPIRV: %[[#TYPE_ORIG_FUNC_16:]] = OpTypeFunction %[[#TYPE_INT_16]] %[[#TYPE_INT_16]] %[[#TYPE_INT_16]]
-; CHECK-SPIRV: %[[#TYPE_VEC_INT_16:]] = OpTypeVector %[[#TYPE_INT_16]] 2
-; CHECK-SPIRV: %[[#TYPE_ORIG_FUNC_VEC_INT_16:]] = OpTypeFunction %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]]
-; CHECK-SPIRV: %[[#TYPE_FSHR_FUNC_32:]] = OpTypeFunction %[[#TYPE_INT_32]] %[[#TYPE_INT_32]] %[[#TYPE_INT_32]] %[[#TYPE_INT_32]]
-; CHECK-SPIRV: %[[#TYPE_FSHR_FUNC_16:]] = OpTypeFunction %[[#TYPE_INT_16]] %[[#TYPE_INT_16]] %[[#TYPE_INT_16]] %[[#TYPE_INT_16]]
-; CHECK-SPIRV: %[[#TYPE_FSHR_FUNC_VEC_INT_16:]] = OpTypeFunction %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]]
+; CHECK-SPIRV-DAG: OpName %[[#NAME_FSHR_FUNC_32:]] "spirv.llvm_fshr_i32"
+; CHECK-SPIRV-DAG: OpName %[[#NAME_FSHR_FUNC_16:]] "spirv.llvm_fshr_i16"
+; CHECK-SPIRV-DAG: OpName %[[#NAME_FSHR_FUNC_VEC_INT_16:]] "spirv.llvm_fshr_v2i16"
+; CHECK-SPIRV-DAG: %[[#TYPE_INT_32:]] = OpTypeInt 32 0
+; CHECK-SPIRV-DAG: %[[#TYPE_ORIG_FUNC_32:]] = OpTypeFunction %[[#TYPE_INT_32]] %[[#TYPE_INT_32]] %[[#TYPE_INT_32]]
+; CHECK-SPIRV-DAG: %[[#TYPE_INT_16:]] = OpTypeInt 16 0
+; CHECK-SPIRV-DAG: %[[#TYPE_ORIG_FUNC_16:]] = OpTypeFunction %[[#TYPE_INT_16]] %[[#TYPE_INT_16]] %[[#TYPE_INT_16]]
+; CHECK-SPIRV-DAG: %[[#TYPE_VEC_INT_16:]] = OpTypeVector %[[#TYPE_INT_16]] 2
+; CHECK-SPIRV-DAG: %[[#TYPE_ORIG_FUNC_VEC_INT_16:]] = OpTypeFunction %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]]
+; CHECK-SPIRV-DAG: %[[#TYPE_FSHR_FUNC_32:]] = OpTypeFunction %[[#TYPE_INT_32]] %[[#TYPE_INT_32]] %[[#TYPE_INT_32]] %[[#TYPE_INT_32]]
+; CHECK-SPIRV-DAG: %[[#TYPE_FSHR_FUNC_16:]] = OpTypeFunction %[[#TYPE_INT_16]] %[[#TYPE_INT_16]] %[[#TYPE_INT_16]] %[[#TYPE_INT_16]]
+; CHECK-SPIRV-DAG: %[[#TYPE_FSHR_FUNC_VEC_INT_16:]] = OpTypeFunction %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]] %[[#TYPE_VEC_INT_16]]
; CHECK-SPIRV-DAG: %[[#CONST_ROTATE_32:]] = OpConstant %[[#TYPE_INT_32]] 8
; CHECK-SPIRV-DAG: %[[#CONST_ROTATE_16:]] = OpConstant %[[#TYPE_INT_16]] 8
-; CHECK-SPIRV: %[[#CONST_ROTATE_VEC_INT_16:]] = OpConstantComposite %[[#TYPE_VEC_INT_16]] %[[#CONST_ROTATE_16]] %[[#CONST_ROTATE_16]]
+; CHECK-SPIRV-DAG: %[[#CONST_ROTATE_VEC_INT_16:]] = OpConstantComposite %[[#TYPE_VEC_INT_16]] %[[#CONST_ROTATE_16]] %[[#CONST_ROTATE_16]]
; CHECK-SPIRV-DAG: %[[#CONST_TYPE_SIZE_32:]] = OpConstant %[[#TYPE_INT_32]] 32
; CHECK-SPIRV: %[[#]] = OpFunction %[[#TYPE_INT_32]] {{.*}} %[[#TYPE_ORIG_FUNC_32]]
diff --git a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memset.ll b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memset.ll
index e7a9869..d5e70ae 100644
--- a/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memset.ll
+++ b/llvm/test/CodeGen/SPIRV/llvm-intrinsics/memset.ll
@@ -12,17 +12,17 @@
; CHECK-DAG: %[[#Int8Ptr:]] = OpTypePointer Generic %[[#Int8]]
; CHECK-DAG: %[[#Const4:]] = OpConstant %[[#Int32]] 4
-; CHECK: %[[#Int8x4:]] = OpTypeArray %[[#Int8]] %[[#Const4]]
+; CHECK-DAG: %[[#Int8x4:]] = OpTypeArray %[[#Int8]] %[[#Const4]]
; CHECK-DAG: %[[#Const12:]] = OpConstant %[[#Int32]] 12
-; CHECK: %[[#Int8x12:]] = OpTypeArray %[[#Int8]] %[[#Const12]]
+; CHECK-DAG: %[[#Int8x12:]] = OpTypeArray %[[#Int8]] %[[#Const12]]
; CHECK-DAG: %[[#Const21:]] = OpConstant %[[#Int8]] 21
; CHECK-DAG: %[[#False:]] = OpConstantFalse %[[#]]
; CHECK-DAG: %[[#ConstComp:]] = OpConstantComposite %[[#Int8x4]] %[[#Const21]] %[[#Const21]] %[[#Const21]] %[[#Const21]]
; CHECK-DAG: %[[#ConstNull:]] = OpConstantNull %[[#Int8x12]]
-; CHECK: %[[#VarComp:]] = OpVariable %[[#]] UniformConstant %[[#ConstComp]]
-; CHECK: %[[#VarNull:]] = OpVariable %[[#]] UniformConstant %[[#ConstNull]]
+; CHECK-DAG: %[[#VarComp:]] = OpVariable %[[#]] UniformConstant %[[#ConstComp]]
+; CHECK-DAG: %[[#VarNull:]] = OpVariable %[[#]] UniformConstant %[[#ConstNull]]
; CHECK-DAG: %[[#Int8PtrConst:]] = OpTypePointer UniformConstant %[[#Int8]]
; CHECK: OpCopyMemorySized %[[#Target:]] %[[#Source:]] %[[#Const12]] Aligned 4
diff --git a/llvm/test/CodeGen/SPIRV/logical-access-chain.ll b/llvm/test/CodeGen/SPIRV/logical-access-chain.ll
index 39f6d33..d56678e 100644
--- a/llvm/test/CodeGen/SPIRV/logical-access-chain.ll
+++ b/llvm/test/CodeGen/SPIRV/logical-access-chain.ll
@@ -1,10 +1,10 @@
; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
-; CHECK: [[uint:%[0-9]+]] = OpTypeInt 32 0
-; CHECK: [[uint2:%[0-9]+]] = OpTypeVector [[uint]] 2
-; CHECK: [[uint_1:%[0-9]+]] = OpConstant [[uint]] 1
-; CHECK: [[ptr_uint:%[0-9]+]] = OpTypePointer Function [[uint]]
-; CHECK: [[ptr_uint2:%[0-9]+]] = OpTypePointer Function [[uint2]]
+; CHECK-DAG: [[uint:%[0-9]+]] = OpTypeInt 32 0
+; CHECK-DAG: [[uint2:%[0-9]+]] = OpTypeVector [[uint]] 2
+; CHECK-DAG: [[uint_1:%[0-9]+]] = OpConstant [[uint]] 1
+; CHECK-DAG: [[ptr_uint:%[0-9]+]] = OpTypePointer Function [[uint]]
+; CHECK-DAG: [[ptr_uint2:%[0-9]+]] = OpTypePointer Function [[uint2]]
define void @main() #1 {
entry:
diff --git a/llvm/test/CodeGen/SPIRV/opencl/degrees.ll b/llvm/test/CodeGen/SPIRV/opencl/degrees.ll
index 88f9783..b8d4f52 100644
--- a/llvm/test/CodeGen/SPIRV/opencl/degrees.ll
+++ b/llvm/test/CodeGen/SPIRV/opencl/degrees.ll
@@ -3,7 +3,7 @@
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
-; CHECK-DAG: %[[#op_ext_glsl:]] = OpExtInstImport "OpenCL.std"
+; CHECK-DAG: %[[#op_ext_ocl:]] = OpExtInstImport "OpenCL.std"
; CHECK-DAG: %[[#float_32:]] = OpTypeFloat 32
; CHECK-DAG: %[[#float_16:]] = OpTypeFloat 16
@@ -20,7 +20,7 @@ declare <4 x half> @llvm.spv.degrees.v4f16(<4 x half>)
define noundef float @degrees_float(float noundef %a) {
entry:
; CHECK: %[[#float_32_arg:]] = OpFunctionParameter %[[#float_32]]
-; CHECK: %[[#]] = OpExtInst %[[#float_32]] %[[#op_ext_glsl]] degrees %[[#float_32_arg]]
+; CHECK: %[[#]] = OpExtInst %[[#float_32]] %[[#op_ext_ocl]] degrees %[[#float_32_arg]]
%elt.degrees = call float @llvm.spv.degrees.f32(float %a)
ret float %elt.degrees
}
@@ -28,7 +28,7 @@ entry:
define noundef half @degrees_half(half noundef %a) {
entry:
; CHECK: %[[#float_16_arg:]] = OpFunctionParameter %[[#float_16]]
-; CHECK: %[[#]] = OpExtInst %[[#float_16]] %[[#op_ext_glsl]] degrees %[[#float_16_arg]]
+; CHECK: %[[#]] = OpExtInst %[[#float_16]] %[[#op_ext_ocl]] degrees %[[#float_16_arg]]
%elt.degrees = call half @llvm.spv.degrees.f16(half %a)
ret half %elt.degrees
}
@@ -36,7 +36,7 @@ entry:
define noundef <4 x float> @degrees_float_vector(<4 x float> noundef %a) {
entry:
; CHECK: %[[#vec4_float_32_arg:]] = OpFunctionParameter %[[#vec4_float_32]]
-; CHECK: %[[#]] = OpExtInst %[[#vec4_float_32]] %[[#op_ext_glsl]] degrees %[[#vec4_float_32_arg]]
+; CHECK: %[[#]] = OpExtInst %[[#vec4_float_32]] %[[#op_ext_ocl]] degrees %[[#vec4_float_32_arg]]
%elt.degrees = call <4 x float> @llvm.spv.degrees.v4f32(<4 x float> %a)
ret <4 x float> %elt.degrees
}
@@ -44,7 +44,7 @@ entry:
define noundef <4 x half> @degrees_half_vector(<4 x half> noundef %a) {
entry:
; CHECK: %[[#vec4_float_16_arg:]] = OpFunctionParameter %[[#vec4_float_16]]
-; CHECK: %[[#]] = OpExtInst %[[#vec4_float_16]] %[[#op_ext_glsl]] degrees %[[#vec4_float_16_arg]]
+; CHECK: %[[#]] = OpExtInst %[[#vec4_float_16]] %[[#op_ext_ocl]] degrees %[[#vec4_float_16_arg]]
%elt.degrees = call <4 x half> @llvm.spv.degrees.v4f16(<4 x half> %a)
ret <4 x half> %elt.degrees
}
diff --git a/llvm/test/CodeGen/SPIRV/opencl/radians.ll b/llvm/test/CodeGen/SPIRV/opencl/radians.ll
index f7bb8d5..5b4f26a 100644
--- a/llvm/test/CodeGen/SPIRV/opencl/radians.ll
+++ b/llvm/test/CodeGen/SPIRV/opencl/radians.ll
@@ -3,7 +3,7 @@
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
-; CHECK-DAG: %[[#op_ext_glsl:]] = OpExtInstImport "OpenCL.std"
+; CHECK-DAG: %[[#op_ext_ocl:]] = OpExtInstImport "OpenCL.std"
; CHECK-DAG: %[[#float_32:]] = OpTypeFloat 32
; CHECK-DAG: %[[#float_16:]] = OpTypeFloat 16
@@ -20,7 +20,7 @@ declare <4 x half> @llvm.spv.radians.v4f16(<4 x half>)
define noundef float @radians_float(float noundef %a) {
entry:
; CHECK: %[[#float_32_arg:]] = OpFunctionParameter %[[#float_32]]
-; CHECK: %[[#]] = OpExtInst %[[#float_32]] %[[#op_ext_glsl]] radians %[[#float_32_arg]]
+; CHECK: %[[#]] = OpExtInst %[[#float_32]] %[[#op_ext_ocl]] radians %[[#float_32_arg]]
%elt.radians = call float @llvm.spv.radians.f32(float %a)
ret float %elt.radians
}
@@ -28,7 +28,7 @@ entry:
define noundef half @radians_half(half noundef %a) {
entry:
; CHECK: %[[#float_16_arg:]] = OpFunctionParameter %[[#float_16]]
-; CHECK: %[[#]] = OpExtInst %[[#float_16]] %[[#op_ext_glsl]] radians %[[#float_16_arg]]
+; CHECK: %[[#]] = OpExtInst %[[#float_16]] %[[#op_ext_ocl]] radians %[[#float_16_arg]]
%elt.radians = call half @llvm.spv.radians.f16(half %a)
ret half %elt.radians
}
@@ -36,7 +36,7 @@ entry:
define noundef <4 x float> @radians_float_vector(<4 x float> noundef %a) {
entry:
; CHECK: %[[#vec4_float_32_arg:]] = OpFunctionParameter %[[#vec4_float_32]]
-; CHECK: %[[#]] = OpExtInst %[[#vec4_float_32]] %[[#op_ext_glsl]] radians %[[#vec4_float_32_arg]]
+; CHECK: %[[#]] = OpExtInst %[[#vec4_float_32]] %[[#op_ext_ocl]] radians %[[#vec4_float_32_arg]]
%elt.radians = call <4 x float> @llvm.spv.radians.v4f32(<4 x float> %a)
ret <4 x float> %elt.radians
}
@@ -44,7 +44,7 @@ entry:
define noundef <4 x half> @radians_half_vector(<4 x half> noundef %a) {
entry:
; CHECK: %[[#vec4_float_16_arg:]] = OpFunctionParameter %[[#vec4_float_16]]
-; CHECK: %[[#]] = OpExtInst %[[#vec4_float_16]] %[[#op_ext_glsl]] radians %[[#vec4_float_16_arg]]
+; CHECK: %[[#]] = OpExtInst %[[#vec4_float_16]] %[[#op_ext_ocl]] radians %[[#vec4_float_16_arg]]
%elt.radians = call <4 x half> @llvm.spv.radians.v4f16(<4 x half> %a)
ret <4 x half> %elt.radians
}
diff --git a/llvm/test/CodeGen/SPIRV/pointers/PtrCast-null-in-OpSpecConstantOp.ll b/llvm/test/CodeGen/SPIRV/pointers/PtrCast-null-in-OpSpecConstantOp.ll
index 99e2c3e..dee16da 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/PtrCast-null-in-OpSpecConstantOp.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/PtrCast-null-in-OpSpecConstantOp.ll
@@ -5,10 +5,8 @@
; CHECK-DAG: %[[Struct:.*]] = OpTypeStruct %[[Array]]
; CHECK-DAG: %[[Zero:.*]] = OpTypeInt 64 0
; CHECK-DAG: %[[Null:.*]] = OpConstantNull %[[Zero]]
-; CHECK-DAG: %[[R1:.*]] = OpConstantComposite %[[Array]] %[[Null]]
-; CHECK-DAG: %[[#]] = OpConstantComposite %[[Struct]] %[[R1]]
-; CHECK-DAG: %[[R2:.*]] = OpConstantComposite %[[Array]] %[[Null]]
-; CHECK-DAG: %[[#]] = OpConstantComposite %[[Struct]] %[[R2]]
+; CHECK-DAG: %[[R:.*]] = OpConstantComposite %[[Array]] %[[Null]]
+; CHECK-DAG: %[[#]] = OpConstantComposite %[[Struct]] %[[R]]
@G1 = addrspace(1) constant { [1 x ptr addrspace(4)] } { [1 x ptr addrspace(4)] [ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4))] }
@G2 = addrspace(1) constant { [1 x ptr addrspace(4)] } { [1 x ptr addrspace(4)] [ptr addrspace(4) addrspacecast (ptr addrspace(1) null to ptr addrspace(4))] }
diff --git a/llvm/test/CodeGen/SPIRV/pointers/struct-opaque-pointers.ll b/llvm/test/CodeGen/SPIRV/pointers/struct-opaque-pointers.ll
index 03ecf5e..59a2423 100644
--- a/llvm/test/CodeGen/SPIRV/pointers/struct-opaque-pointers.ll
+++ b/llvm/test/CodeGen/SPIRV/pointers/struct-opaque-pointers.ll
@@ -1,12 +1,12 @@
; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
-; CHECK: %[[TyInt64:.*]] = OpTypeInt 64 0
-; CHECK: %[[TyInt64Ptr:.*]] = OpTypePointer {{[a-zA-Z]+}} %[[TyInt64]]
-; CHECK: %[[TyStruct:.*]] = OpTypeStruct %[[TyInt64Ptr]] %[[TyInt64Ptr]]
-; CHECK: %[[ConstStruct:.*]] = OpConstantComposite %[[TyStruct]] %[[ConstField:.*]] %[[ConstField]]
-; CHECK: %[[TyStructPtr:.*]] = OpTypePointer {{[a-zA-Z]+}} %[[TyStruct]]
-; CHECK: OpVariable %[[TyStructPtr]] {{[a-zA-Z]+}} %[[ConstStruct]]
+; CHECK-DAG: %[[TyInt64:.*]] = OpTypeInt 64 0
+; CHECK-DAG: %[[TyInt64Ptr:.*]] = OpTypePointer {{[a-zA-Z]+}} %[[TyInt64]]
+; CHECK-DAG: %[[TyStruct:.*]] = OpTypeStruct %[[TyInt64Ptr]] %[[TyInt64Ptr]]
+; CHECK-DAG: %[[ConstStruct:.*]] = OpConstantComposite %[[TyStruct]] %[[ConstField:.*]] %[[ConstField]]
+; CHECK-DAG: %[[TyStructPtr:.*]] = OpTypePointer {{[a-zA-Z]+}} %[[TyStruct]]
+; CHECK-DAG: OpVariable %[[TyStructPtr]] {{[a-zA-Z]+}} %[[ConstStruct]]
@a = addrspace(1) constant i64 42
@struct = addrspace(1) global {ptr addrspace(1), ptr addrspace(1)} { ptr addrspace(1) @a, ptr addrspace(1) @a }
diff --git a/llvm/test/CodeGen/SPIRV/structurizer/HLSLControlFlowHint-pass-check.ll b/llvm/test/CodeGen/SPIRV/structurizer/HLSLControlFlowHint-pass-check.ll
deleted file mode 100644
index 9911b31..0000000
--- a/llvm/test/CodeGen/SPIRV/structurizer/HLSLControlFlowHint-pass-check.ll
+++ /dev/null
@@ -1,90 +0,0 @@
-; RUN: opt -passes='spirv-structurizer' -S -mtriple=spirv-unknown-unknown %s | FileCheck %s
-
-; CHECK-LABEL: define spir_func noundef i32 @test_branch
-; CHECK: call void @llvm.spv.selection.merge.p0(ptr blockaddress(@test_branch, %if.end), i32 1)
-; CHECK-NEXT: br i1 %cmp, label %if.then, label %if.else, !hlsl.controlflow.hint !{{[0-9]+}}
-define spir_func noundef i32 @test_branch(i32 noundef %X) {
-entry:
- %X.addr = alloca i32, align 4
- %resp = alloca i32, align 4
- store i32 %X, ptr %X.addr, align 4
- %0 = load i32, ptr %X.addr, align 4
- %cmp = icmp sgt i32 %0, 0
- br i1 %cmp, label %if.then, label %if.else, !hlsl.controlflow.hint !0
-
-if.then: ; preds = %entry
- %1 = load i32, ptr %X.addr, align 4
- %sub = sub nsw i32 0, %1
- store i32 %sub, ptr %resp, align 4
- br label %if.end
-
-if.else: ; preds = %entry
- %2 = load i32, ptr %X.addr, align 4
- %mul = mul nsw i32 %2, 2
- store i32 %mul, ptr %resp, align 4
- br label %if.end
-
-if.end: ; preds = %if.else, %if.then
- %3 = load i32, ptr %resp, align 4
- ret i32 %3
-}
-
-; CHECK-LABEL: define spir_func noundef i32 @test_flatten
-; CHECK: call void @llvm.spv.selection.merge.p0(ptr blockaddress(@test_flatten, %if.end), i32 2)
-; CHECK-NEXT: br i1 %cmp, label %if.then, label %if.else, !hlsl.controlflow.hint !{{[0-9]+}}
-define spir_func noundef i32 @test_flatten(i32 noundef %X) {
-entry:
- %X.addr = alloca i32, align 4
- %resp = alloca i32, align 4
- store i32 %X, ptr %X.addr, align 4
- %0 = load i32, ptr %X.addr, align 4
- %cmp = icmp sgt i32 %0, 0
- br i1 %cmp, label %if.then, label %if.else, !hlsl.controlflow.hint !1
-
-if.then: ; preds = %entry
- %1 = load i32, ptr %X.addr, align 4
- %sub = sub nsw i32 0, %1
- store i32 %sub, ptr %resp, align 4
- br label %if.end
-
-if.else: ; preds = %entry
- %2 = load i32, ptr %X.addr, align 4
- %mul = mul nsw i32 %2, 2
- store i32 %mul, ptr %resp, align 4
- br label %if.end
-
-if.end: ; preds = %if.else, %if.then
- %3 = load i32, ptr %resp, align 4
- ret i32 %3
-}
-; CHECK-LABEL: define spir_func noundef i32 @test_no_attr
-; CHECK: call void @llvm.spv.selection.merge.p0(ptr blockaddress(@test_no_attr, %if.end), i32 0)
-; CHECK-NEXT: br i1 %cmp, label %if.then, label %if.else
-define spir_func noundef i32 @test_no_attr(i32 noundef %X) {
-entry:
- %X.addr = alloca i32, align 4
- %resp = alloca i32, align 4
- store i32 %X, ptr %X.addr, align 4
- %0 = load i32, ptr %X.addr, align 4
- %cmp = icmp sgt i32 %0, 0
- br i1 %cmp, label %if.then, label %if.else
-
-if.then: ; preds = %entry
- %1 = load i32, ptr %X.addr, align 4
- %sub = sub nsw i32 0, %1
- store i32 %sub, ptr %resp, align 4
- br label %if.end
-
-if.else: ; preds = %entry
- %2 = load i32, ptr %X.addr, align 4
- %mul = mul nsw i32 %2, 2
- store i32 %mul, ptr %resp, align 4
- br label %if.end
-
-if.end: ; preds = %if.else, %if.then
- %3 = load i32, ptr %resp, align 4
- ret i32 %3
-}
-
-!0 = !{!"hlsl.controlflow.hint", i32 1}
-!1 = !{!"hlsl.controlflow.hint", i32 2}
diff --git a/llvm/test/CodeGen/SPIRV/structurizer/HLSLControlFlowHint.ll b/llvm/test/CodeGen/SPIRV/structurizer/HLSLControlFlowHint.ll
deleted file mode 100644
index 848eaf7..0000000
--- a/llvm/test/CodeGen/SPIRV/structurizer/HLSLControlFlowHint.ll
+++ /dev/null
@@ -1,91 +0,0 @@
-; RUN: llc -O0 -mtriple=spirv-unknown-unknown %s -o - | FileCheck %s
-; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv-unknown-unknown %s -o - -filetype=obj | spirv-val %}
-
-
-define spir_func noundef i32 @test_branch(i32 noundef %X) {
-entry:
-; CHECK-LABEL: ; -- Begin function test_branch
-; OpSelectionMerge %[[#]] DontFlatten
- %X.addr = alloca i32, align 4
- %resp = alloca i32, align 4
- store i32 %X, ptr %X.addr, align 4
- %0 = load i32, ptr %X.addr, align 4
- %cmp = icmp sgt i32 %0, 0
- br i1 %cmp, label %if.then, label %if.else, !hlsl.controlflow.hint !0
-
-if.then: ; preds = %entry
- %1 = load i32, ptr %X.addr, align 4
- %sub = sub nsw i32 0, %1
- store i32 %sub, ptr %resp, align 4
- br label %if.end
-
-if.else: ; preds = %entry
- %2 = load i32, ptr %X.addr, align 4
- %mul = mul nsw i32 %2, 2
- store i32 %mul, ptr %resp, align 4
- br label %if.end
-
-if.end: ; preds = %if.else, %if.then
- %3 = load i32, ptr %resp, align 4
- ret i32 %3
-}
-
-
-define spir_func noundef i32 @test_flatten(i32 noundef %X) {
-entry:
-; CHECK-LABEL: ; -- Begin function test_flatten
-; OpSelectionMerge %[[#]] Flatten
- %X.addr = alloca i32, align 4
- %resp = alloca i32, align 4
- store i32 %X, ptr %X.addr, align 4
- %0 = load i32, ptr %X.addr, align 4
- %cmp = icmp sgt i32 %0, 0
- br i1 %cmp, label %if.then, label %if.else, !hlsl.controlflow.hint !1
-
-if.then: ; preds = %entry
- %1 = load i32, ptr %X.addr, align 4
- %sub = sub nsw i32 0, %1
- store i32 %sub, ptr %resp, align 4
- br label %if.end
-
-if.else: ; preds = %entry
- %2 = load i32, ptr %X.addr, align 4
- %mul = mul nsw i32 %2, 2
- store i32 %mul, ptr %resp, align 4
- br label %if.end
-
-if.end: ; preds = %if.else, %if.then
- %3 = load i32, ptr %resp, align 4
- ret i32 %3
-}
-
-define spir_func noundef i32 @test_no_attr(i32 noundef %X) {
-entry:
-; CHECK-LABEL: ; -- Begin function test_no_attr
-; OpSelectionMerge %[[#]] None
- %X.addr = alloca i32, align 4
- %resp = alloca i32, align 4
- store i32 %X, ptr %X.addr, align 4
- %0 = load i32, ptr %X.addr, align 4
- %cmp = icmp sgt i32 %0, 0
- br i1 %cmp, label %if.then, label %if.else
-
-if.then: ; preds = %entry
- %1 = load i32, ptr %X.addr, align 4
- %sub = sub nsw i32 0, %1
- store i32 %sub, ptr %resp, align 4
- br label %if.end
-
-if.else: ; preds = %entry
- %2 = load i32, ptr %X.addr, align 4
- %mul = mul nsw i32 %2, 2
- store i32 %mul, ptr %resp, align 4
- br label %if.end
-
-if.end: ; preds = %if.else, %if.then
- %3 = load i32, ptr %resp, align 4
- ret i32 %3
-}
-
-!0 = !{!"hlsl.controlflow.hint", i32 1}
-!1 = !{!"hlsl.controlflow.hint", i32 2}
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/SampledImage.ll b/llvm/test/CodeGen/SPIRV/transcoding/SampledImage.ll
index e4c7bdb..8a90e40 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/SampledImage.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/SampledImage.ll
@@ -24,12 +24,10 @@
; CHECK-SPIRV: OpName %[[#sample_kernel_float:]] "sample_kernel_float"
; CHECK-SPIRV: OpName %[[#sample_kernel_int:]] "sample_kernel_int"
-; CHECK-SPIRV: %[[#TypeSampler:]] = OpTypeSampler
+; CHECK-SPIRV-DAG: %[[#TypeSampler:]] = OpTypeSampler
; CHECK-SPIRV-DAG: %[[#SampledImageTy:]] = OpTypeSampledImage
; CHECK-SPIRV-DAG: %[[#ConstSampler1:]] = OpConstantSampler %[[#TypeSampler]] None 0 Linear
; CHECK-SPIRV-DAG: %[[#ConstSampler2:]] = OpConstantSampler %[[#TypeSampler]] Repeat 0 Nearest
-; CHECK-SPIRV-DAG: %[[#ConstSampler3:]] = OpConstantSampler %[[#TypeSampler]] None 0 Linear
-; CHECK-SPIRV-DAG: %[[#ConstSampler4:]] = OpConstantSampler %[[#TypeSampler]] Repeat 0 Nearest
; CHECK-SPIRV: %[[#sample_kernel_float]] = OpFunction %{{.*}}
; CHECK-SPIRV: %[[#InputImage:]] = OpFunctionParameter %{{.*}}
@@ -65,13 +63,13 @@ declare spir_func target("spirv.Sampler") @__translate_sampler_initializer(i32)
; CHECK-SPIRV: %[[#InputImage:]] = OpFunctionParameter %{{.*}}
; CHECK-SPIRV: %[[#argSampl:]] = OpFunctionParameter %[[#TypeSampler]]
-; CHECK-SPIRV: %[[#SampledImage4:]] = OpSampledImage %[[#SampledImageTy]] %[[#InputImage]] %[[#ConstSampler3]]
+; CHECK-SPIRV: %[[#SampledImage4:]] = OpSampledImage %[[#SampledImageTy]] %[[#InputImage]] %[[#ConstSampler1]]
; CHECK-SPIRV: %[[#]] = OpImageSampleExplicitLod %[[#]] %[[#SampledImage4]]
; CHECK-SPIRV: %[[#SampledImage5:]] = OpSampledImage %[[#SampledImageTy]] %[[#InputImage]] %[[#argSampl]]
; CHECK-SPIRV: %[[#]] = OpImageSampleExplicitLod %[[#]] %[[#SampledImage5]]
-; CHECK-SPIRV: %[[#SampledImage6:]] = OpSampledImage %[[#SampledImageTy]] %[[#InputImage]] %[[#ConstSampler4]]
+; CHECK-SPIRV: %[[#SampledImage6:]] = OpSampledImage %[[#SampledImageTy]] %[[#InputImage]] %[[#ConstSampler2]]
; CHECK-SPIRV: %[[#]] = OpImageSampleExplicitLod %[[#]] %[[#SampledImage6]]
define dso_local spir_kernel void @sample_kernel_int(target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0) %input, <2 x float> noundef %coords, <4 x i32> addrspace(1)* nocapture noundef writeonly %results, target("spirv.Sampler") %argSampl) local_unnamed_addr {
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/cl-types.ll b/llvm/test/CodeGen/SPIRV/transcoding/cl-types.ll
index 8b326e2..55f1125 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/cl-types.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/cl-types.ll
@@ -39,7 +39,7 @@
; CHECK-SPIRV-DAG: %[[#SAMP:]] = OpTypeSampler
; CHECK-SPIRV-DAG: %[[#SAMPIMG:]] = OpTypeSampledImage %[[#IMG2D_RD]]
-; CHECK-SPIRV: %[[#SAMP_CONST:]] = OpConstantSampler %[[#SAMP]] None 0 Linear
+; CHECK-SPIRV-DAG: %[[#SAMP_CONST:]] = OpConstantSampler %[[#SAMP]] None 0 Linear
; CHECK-SPIRV: %[[#]] = OpFunctionParameter %[[#PIPE_RD]]
; CHECK-SPIRV: %[[#]] = OpFunctionParameter %[[#PIPE_WR]]
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll b/llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll
index 74dbaab..5810d9c 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/spirv-private-array-initialization.ll
@@ -14,19 +14,19 @@
; CHECK-SPIRV-DAG: %[[#twelve:]] = OpConstant %[[#i32]] 12
; CHECK-SPIRV-DAG: %[[#const_i32x3_ptr:]] = OpTypePointer UniformConstant %[[#i32x3]]
-; CHECK-SPIRV: %[[#test_arr2:]] = OpVariable %[[#const_i32x3_ptr]] UniformConstant %[[#test_arr_init]]
-; CHECK-SPIRV: %[[#test_arr:]] = OpVariable %[[#const_i32x3_ptr]] UniformConstant %[[#test_arr_init]]
+; CHECK-SPIRV-DAG: %[[#test_arr1:]] = OpVariable %[[#const_i32x3_ptr]] UniformConstant %[[#test_arr_init]]
+; CHECK-SPIRV-DAG: %[[#test_arr2:]] = OpVariable %[[#const_i32x3_ptr]] UniformConstant %[[#test_arr_init]]
; CHECK-SPIRV-DAG: %[[#i32x3_ptr:]] = OpTypePointer Function %[[#i32x3]]
-; CHECK-SPIRV: %[[#arr:]] = OpVariable %[[#i32x3_ptr]] Function
+; CHECK-SPIRV: %[[#arr1:]] = OpVariable %[[#i32x3_ptr]] Function
; CHECK-SPIRV: %[[#arr2:]] = OpVariable %[[#i32x3_ptr]] Function
-; CHECK-SPIRV-32: OpCopyMemorySized %[[#arr]] %[[#test_arr]] %[[#twelve]] Aligned 4
+; CHECK-SPIRV-32: OpCopyMemorySized %[[#arr1]] %[[#test_arr1]] %[[#twelve]] Aligned 4
; CHECK-SPIRV-32: OpCopyMemorySized %[[#arr2]] %[[#test_arr2]] %[[#twelve]] Aligned 4
; CHECK-SPIRV-64: %[[#twelvezext1:]] = OpUConvert %[[#i64:]] %[[#twelve:]]
-; CHECK-SPIRV-64: OpCopyMemorySized %[[#arr]] %[[#test_arr]] %[[#twelvezext1]] Aligned 4
+; CHECK-SPIRV-64: OpCopyMemorySized %[[#arr1]] %[[#test_arr1]] %[[#twelvezext1]] Aligned 4
; CHECK-SPIRV-64: %[[#twelvezext2:]] = OpUConvert %[[#i64:]] %[[#twelve:]]
; CHECK-SPIRV-64: OpCopyMemorySized %[[#arr2]] %[[#test_arr2]] %[[#twelvezext2]] Aligned 4
diff --git a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_non_uniform_arithmetic.ll b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_non_uniform_arithmetic.ll
index adf73fe..62b09f6 100644
--- a/llvm/test/CodeGen/SPIRV/transcoding/sub_group_non_uniform_arithmetic.ll
+++ b/llvm/test/CodeGen/SPIRV/transcoding/sub_group_non_uniform_arithmetic.ll
@@ -329,12 +329,12 @@
; CHECK-SPIRV-DAG: %[[#double:]] = OpTypeFloat 64
; CHECK-SPIRV-DAG: %[[#false:]] = OpConstantFalse %[[#bool]]
+; CHECK-SPIRV-DAG: %[[#int_32:]] = OpConstant %[[#int]] 32
; CHECK-SPIRV-DAG: %[[#ScopeSubgroup:]] = OpConstant %[[#int]] 3
; CHECK-SPIRV-DAG: %[[#char_0:]] = OpConstant %[[#char]] 0
; CHECK-SPIRV-DAG: %[[#char_10:]] = OpConstant %[[#char]] 10
; CHECK-SPIRV-DAG: %[[#short_0:]] = OpConstant %[[#short]] 0
; CHECK-SPIRV-DAG: %[[#int_0:]] = OpConstant %[[#int]] 0
-; CHECK-SPIRV-DAG: %[[#int_32:]] = OpConstant %[[#int]] 32
; CHECK-SPIRV-DAG: %[[#long_0:]] = OpConstantNull %[[#long]]
; CHECK-SPIRV-DAG: %[[#half_0:]] = OpConstant %[[#half]] 0
; CHECK-SPIRV-DAG: %[[#float_0:]] = OpConstant %[[#float]] 0
diff --git a/llvm/test/CodeGen/SPIRV/unnamed-global.ll b/llvm/test/CodeGen/SPIRV/unnamed-global.ll
index f72334b..90bac50 100644
--- a/llvm/test/CodeGen/SPIRV/unnamed-global.ll
+++ b/llvm/test/CodeGen/SPIRV/unnamed-global.ll
@@ -4,10 +4,10 @@
; RUN: llc -verify-machineinstrs -O0 -mtriple=spirv32-unknown-unknown %s -o - | FileCheck %s
; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv32-unknown-unknown %s -o - -filetype=obj | spirv-val %}
-; CHECK: %[[TyInt:.*]] = OpTypeInt 8 0
-; CHECK: %[[ConstInt:.*]] = OpConstant %[[TyInt]] 123
-; CHECK: %[[TyPtr:.*]] = OpTypePointer {{[a-zA-Z]+}} %[[TyInt]]
-; CHECK: %[[VarId:.*]] = OpVariable %[[TyPtr]] {{[a-zA-Z]+}} %[[ConstInt]]
+; CHECK-DAG: %[[TyInt:.*]] = OpTypeInt 8 0
+; CHECK-DAG: %[[ConstInt:.*]] = OpConstant %[[TyInt]] 123
+; CHECK-DAG: %[[TyPtr:.*]] = OpTypePointer {{[a-zA-Z]+}} %[[TyInt]]
+; CHECK-DAG: %[[VarId:.*]] = OpVariable %[[TyPtr]] {{[a-zA-Z]+}} %[[ConstInt]]
@0 = addrspace(1) global i8 123
diff --git a/llvm/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll b/llvm/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll
index c611e89..f3070cd 100644
--- a/llvm/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll
+++ b/llvm/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S -passes='require<profile-summary>,function(codegenprepare)' %s -o - | FileCheck %s
; This file tests the different cases what are involved when codegen prepare
; tries to get sign/zero extension out of the way of addressing mode.
@@ -9,14 +10,17 @@ target triple = "x86_64-apple-macosx"
; Check that we correctly promote both operands of the promotable add.
-; CHECK-LABEL: @twoArgsPromotion
-; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i32 %arg1 to i64
-; CHECK: [[ARG2SEXT:%[a-zA-Z_0-9-]+]] = sext i32 %arg2 to i64
-; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], [[ARG2SEXT]]
-; CHECK: inttoptr i64 [[PROMOTED]] to ptr
-; CHECK: ret
define i8 @twoArgsPromotion(i32 %arg1, i32 %arg2) {
- %add = add nsw i32 %arg1, %arg2
+; CHECK-LABEL: define i8 @twoArgsPromotion(
+; CHECK-SAME: i32 [[ARG1:%.*]], i32 [[ARG2:%.*]]) {
+; CHECK-NEXT: [[PROMOTED:%.*]] = sext i32 [[ARG1]] to i64
+; CHECK-NEXT: [[PROMOTED2:%.*]] = sext i32 [[ARG2]] to i64
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[PROMOTED]], [[PROMOTED2]]
+; CHECK-NEXT: [[BASE:%.*]] = inttoptr i64 [[ADD]] to ptr
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[BASE]], align 1
+; CHECK-NEXT: ret i8 [[RES]]
+;
+ %add = add nsw i32 %arg1, %arg2
%sextadd = sext i32 %add to i64
%base = inttoptr i64 %sextadd to ptr
%res = load i8, ptr %base
@@ -28,11 +32,16 @@ define i8 @twoArgsPromotion(i32 %arg1, i32 %arg2) {
; Otherwise, we will increase the number of instruction executed.
; (This is a heuristic of course, because the new sext could have been
; merged with something else.)
-; CHECK-LABEL: @twoArgsNoPromotion
-; CHECK: add nsw i32 %arg1, %arg2
-; CHECK: ret
define i8 @twoArgsNoPromotion(i32 %arg1, i32 %arg2, ptr %base) {
- %add = add nsw i32 %arg1, %arg2
+; CHECK-LABEL: define i8 @twoArgsNoPromotion(
+; CHECK-SAME: i32 [[ARG1:%.*]], i32 [[ARG2:%.*]], ptr [[BASE:%.*]]) {
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[ARG1]], [[ARG2]]
+; CHECK-NEXT: [[SEXTADD:%.*]] = sext i32 [[ADD]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[SEXTADD]]
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: ret i8 [[RES]]
+;
+ %add = add nsw i32 %arg1, %arg2
%sextadd = sext i32 %add to i64
%arrayidx = getelementptr inbounds i8, ptr %base, i64 %sextadd
%res = load i8, ptr %arrayidx
@@ -41,11 +50,16 @@ define i8 @twoArgsNoPromotion(i32 %arg1, i32 %arg2, ptr %base) {
; Check that we do not promote when the related instruction does not have
; the nsw flag.
-; CHECK-LABEL: @noPromotion
-; CHECK-NOT: add i64
-; CHECK: ret
define i8 @noPromotion(i32 %arg1, i32 %arg2, ptr %base) {
- %add = add i32 %arg1, %arg2
+; CHECK-LABEL: define i8 @noPromotion(
+; CHECK-SAME: i32 [[ARG1:%.*]], i32 [[ARG2:%.*]], ptr [[BASE:%.*]]) {
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[ARG1]], [[ARG2]]
+; CHECK-NEXT: [[SEXTADD:%.*]] = sext i32 [[ADD]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[SEXTADD]]
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: ret i8 [[RES]]
+;
+ %add = add i32 %arg1, %arg2
%sextadd = sext i32 %add to i64
%arrayidx = getelementptr inbounds i8, ptr %base, i64 %sextadd
%res = load i8, ptr %arrayidx
@@ -53,13 +67,16 @@ define i8 @noPromotion(i32 %arg1, i32 %arg2, ptr %base) {
}
; Check that we correctly promote constant arguments.
-; CHECK-LABEL: @oneArgPromotion
-; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i32 %arg1 to i64
-; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
-; CHECK: getelementptr inbounds i8, ptr %base, i64 [[PROMOTED]]
-; CHECK: ret
define i8 @oneArgPromotion(i32 %arg1, ptr %base) {
- %add = add nsw i32 %arg1, 1
+; CHECK-LABEL: define i8 @oneArgPromotion(
+; CHECK-SAME: i32 [[ARG1:%.*]], ptr [[BASE:%.*]]) {
+; CHECK-NEXT: [[PROMOTED:%.*]] = sext i32 [[ARG1]] to i64
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[PROMOTED]], 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[ADD]]
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: ret i8 [[RES]]
+;
+ %add = add nsw i32 %arg1, 1
%sextadd = sext i32 %add to i64
%arrayidx = getelementptr inbounds i8, ptr %base, i64 %sextadd
%res = load i8, ptr %arrayidx
@@ -67,14 +84,17 @@ define i8 @oneArgPromotion(i32 %arg1, ptr %base) {
}
; Check that we are able to merge a sign extension with a zero extension.
-; CHECK-LABEL: @oneArgPromotionZExt
-; CHECK: [[ARG1ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 %arg1 to i64
-; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1ZEXT]], 1
-; CHECK: getelementptr inbounds i8, ptr %base, i64 [[PROMOTED]]
-; CHECK: ret
define i8 @oneArgPromotionZExt(i8 %arg1, ptr %base) {
+; CHECK-LABEL: define i8 @oneArgPromotionZExt(
+; CHECK-SAME: i8 [[ARG1:%.*]], ptr [[BASE:%.*]]) {
+; CHECK-NEXT: [[PROMOTED2:%.*]] = zext i8 [[ARG1]] to i64
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[PROMOTED2]], 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[ADD]]
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: ret i8 [[RES]]
+;
%zext = zext i8 %arg1 to i32
- %add = add nsw i32 %zext, 1
+ %add = add nsw i32 %zext, 1
%sextadd = sext i32 %add to i64
%arrayidx = getelementptr inbounds i8, ptr %base, i64 %sextadd
%res = load i8, ptr %arrayidx
@@ -88,11 +108,14 @@ define i8 @oneArgPromotionZExt(i8 %arg1, ptr %base) {
; more thing in the addressing mode. Therefore the modification is
; rolled back.
; Still, this test case exercises the desired code path.
-; CHECK-LABEL: @oneArgPromotionCstZExt
-; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 0, 1
-; CHECK: getelementptr inbounds i8, ptr %base, i64 [[PROMOTED]]
-; CHECK: ret
define i8 @oneArgPromotionCstZExt(ptr %base) {
+; CHECK-LABEL: define i8 @oneArgPromotionCstZExt(
+; CHECK-SAME: ptr [[BASE:%.*]]) {
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 0, 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[ADD]]
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: ret i8 [[RES]]
+;
%cst = zext i16 undef to i32
%add = add nsw i32 %cst, 1
%sextadd = sext i32 %add to i64
@@ -103,15 +126,18 @@ define i8 @oneArgPromotionCstZExt(ptr %base) {
; Check that we do not promote truncate when we cannot determine the
; bits that are dropped.
-; CHECK-LABEL: @oneArgPromotionBlockTrunc1
-; CHECK: [[ARG1TRUNC:%[a-zA-Z_0-9-]+]] = trunc i32 %arg1 to i8
-; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i8 [[ARG1TRUNC]] to i64
-; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
-; CHECK: getelementptr inbounds i8, ptr %base, i64 [[PROMOTED]]
-; CHECK: ret
define i8 @oneArgPromotionBlockTrunc1(i32 %arg1, ptr %base) {
+; CHECK-LABEL: define i8 @oneArgPromotionBlockTrunc1(
+; CHECK-SAME: i32 [[ARG1:%.*]], ptr [[BASE:%.*]]) {
+; CHECK-NEXT: [[TRUNC:%.*]] = trunc i32 [[ARG1]] to i8
+; CHECK-NEXT: [[PROMOTED:%.*]] = sext i8 [[TRUNC]] to i64
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[PROMOTED]], 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[ADD]]
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: ret i8 [[RES]]
+;
%trunc = trunc i32 %arg1 to i8
- %add = add nsw i8 %trunc, 1
+ %add = add nsw i8 %trunc, 1
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, ptr %base, i64 %sextadd
%res = load i8, ptr %arrayidx
@@ -120,17 +146,20 @@ define i8 @oneArgPromotionBlockTrunc1(i32 %arg1, ptr %base) {
; Check that we do not promote truncate when we cannot determine all the
; bits that are dropped.
-; CHECK-LABEL: @oneArgPromotionBlockTrunc2
-; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i16 %arg1 to i32
-; CHECK: [[ARG1TRUNC:%[a-zA-Z_0-9-]+]] = trunc i32 [[ARG1SEXT]] to i8
-; CHECK: [[ARG1SEXT64:%[a-zA-Z_0-9-]+]] = sext i8 [[ARG1TRUNC]] to i64
-; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT64]], 1
-; CHECK: getelementptr inbounds i8, ptr %base, i64 [[PROMOTED]]
-; CHECK: ret
define i8 @oneArgPromotionBlockTrunc2(i16 %arg1, ptr %base) {
+; CHECK-LABEL: define i8 @oneArgPromotionBlockTrunc2(
+; CHECK-SAME: i16 [[ARG1:%.*]], ptr [[BASE:%.*]]) {
+; CHECK-NEXT: [[SEXTARG1:%.*]] = sext i16 [[ARG1]] to i32
+; CHECK-NEXT: [[TRUNC:%.*]] = trunc i32 [[SEXTARG1]] to i8
+; CHECK-NEXT: [[PROMOTED:%.*]] = sext i8 [[TRUNC]] to i64
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[PROMOTED]], 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[ADD]]
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: ret i8 [[RES]]
+;
%sextarg1 = sext i16 %arg1 to i32
%trunc = trunc i32 %sextarg1 to i8
- %add = add nsw i8 %trunc, 1
+ %add = add nsw i8 %trunc, 1
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, ptr %base, i64 %sextadd
%res = load i8, ptr %arrayidx
@@ -139,15 +168,18 @@ define i8 @oneArgPromotionBlockTrunc2(i16 %arg1, ptr %base) {
; Check that we are able to promote truncate when we know all the bits
; that are dropped.
-; CHECK-LABEL: @oneArgPromotionPassTruncKeepSExt
-; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i1 %arg1 to i64
-; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
-; CHECK: getelementptr inbounds i8, ptr %base, i64 [[PROMOTED]]
-; CHECK: ret
define i8 @oneArgPromotionPassTruncKeepSExt(i1 %arg1, ptr %base) {
+; CHECK-LABEL: define i8 @oneArgPromotionPassTruncKeepSExt(
+; CHECK-SAME: i1 [[ARG1:%.*]], ptr [[BASE:%.*]]) {
+; CHECK-NEXT: [[PROMOTED:%.*]] = sext i1 [[ARG1]] to i64
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[PROMOTED]], 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[ADD]]
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: ret i8 [[RES]]
+;
%sextarg1 = sext i1 %arg1 to i32
%trunc = trunc i32 %sextarg1 to i8
- %add = add nsw i8 %trunc, 1
+ %add = add nsw i8 %trunc, 1
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, ptr %base, i64 %sextadd
%res = load i8, ptr %arrayidx
@@ -156,17 +188,19 @@ define i8 @oneArgPromotionPassTruncKeepSExt(i1 %arg1, ptr %base) {
; On X86 truncate are free. Check that we are able to promote the add
; to be used as addressing mode and that we insert a truncate for the other
-; use.
-; CHECK-LABEL: @oneArgPromotionTruncInsert
-; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i8 %arg1 to i64
-; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
-; CHECK: [[TRUNC:%[a-zA-Z_0-9-]+]] = trunc i64 [[PROMOTED]] to i8
-; CHECK: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i8, ptr %base, i64 [[PROMOTED]]
-; CHECK: [[LOAD:%[a-zA-Z_0-9-]+]] = load i8, ptr [[GEP]]
-; CHECK: add i8 [[LOAD]], [[TRUNC]]
-; CHECK: ret
+; use.
define i8 @oneArgPromotionTruncInsert(i8 %arg1, ptr %base) {
- %add = add nsw i8 %arg1, 1
+; CHECK-LABEL: define i8 @oneArgPromotionTruncInsert(
+; CHECK-SAME: i8 [[ARG1:%.*]], ptr [[BASE:%.*]]) {
+; CHECK-NEXT: [[PROMOTED2:%.*]] = sext i8 [[ARG1]] to i64
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[PROMOTED2]], 1
+; CHECK-NEXT: [[PROMOTED:%.*]] = trunc i64 [[ADD]] to i8
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[ADD]]
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[FINALRES:%.*]] = add i8 [[RES]], [[PROMOTED]]
+; CHECK-NEXT: ret i8 [[FINALRES]]
+;
+ %add = add nsw i8 %arg1, 1
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, ptr %base, i64 %sextadd
%res = load i8, ptr %arrayidx
@@ -175,15 +209,20 @@ define i8 @oneArgPromotionTruncInsert(i8 %arg1, ptr %base) {
}
; Cannot sext from a larger type than the promoted type.
-; CHECK-LABEL: @oneArgPromotionLargerType
-; CHECK: [[ARG1TRUNC:%[a-zA-Z_0-9-]+]] = trunc i128 %arg1 to i8
-; CHECK: [[ARG1SEXT64:%[a-zA-Z_0-9-]+]] = sext i8 [[ARG1TRUNC]] to i64
-; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT64]], 1
-; CHECK: getelementptr inbounds i8, ptr %base, i64 [[PROMOTED]]
-; CHECK: ret
define i8 @oneArgPromotionLargerType(i128 %arg1, ptr %base) {
+; CHECK-LABEL: define i8 @oneArgPromotionLargerType(
+; CHECK-SAME: i128 [[ARG1:%.*]], ptr [[BASE:%.*]]) {
+; CHECK-NEXT: [[TRUNC:%.*]] = trunc i128 [[ARG1]] to i8
+; CHECK-NEXT: [[PROMOTED2:%.*]] = sext i8 [[TRUNC]] to i64
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[PROMOTED2]], 1
+; CHECK-NEXT: [[PROMOTED:%.*]] = trunc i64 [[ADD]] to i8
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[ADD]]
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[FINALRES:%.*]] = add i8 [[RES]], [[PROMOTED]]
+; CHECK-NEXT: ret i8 [[FINALRES]]
+;
%trunc = trunc i128 %arg1 to i8
- %add = add nsw i8 %trunc, 1
+ %add = add nsw i8 %trunc, 1
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, ptr %base, i64 %sextadd
%res = load i8, ptr %arrayidx
@@ -194,18 +233,20 @@ define i8 @oneArgPromotionLargerType(i128 %arg1, ptr %base) {
; Use same inserted trunc
; On X86 truncate are free. Check that we are able to promote the add
; to be used as addressing mode and that we insert a truncate for
-; *all* the other uses.
-; CHECK-LABEL: @oneArgPromotionTruncInsertSeveralUse
-; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i8 %arg1 to i64
-; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
-; CHECK: [[TRUNC:%[a-zA-Z_0-9-]+]] = trunc i64 [[PROMOTED]] to i8
-; CHECK: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i8, ptr %base, i64 [[PROMOTED]]
-; CHECK: [[LOAD:%[a-zA-Z_0-9-]+]] = load i8, ptr [[GEP]]
-; CHECK: [[ADDRES:%[a-zA-Z_0-9-]+]] = add i8 [[LOAD]], [[TRUNC]]
-; CHECK: add i8 [[ADDRES]], [[TRUNC]]
-; CHECK: ret
+; *all* the other uses.
define i8 @oneArgPromotionTruncInsertSeveralUse(i8 %arg1, ptr %base) {
- %add = add nsw i8 %arg1, 1
+; CHECK-LABEL: define i8 @oneArgPromotionTruncInsertSeveralUse(
+; CHECK-SAME: i8 [[ARG1:%.*]], ptr [[BASE:%.*]]) {
+; CHECK-NEXT: [[PROMOTED2:%.*]] = sext i8 [[ARG1]] to i64
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[PROMOTED2]], 1
+; CHECK-NEXT: [[PROMOTED:%.*]] = trunc i64 [[ADD]] to i8
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[ADD]]
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ALMOSTFINALRES:%.*]] = add i8 [[RES]], [[PROMOTED]]
+; CHECK-NEXT: [[FINALRES:%.*]] = add i8 [[ALMOSTFINALRES]], [[PROMOTED]]
+; CHECK-NEXT: ret i8 [[FINALRES]]
+;
+ %add = add nsw i8 %arg1, 1
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, ptr %base, i64 %sextadd
%res = load i8, ptr %arrayidx
@@ -216,16 +257,18 @@ define i8 @oneArgPromotionTruncInsertSeveralUse(i8 %arg1, ptr %base) {
; Check that the promoted instruction is used for all uses of the original
; sign extension.
-; CHECK-LABEL: @oneArgPromotionSExtSeveralUse
-; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i8 %arg1 to i64
-; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nsw i64 [[ARG1SEXT]], 1
-; CHECK: [[GEP:%[a-zA-Z_0-9-]+]] = getelementptr inbounds i8, ptr %base, i64 [[PROMOTED]]
-; CHECK: [[LOAD:%[a-zA-Z_0-9-]+]] = load i8, ptr [[GEP]]
-; CHECK: [[ADDRES:%[a-zA-Z_0-9-]+]] = zext i8 [[LOAD]] to i64
-; CHECK: add i64 [[ADDRES]], [[PROMOTED]]
-; CHECK: ret
define i64 @oneArgPromotionSExtSeveralUse(i8 %arg1, ptr %base) {
- %add = add nsw i8 %arg1, 1
+; CHECK-LABEL: define i64 @oneArgPromotionSExtSeveralUse(
+; CHECK-SAME: i8 [[ARG1:%.*]], ptr [[BASE:%.*]]) {
+; CHECK-NEXT: [[PROMOTED:%.*]] = sext i8 [[ARG1]] to i64
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i64 [[PROMOTED]], 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[ADD]]
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: [[ALMOSTFINALRES:%.*]] = zext i8 [[RES]] to i64
+; CHECK-NEXT: [[FINALRES:%.*]] = add i64 [[ALMOSTFINALRES]], [[ADD]]
+; CHECK-NEXT: ret i64 [[FINALRES]]
+;
+ %add = add nsw i8 %arg1, 1
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, ptr %base, i64 %sextadd
%res = load i8, ptr %arrayidx
@@ -249,16 +292,19 @@ define i64 @oneArgPromotionSExtSeveralUse(i8 %arg1, ptr %base) {
; - Setting the operands of the promoted instruction with the promoted values.
; - Moving instruction around (mainly sext when promoting instruction).
; Each type of those promotions has to be undo at least once during this
-; specific test.
-; CHECK-LABEL: @twoArgsPromotionNest
-; CHECK: [[ORIG:%[a-zA-Z_0-9-]+]] = add nsw i32 %arg1, %arg2
-; CHECK: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i32 [[ORIG]], [[ORIG]]
-; CHECK: [[SEXT:%[a-zA-Z_0-9-]+]] = sext i32 [[ADD]] to i64
-; CHECK: getelementptr inbounds i8, ptr %base, i64 [[SEXT]]
-; CHECK: ret
+; specific test.
define i8 @twoArgsPromotionNest(i32 %arg1, i32 %arg2, ptr %base) {
+; CHECK-LABEL: define i8 @twoArgsPromotionNest(
+; CHECK-SAME: i32 [[ARG1:%.*]], i32 [[ARG2:%.*]], ptr [[BASE:%.*]]) {
+; CHECK-NEXT: [[PROMOTABLEADD1:%.*]] = add nsw i32 [[ARG1]], [[ARG2]]
+; CHECK-NEXT: [[PROMOTABLEADD2:%.*]] = add nsw i32 [[PROMOTABLEADD1]], [[PROMOTABLEADD1]]
+; CHECK-NEXT: [[SEXTADD:%.*]] = sext i32 [[PROMOTABLEADD2]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[SEXTADD]]
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: ret i8 [[RES]]
+;
%promotableadd1 = add nsw i32 %arg1, %arg2
- %promotableadd2 = add nsw i32 %promotableadd1, %promotableadd1
+ %promotableadd2 = add nsw i32 %promotableadd1, %promotableadd1
%sextadd = sext i32 %promotableadd2 to i64
%arrayidx = getelementptr inbounds i8, ptr %base, i64 %sextadd
%res = load i8, ptr %arrayidx
@@ -270,18 +316,21 @@ define i8 @twoArgsPromotionNest(i32 %arg1, i32 %arg2, ptr %base) {
; The matcher first promotes the add, removes the trunc and promotes
; the sext of arg1.
; Then, the matcher cannot use an addressing mode r + r + r, thus it
-; rolls back.
-; CHECK-LABEL: @twoArgsNoPromotionRemove
-; CHECK: [[SEXTARG1:%[a-zA-Z_0-9-]+]] = sext i1 %arg1 to i32
-; CHECK: [[TRUNC:%[a-zA-Z_0-9-]+]] = trunc i32 [[SEXTARG1]] to i8
-; CHECK: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i8 [[TRUNC]], %arg2
-; CHECK: [[SEXT:%[a-zA-Z_0-9-]+]] = sext i8 [[ADD]] to i64
-; CHECK: getelementptr inbounds i8, ptr %base, i64 [[SEXT]]
-; CHECK: ret
+; rolls back.
define i8 @twoArgsNoPromotionRemove(i1 %arg1, i8 %arg2, ptr %base) {
+; CHECK-LABEL: define i8 @twoArgsNoPromotionRemove(
+; CHECK-SAME: i1 [[ARG1:%.*]], i8 [[ARG2:%.*]], ptr [[BASE:%.*]]) {
+; CHECK-NEXT: [[SEXTARG1:%.*]] = sext i1 [[ARG1]] to i32
+; CHECK-NEXT: [[TRUNC:%.*]] = trunc i32 [[SEXTARG1]] to i8
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i8 [[TRUNC]], [[ARG2]]
+; CHECK-NEXT: [[SEXTADD:%.*]] = sext i8 [[ADD]] to i64
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[SEXTADD]]
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: ret i8 [[RES]]
+;
%sextarg1 = sext i1 %arg1 to i32
%trunc = trunc i32 %sextarg1 to i8
- %add = add nsw i8 %trunc, %arg2
+ %add = add nsw i8 %trunc, %arg2
%sextadd = sext i8 %add to i64
%arrayidx = getelementptr inbounds i8, ptr %base, i64 %sextadd
%res = load i8, ptr %arrayidx
@@ -301,29 +350,40 @@ define i8 @twoArgsNoPromotionRemove(i1 %arg1, i8 %arg2, ptr %base) {
; Check that we did not promote anything in the final matching.
;
; <rdar://problem/16020230>
-; CHECK-LABEL: @checkProfitability
-; CHECK-NOT: {{%[a-zA-Z_0-9-]+}} = sext i32 %arg1 to i64
-; CHECK-NOT: {{%[a-zA-Z_0-9-]+}} = sext i32 %arg2 to i64
-; CHECK: [[SHL:%[a-zA-Z_0-9-]+]] = shl nsw i32 %arg1, 1
-; CHECK: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i32 [[SHL]], %arg2
-; CHECK: [[SEXTADD:%[a-zA-Z_0-9-]+]] = sext i32 [[ADD]] to i64
; BB then
-; CHECK: [[BASE1:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[SEXTADD]] to ptr
-; CHECK: [[FULL1:%[a-zA-Z_0-9-]+]] = getelementptr i8, ptr [[BASE1]], i64 48
-; CHECK: load i32, ptr [[FULL1]]
; BB else
-; CHECK: [[BASE2:%[a-zA-Z_0-9-]+]] = inttoptr i64 [[SEXTADD]] to ptr
-; CHECK: [[FULL2:%[a-zA-Z_0-9-]+]] = getelementptr i8, ptr [[BASE2]], i64 48
-; CHECK: load i32, ptr [[FULL2]]
-; CHECK: ret
define i32 @checkProfitability(i32 %arg1, i32 %arg2, i1 %test) {
+; CHECK-LABEL: define i32 @checkProfitability(
+; CHECK-SAME: i32 [[ARG1:%.*]], i32 [[ARG2:%.*]], i1 [[TEST:%.*]]) {
+; CHECK-NEXT: [[SHL:%.*]] = shl nsw i32 [[ARG1]], 1
+; CHECK-NEXT: [[ADD1:%.*]] = add nsw i32 [[SHL]], [[ARG2]]
+; CHECK-NEXT: [[SEXTIDX1:%.*]] = sext i32 [[ADD1]] to i64
+; CHECK-NEXT: br i1 [[TEST]], label %[[THEN:.*]], label %[[ELSE:.*]]
+; CHECK: [[THEN]]:
+; CHECK-NEXT: [[SUNKADDR:%.*]] = inttoptr i64 [[SEXTIDX1]] to ptr
+; CHECK-NEXT: [[SUNKADDR13:%.*]] = getelementptr i8, ptr [[SUNKADDR]], i64 48
+; CHECK-NEXT: [[RES1:%.*]] = load i32, ptr [[SUNKADDR13]], align 4
+; CHECK-NEXT: br label %[[END:.*]]
+; CHECK: [[ELSE]]:
+; CHECK-NEXT: [[SUNKADDR17:%.*]] = inttoptr i64 [[SEXTIDX1]] to ptr
+; CHECK-NEXT: [[SUNKADDR18:%.*]] = getelementptr i8, ptr [[SUNKADDR17]], i64 48
+; CHECK-NEXT: [[RES2:%.*]] = load i32, ptr [[SUNKADDR18]], align 4
+; CHECK-NEXT: br label %[[END]]
+; CHECK: [[END]]:
+; CHECK-NEXT: [[TMP:%.*]] = phi i32 [ [[RES1]], %[[THEN]] ], [ [[RES2]], %[[ELSE]] ]
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[SEXTIDX1]] to i32
+; CHECK-NEXT: [[RES:%.*]] = add i32 [[TMP]], [[TMP1]]
+; CHECK-NEXT: [[ADDR:%.*]] = inttoptr i32 [[RES]] to ptr
+; CHECK-NEXT: [[FINAL:%.*]] = load i32, ptr [[ADDR]], align 4
+; CHECK-NEXT: ret i32 [[FINAL]]
+;
%shl = shl nsw i32 %arg1, 1
%add1 = add nsw i32 %shl, %arg2
%sextidx1 = sext i32 %add1 to i64
%tmpptr = inttoptr i64 %sextidx1 to ptr
%arrayidx1 = getelementptr i32, ptr %tmpptr, i64 12
br i1 %test, label %then, label %else
-then:
+then:
%res1 = load i32, ptr %arrayidx1
br label %end
else:
@@ -346,15 +406,47 @@ end:
; We used to crash on this function because we did not return the right
; promoted instruction for %conv.i.
; Make sure we generate the right code now.
-; CHECK-LABEL: @fn3
; %conv.i is used twice and only one of its use is being promoted.
; Use it at the starting point for the matching.
-; CHECK: %conv.i = zext i16 [[PLAIN_OPND:%[.a-zA-Z_0-9-]+]] to i32
-; CHECK-NEXT: [[PROMOTED_CONV:%[.a-zA-Z_0-9-]+]] = zext i16 [[PLAIN_OPND]] to i64
-; CHECK-NEXT: [[ADD:%[a-zA-Z_0-9-]+]] = getelementptr i8, ptr %P, i64 [[PROMOTED_CONV]]
-; CHECK-NEXT: [[ADDR:%[a-zA-Z_0-9-]+]] = getelementptr i8, ptr [[ADD]], i64 7
-; CHECK-NEXT: load i8, ptr [[ADDR]], align 1
define signext i16 @fn3(ptr nocapture readonly %P) {
+; CHECK-LABEL: define signext i16 @fn3(
+; CHECK-SAME: ptr nocapture readonly [[P:%.*]]) {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[WHILE_BODY_I_I:.*]]
+; CHECK: [[WHILE_BODY_I_I]]:
+; CHECK-NEXT: [[SRC_ADDR_0_I_I:%.*]] = phi i16 [ 0, %[[ENTRY]] ], [ [[INC_I_I:%.*]], %[[WHILE_BODY_I_I]] ]
+; CHECK-NEXT: [[INC_I_I]] = add i16 [[SRC_ADDR_0_I_I]], 1
+; CHECK-NEXT: [[IDXPROM_I_I:%.*]] = sext i16 [[SRC_ADDR_0_I_I]] to i64
+; CHECK-NEXT: [[SUNKADDR:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 [[IDXPROM_I_I]]
+; CHECK-NEXT: [[SUNKADDR2:%.*]] = getelementptr inbounds i8, ptr [[SUNKADDR]], i64 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[SUNKADDR2]], align 1
+; CHECK-NEXT: [[CONV2_I_I:%.*]] = zext i8 [[TMP1]] to i32
+; CHECK-NEXT: [[AND_I_I:%.*]] = and i32 [[CONV2_I_I]], 15
+; CHECK-NEXT: store i32 [[AND_I_I]], ptr @a, align 4
+; CHECK-NEXT: [[TOBOOL_I_I:%.*]] = icmp eq i32 [[AND_I_I]], 0
+; CHECK-NEXT: br i1 [[TOBOOL_I_I]], label %[[WHILE_BODY_I_I]], label %[[FN1_EXIT_I:.*]]
+; CHECK: [[FN1_EXIT_I]]:
+; CHECK-NEXT: [[CONV_I:%.*]] = zext i16 [[INC_I_I]] to i32
+; CHECK-NEXT: [[PROMOTED4:%.*]] = zext i16 [[INC_I_I]] to i64
+; CHECK-NEXT: [[SUNKADDR5:%.*]] = getelementptr i8, ptr [[P]], i64 [[PROMOTED4]]
+; CHECK-NEXT: [[SUNKADDR6:%.*]] = getelementptr i8, ptr [[SUNKADDR5]], i64 7
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[SUNKADDR6]], align 1
+; CHECK-NEXT: [[CONV2_I:%.*]] = sext i8 [[TMP2]] to i16
+; CHECK-NEXT: store i16 [[CONV2_I]], ptr @b, align 2
+; CHECK-NEXT: [[SUB4_I:%.*]] = sub nsw i32 0, [[CONV_I]]
+; CHECK-NEXT: [[CONV5_I:%.*]] = zext i16 [[CONV2_I]] to i32
+; CHECK-NEXT: [[CMP_I:%.*]] = icmp sgt i32 [[CONV5_I]], [[SUB4_I]]
+; CHECK-NEXT: br i1 [[CMP_I]], label %[[IF_THEN_I:.*]], label %[[FN2_EXIT:.*]]
+; CHECK: [[IF_THEN_I]]:
+; CHECK-NEXT: [[END_I:%.*]] = getelementptr inbounds [[STRUCT_DNS_PACKET:%.*]], ptr [[P]], i64 0, i32 1
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[END_I]], align 4
+; CHECK-NEXT: [[SUB7_I:%.*]] = add i32 [[TMP3]], 65535
+; CHECK-NEXT: [[CONV8_I:%.*]] = trunc i32 [[SUB7_I]] to i16
+; CHECK-NEXT: br label %[[FN2_EXIT]]
+; CHECK: [[FN2_EXIT]]:
+; CHECK-NEXT: [[RETVAL_0_I:%.*]] = phi i16 [ [[CONV8_I]], %[[IF_THEN_I]] ], [ undef, %[[FN1_EXIT_I]] ]
+; CHECK-NEXT: ret i16 [[RETVAL_0_I]]
+;
entry:
%tmp = getelementptr inbounds %struct.dns_packet, ptr %P, i64 0, i32 2
br label %while.body.i.i
@@ -399,13 +491,16 @@ fn2.exit: ; preds = %if.then.i, %fn1.exi
; Check that we do not promote an extension if the non-wrapping flag does not
; match the kind of the extension.
-; CHECK-LABEL: @noPromotionFlag
-; CHECK: [[ADD:%[a-zA-Z_0-9-]+]] = add nsw i32 %arg1, %arg2
-; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = zext i32 [[ADD]] to i64
-; CHECK: inttoptr i64 [[PROMOTED]] to ptr
-; CHECK: ret
define i8 @noPromotionFlag(i32 %arg1, i32 %arg2) {
- %add = add nsw i32 %arg1, %arg2
+; CHECK-LABEL: define i8 @noPromotionFlag(
+; CHECK-SAME: i32 [[ARG1:%.*]], i32 [[ARG2:%.*]]) {
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[ARG1]], [[ARG2]]
+; CHECK-NEXT: [[ZEXTADD:%.*]] = zext i32 [[ADD]] to i64
+; CHECK-NEXT: [[BASE:%.*]] = inttoptr i64 [[ZEXTADD]] to ptr
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[BASE]], align 1
+; CHECK-NEXT: ret i8 [[RES]]
+;
+ %add = add nsw i32 %arg1, %arg2
%zextadd = zext i32 %add to i64
%base = inttoptr i64 %zextadd to ptr
%res = load i8, ptr %base
@@ -413,14 +508,17 @@ define i8 @noPromotionFlag(i32 %arg1, i32 %arg2) {
}
; Check that we correctly promote both operands of the promotable add with zext.
-; CHECK-LABEL: @twoArgsPromotionZExt
-; CHECK: [[ARG1ZEXT:%[a-zA-Z_0-9-]+]] = zext i32 %arg1 to i64
-; CHECK: [[ARG2ZEXT:%[a-zA-Z_0-9-]+]] = zext i32 %arg2 to i64
-; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nuw i64 [[ARG1ZEXT]], [[ARG2ZEXT]]
-; CHECK: inttoptr i64 [[PROMOTED]] to ptr
-; CHECK: ret
define i8 @twoArgsPromotionZExt(i32 %arg1, i32 %arg2) {
- %add = add nuw i32 %arg1, %arg2
+; CHECK-LABEL: define i8 @twoArgsPromotionZExt(
+; CHECK-SAME: i32 [[ARG1:%.*]], i32 [[ARG2:%.*]]) {
+; CHECK-NEXT: [[PROMOTED:%.*]] = zext i32 [[ARG1]] to i64
+; CHECK-NEXT: [[PROMOTED2:%.*]] = zext i32 [[ARG2]] to i64
+; CHECK-NEXT: [[ADD:%.*]] = add nuw i64 [[PROMOTED]], [[PROMOTED2]]
+; CHECK-NEXT: [[BASE:%.*]] = inttoptr i64 [[ADD]] to ptr
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[BASE]], align 1
+; CHECK-NEXT: ret i8 [[RES]]
+;
+ %add = add nuw i32 %arg1, %arg2
%zextadd = zext i32 %add to i64
%base = inttoptr i64 %zextadd to ptr
%res = load i8, ptr %base
@@ -428,13 +526,16 @@ define i8 @twoArgsPromotionZExt(i32 %arg1, i32 %arg2) {
}
; Check that we correctly promote constant arguments.
-; CHECK-LABEL: @oneArgPromotionNegativeCstZExt
-; CHECK: [[ARG1ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 %arg1 to i64
-; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nuw i64 [[ARG1ZEXT]], 255
-; CHECK: getelementptr inbounds i8, ptr %base, i64 [[PROMOTED]]
-; CHECK: ret
define i8 @oneArgPromotionNegativeCstZExt(i8 %arg1, ptr %base) {
- %add = add nuw i8 %arg1, -1
+; CHECK-LABEL: define i8 @oneArgPromotionNegativeCstZExt(
+; CHECK-SAME: i8 [[ARG1:%.*]], ptr [[BASE:%.*]]) {
+; CHECK-NEXT: [[PROMOTED:%.*]] = zext i8 [[ARG1]] to i64
+; CHECK-NEXT: [[ADD:%.*]] = add nuw i64 [[PROMOTED]], 255
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[ADD]]
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: ret i8 [[RES]]
+;
+ %add = add nuw i8 %arg1, -1
%zextadd = zext i8 %add to i64
%arrayidx = getelementptr inbounds i8, ptr %base, i64 %zextadd
%res = load i8, ptr %arrayidx
@@ -442,14 +543,17 @@ define i8 @oneArgPromotionNegativeCstZExt(i8 %arg1, ptr %base) {
}
; Check that we are able to merge two zero extensions.
-; CHECK-LABEL: @oneArgPromotionZExtZExt
-; CHECK: [[ARG1ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 %arg1 to i64
-; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nuw i64 [[ARG1ZEXT]], 1
-; CHECK: getelementptr inbounds i8, ptr %base, i64 [[PROMOTED]]
-; CHECK: ret
define i8 @oneArgPromotionZExtZExt(i8 %arg1, ptr %base) {
+; CHECK-LABEL: define i8 @oneArgPromotionZExtZExt(
+; CHECK-SAME: i8 [[ARG1:%.*]], ptr [[BASE:%.*]]) {
+; CHECK-NEXT: [[PROMOTED2:%.*]] = zext i8 [[ARG1]] to i64
+; CHECK-NEXT: [[ADD:%.*]] = add nuw i64 [[PROMOTED2]], 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[ADD]]
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: ret i8 [[RES]]
+;
%zext = zext i8 %arg1 to i32
- %add = add nuw i32 %zext, 1
+ %add = add nuw i32 %zext, 1
%zextadd = zext i32 %add to i64
%arrayidx = getelementptr inbounds i8, ptr %base, i64 %zextadd
%res = load i8, ptr %arrayidx
@@ -458,17 +562,20 @@ define i8 @oneArgPromotionZExtZExt(i8 %arg1, ptr %base) {
; Check that we do not promote truncate when the dropped bits
; are of a different kind.
-; CHECK-LABEL: @oneArgPromotionBlockTruncZExt
-; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i1 %arg1 to i32
-; CHECK: [[ARG1TRUNC:%[a-zA-Z_0-9-]+]] = trunc i32 [[ARG1SEXT]] to i8
-; CHECK: [[ARG1ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[ARG1TRUNC]] to i64
-; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nuw i64 [[ARG1ZEXT]], 1
-; CHECK: getelementptr inbounds i8, ptr %base, i64 [[PROMOTED]]
-; CHECK: ret
define i8 @oneArgPromotionBlockTruncZExt(i1 %arg1, ptr %base) {
+; CHECK-LABEL: define i8 @oneArgPromotionBlockTruncZExt(
+; CHECK-SAME: i1 [[ARG1:%.*]], ptr [[BASE:%.*]]) {
+; CHECK-NEXT: [[SEXTARG1:%.*]] = sext i1 [[ARG1]] to i32
+; CHECK-NEXT: [[TRUNC:%.*]] = trunc i32 [[SEXTARG1]] to i8
+; CHECK-NEXT: [[PROMOTED:%.*]] = zext i8 [[TRUNC]] to i64
+; CHECK-NEXT: [[ADD:%.*]] = add nuw i64 [[PROMOTED]], 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[ADD]]
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: ret i8 [[RES]]
+;
%sextarg1 = sext i1 %arg1 to i32
%trunc = trunc i32 %sextarg1 to i8
- %add = add nuw i8 %trunc, 1
+ %add = add nuw i8 %trunc, 1
%zextadd = zext i8 %add to i64
%arrayidx = getelementptr inbounds i8, ptr %base, i64 %zextadd
%res = load i8, ptr %arrayidx
@@ -477,15 +584,18 @@ define i8 @oneArgPromotionBlockTruncZExt(i1 %arg1, ptr %base) {
; Check that we are able to promote truncate when we know all the bits
; that are dropped.
-; CHECK-LABEL: @oneArgPromotionPassTruncZExt
-; CHECK: [[ARG1ZEXT:%[a-zA-Z_0-9-]+]] = zext i1 %arg1 to i64
-; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nuw i64 [[ARG1ZEXT]], 1
-; CHECK: getelementptr inbounds i8, ptr %base, i64 [[PROMOTED]]
-; CHECK: ret
define i8 @oneArgPromotionPassTruncZExt(i1 %arg1, ptr %base) {
+; CHECK-LABEL: define i8 @oneArgPromotionPassTruncZExt(
+; CHECK-SAME: i1 [[ARG1:%.*]], ptr [[BASE:%.*]]) {
+; CHECK-NEXT: [[PROMOTED2:%.*]] = zext i1 [[ARG1]] to i64
+; CHECK-NEXT: [[ADD:%.*]] = add nuw i64 [[PROMOTED2]], 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[ADD]]
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: ret i8 [[RES]]
+;
%sextarg1 = zext i1 %arg1 to i32
%trunc = trunc i32 %sextarg1 to i8
- %add = add nuw i8 %trunc, 1
+ %add = add nuw i8 %trunc, 1
%zextadd = zext i8 %add to i64
%arrayidx = getelementptr inbounds i8, ptr %base, i64 %zextadd
%res = load i8, ptr %arrayidx
@@ -493,15 +603,18 @@ define i8 @oneArgPromotionPassTruncZExt(i1 %arg1, ptr %base) {
}
; Check that we do not promote sext with zext.
-; CHECK-LABEL: @oneArgPromotionBlockSExtZExt
-; CHECK: [[ARG1SEXT:%[a-zA-Z_0-9-]+]] = sext i1 %arg1 to i8
-; CHECK: [[ARG1ZEXT:%[a-zA-Z_0-9-]+]] = zext i8 [[ARG1SEXT]] to i64
-; CHECK: [[PROMOTED:%[a-zA-Z_0-9-]+]] = add nuw i64 [[ARG1ZEXT]], 1
-; CHECK: getelementptr inbounds i8, ptr %base, i64 [[PROMOTED]]
-; CHECK: ret
define i8 @oneArgPromotionBlockSExtZExt(i1 %arg1, ptr %base) {
+; CHECK-LABEL: define i8 @oneArgPromotionBlockSExtZExt(
+; CHECK-SAME: i1 [[ARG1:%.*]], ptr [[BASE:%.*]]) {
+; CHECK-NEXT: [[SEXTARG1:%.*]] = sext i1 [[ARG1]] to i8
+; CHECK-NEXT: [[PROMOTED:%.*]] = zext i8 [[SEXTARG1]] to i64
+; CHECK-NEXT: [[ADD:%.*]] = add nuw i64 [[PROMOTED]], 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[BASE]], i64 [[ADD]]
+; CHECK-NEXT: [[RES:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
+; CHECK-NEXT: ret i8 [[RES]]
+;
%sextarg1 = sext i1 %arg1 to i8
- %add = add nuw i8 %sextarg1, 1
+ %add = add nuw i8 %sextarg1, 1
%zextadd = zext i8 %add to i64
%arrayidx = getelementptr inbounds i8, ptr %base, i64 %zextadd
%res = load i8, ptr %arrayidx
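The promotion that the autogenerated checks above encode can be read as a single rewrite: when the narrow add carries a wrap flag that matches the surrounding extension (nuw with zext), the pass under test hoists the extension above the add so the address is computed in the wide type. A minimal before/after sketch in LLVM IR (the value names are illustrative, not taken from any one test):

  ; before promotion
  %add     = add nuw i8 %arg1, 1
  %zextadd = zext i8 %add to i64
  %gep     = getelementptr inbounds i8, ptr %base, i64 %zextadd

  ; after promotion
  %promoted = zext i8 %arg1 to i64
  %add      = add nuw i64 %promoted, 1
  %gep      = getelementptr inbounds i8, ptr %base, i64 %add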
diff --git a/llvm/test/CodeGen/X86/uint_to_half.ll b/llvm/test/CodeGen/X86/uint_to_half.ll
new file mode 100644
index 0000000..b62a07e
--- /dev/null
+++ b/llvm/test/CodeGen/X86/uint_to_half.ll
@@ -0,0 +1,198 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx,+f16c | FileCheck %s -check-prefixes=AVX1
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+f16c | FileCheck %s -check-prefixes=AVX2
+; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s -check-prefixes=AVX512
+
+define <8 x half> @test_uitofp_v8i32_v8f16(<8 x i32> %a) {
+; AVX1-LABEL: test_uitofp_v8i32_v8f16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrld $16, %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vcvtdq2ps %ymm1, %ymm1
+; AVX1-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX1-NEXT: vaddps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vcvtps2ph $4, %ymm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_uitofp_v8i32_v8f16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
+; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm2 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
+; AVX2-NEXT: vsubps %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vaddps %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vcvtps2ph $4, %ymm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_uitofp_v8i32_v8f16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vcvtudq2ps %ymm0, %ymm0
+; AVX512-NEXT: vcvtps2ph $4, %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %vec = uitofp <8 x i32> %a to <8 x half>
+ ret <8 x half> %vec
+}
+
+define <8 x half> @test_strict_uitofp_v8i32_v8f16(<8 x i32> %a) {
+; AVX1-LABEL: test_strict_uitofp_v8i32_v8f16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpsrld $16, %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: vcvtdq2ps %ymm1, %ymm1
+; AVX1-NEXT: vmulps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX1-NEXT: vaddps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vcvtps2ph $4, %ymm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_strict_uitofp_v8i32_v8f16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm1 = [1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
+; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm2 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
+; AVX2-NEXT: vsubps %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vaddps %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vcvtps2ph $4, %ymm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_strict_uitofp_v8i32_v8f16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vcvtudq2ps %ymm0, %ymm0
+; AVX512-NEXT: vcvtps2ph $4, %ymm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+ %vec = tail call <8 x half> @llvm.experimental.constrained.uitofp.v8f16.v8i32(<8 x i32> %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <8 x half> %vec
+}
+
+define <16 x half> @test_uitofp_v16i32_v16f16(<16 x i32> %a) {
+; AVX1-LABEL: test_uitofp_v16i32_v16f16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpsrld $16, %xmm3, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT: vcvtdq2ps %ymm2, %ymm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} ymm3 = [6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4]
+; AVX1-NEXT: vmulps %ymm3, %ymm2, %ymm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} ymm4 = [65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX1-NEXT: vandps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX1-NEXT: vaddps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vcvtps2ph $4, %ymm0, %xmm0
+; AVX1-NEXT: vpsrld $16, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpsrld $16, %xmm5, %xmm5
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2
+; AVX1-NEXT: vcvtdq2ps %ymm2, %ymm2
+; AVX1-NEXT: vmulps %ymm3, %ymm2, %ymm2
+; AVX1-NEXT: vandps %ymm4, %ymm1, %ymm1
+; AVX1-NEXT: vcvtdq2ps %ymm1, %ymm1
+; AVX1-NEXT: vaddps %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: vcvtps2ph $4, %ymm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_uitofp_v16i32_v16f16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
+; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm4 = [1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm4[1],ymm0[2],ymm4[3],ymm0[4],ymm4[5],ymm0[6],ymm4[7],ymm0[8],ymm4[9],ymm0[10],ymm4[11],ymm0[12],ymm4[13],ymm0[14],ymm4[15]
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm5 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
+; AVX2-NEXT: vsubps %ymm5, %ymm0, %ymm0
+; AVX2-NEXT: vaddps %ymm0, %ymm3, %ymm0
+; AVX2-NEXT: vcvtps2ph $4, %ymm0, %xmm0
+; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7],ymm1[8],ymm2[9],ymm1[10],ymm2[11],ymm1[12],ymm2[13],ymm1[14],ymm2[15]
+; AVX2-NEXT: vpsrld $16, %ymm1, %ymm1
+; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2],ymm4[3],ymm1[4],ymm4[5],ymm1[6],ymm4[7],ymm1[8],ymm4[9],ymm1[10],ymm4[11],ymm1[12],ymm4[13],ymm1[14],ymm4[15]
+; AVX2-NEXT: vsubps %ymm5, %ymm1, %ymm1
+; AVX2-NEXT: vaddps %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vcvtps2ph $4, %ymm1, %xmm1
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_uitofp_v16i32_v16f16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vcvtudq2ps %zmm0, %zmm0
+; AVX512-NEXT: vcvtps2ph $4, %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %vec = uitofp <16 x i32> %a to <16 x half>
+ ret <16 x half> %vec
+}
+
+define <16 x half> @test_strict_uitofp_v16i32_v16f16(<16 x i32> %a) {
+; AVX1-LABEL: test_strict_uitofp_v16i32_v16f16:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpsrld $16, %xmm3, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
+; AVX1-NEXT: vcvtdq2ps %ymm2, %ymm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} ymm3 = [6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4,6.5536E+4]
+; AVX1-NEXT: vmulps %ymm3, %ymm2, %ymm2
+; AVX1-NEXT: vbroadcastss {{.*#+}} ymm4 = [65535,65535,65535,65535,65535,65535,65535,65535]
+; AVX1-NEXT: vandps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
+; AVX1-NEXT: vaddps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vcvtps2ph $4, %ymm0, %xmm0
+; AVX1-NEXT: vpsrld $16, %xmm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
+; AVX1-NEXT: vpsrld $16, %xmm5, %xmm5
+; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2
+; AVX1-NEXT: vcvtdq2ps %ymm2, %ymm2
+; AVX1-NEXT: vmulps %ymm3, %ymm2, %ymm2
+; AVX1-NEXT: vandps %ymm4, %ymm1, %ymm1
+; AVX1-NEXT: vcvtdq2ps %ymm1, %ymm1
+; AVX1-NEXT: vaddps %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: vcvtps2ph $4, %ymm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_strict_uitofp_v16i32_v16f16:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200,1258291200]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
+; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
+; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm4 = [1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928,1392508928]
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm4[1],ymm0[2],ymm4[3],ymm0[4],ymm4[5],ymm0[6],ymm4[7],ymm0[8],ymm4[9],ymm0[10],ymm4[11],ymm0[12],ymm4[13],ymm0[14],ymm4[15]
+; AVX2-NEXT: vbroadcastss {{.*#+}} ymm5 = [5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11,5.49764202E+11]
+; AVX2-NEXT: vsubps %ymm5, %ymm0, %ymm0
+; AVX2-NEXT: vaddps %ymm0, %ymm3, %ymm0
+; AVX2-NEXT: vcvtps2ph $4, %ymm0, %xmm0
+; AVX2-NEXT: vpblendw {{.*#+}} ymm2 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7],ymm1[8],ymm2[9],ymm1[10],ymm2[11],ymm1[12],ymm2[13],ymm1[14],ymm2[15]
+; AVX2-NEXT: vpsrld $16, %ymm1, %ymm1
+; AVX2-NEXT: vpblendw {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2],ymm4[3],ymm1[4],ymm4[5],ymm1[6],ymm4[7],ymm1[8],ymm4[9],ymm1[10],ymm4[11],ymm1[12],ymm4[13],ymm1[14],ymm4[15]
+; AVX2-NEXT: vsubps %ymm5, %ymm1, %ymm1
+; AVX2-NEXT: vaddps %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vcvtps2ph $4, %ymm1, %xmm1
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_strict_uitofp_v16i32_v16f16:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vcvtudq2ps %zmm0, %zmm0
+; AVX512-NEXT: vcvtps2ph $4, %zmm0, %ymm0
+; AVX512-NEXT: retq
+ %vec = tail call <16 x half> @llvm.experimental.constrained.uitofp.v16f16.v16i32(<16 x i32> %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret <16 x half> %vec
+}
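The AVX1 expansions above implement the unsigned conversion without a native instruction by splitting each 32-bit lane into 16-bit halves, converting both halves with the signed converter, and recombining them as hi * 65536.0 + lo (the 6.5536E+4 and 65535 constants in the checks); the AVX2 form is a blend/subtract variant of the same split. A scalar LLVM IR sketch of the identity being used (%x is a hypothetical i32 input):

  %hi  = lshr i32 %x, 16
  %lo  = and  i32 %x, 65535
  %hif = sitofp i32 %hi to float      ; both halves are < 2^16, so signed conversion is exact
  %lof = sitofp i32 %lo to float
  %mul = fmul float %hif, 6.5536e+04  ; hi * 65536.0
  %res = fadd float %mul, %lof        ; equals uitofp i32 %x to float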
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll
index 4f42d5c..15e287d 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v32.ll
@@ -4129,6 +4129,62 @@ define <32 x i8> @shuffle_v32i8_56_zz_zz_zz_57_zz_zz_zz_58_zz_zz_zz__zz_59_zz_zz
ret <32 x i8> %shuffle
}
+; PR121823
+define <32 x i8> @shuffle_v32i8_01_09_00_03_11_02_05_13_04_07_15_06_17_25_16_19_27_18_21_29_20_23_31_22_zz_zz_zz_zz_zz_zz_zz_zz(<32 x i8> %a) {
+; AVX1-LABEL: shuffle_v32i8_01_09_00_03_11_02_05_13_04_07_15_06_17_25_16_19_27_18_21_29_20_23_31_22_zz_zz_zz_zz_zz_zz_zz_zz:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[1,9,0,3]
+; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,9,0,3,11,2,5,13,4,7,15,6],zero,zero,zero,zero
+; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[11,2,5,13,4,7,15,6],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: shuffle_v32i8_01_09_00_03_11_02_05_13_04_07_15_06_17_25_16_19_27_18_21_29_20_23_31_22_zz_zz_zz_zz_zz_zz_zz_zz:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,9,0,3,11,2,5,13,4,7,15,6,u,u,u,u,17,25,16,19,27,18,21,29,20,23,31,22,u,u,u,u]
+; AVX2-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,1,2,4,5,6,0,0]
+; AVX2-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX2-NEXT: retq
+;
+; AVX512VLBW-LABEL: shuffle_v32i8_01_09_00_03_11_02_05_13_04_07_15_06_17_25_16_19_27_18_21_29_20_23_31_22_zz_zz_zz_zz_zz_zz_zz_zz:
+; AVX512VLBW: # %bb.0:
+; AVX512VLBW-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[1,9,0,3,11,2,5,13,4,7,15,6,u,u,u,u,17,25,16,19,27,18,21,29,20,23,31,22,u,u,u,u]
+; AVX512VLBW-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VLBW-NEXT: vpmovsxbd {{.*#+}} ymm0 = [0,1,2,4,5,6,14,15]
+; AVX512VLBW-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
+; AVX512VLBW-NEXT: retq
+;
+; AVX512VLVBMI-LABEL: shuffle_v32i8_01_09_00_03_11_02_05_13_04_07_15_06_17_25_16_19_27_18_21_29_20_23_31_22_zz_zz_zz_zz_zz_zz_zz_zz:
+; AVX512VLVBMI: # %bb.0:
+; AVX512VLVBMI-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX512VLVBMI-NEXT: vmovdqa {{.*#+}} ymm2 = [1,9,0,3,11,2,5,13,4,7,15,6,17,25,16,19,27,18,21,29,20,23,31,22,56,57,58,59,60,61,62,63]
+; AVX512VLVBMI-NEXT: vpermt2b %ymm1, %ymm2, %ymm0
+; AVX512VLVBMI-NEXT: retq
+;
+; XOPAVX1-LABEL: shuffle_v32i8_01_09_00_03_11_02_05_13_04_07_15_06_17_25_16_19_27_18_21_29_20_23_31_22_zz_zz_zz_zz_zz_zz_zz_zz:
+; XOPAVX1: # %bb.0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; XOPAVX1-NEXT: vpperm {{.*#+}} xmm0 = xmm0[1,9,0,3,11,2,5,13,4,7,15,6],xmm1[1,9,0,3]
+; XOPAVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[11,2,5,13,4,7,15,6],zero,zero,zero,zero,zero,zero,zero,zero
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: shuffle_v32i8_01_09_00_03_11_02_05_13_04_07_15_06_17_25_16_19_27_18_21_29_20_23_31_22_zz_zz_zz_zz_zz_zz_zz_zz:
+; XOPAVX2: # %bb.0:
+; XOPAVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,9,0,3,11,2,5,13,4,7,15,6,u,u,u,u,17,25,16,19,27,18,21,29,20,23,31,22,u,u,u,u]
+; XOPAVX2-NEXT: vpmovsxbd {{.*#+}} ymm1 = [0,1,2,4,5,6,0,0]
+; XOPAVX2-NEXT: vpermd %ymm0, %ymm1, %ymm0
+; XOPAVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOPAVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; XOPAVX2-NEXT: retq
+ %r = shufflevector <32 x i8> %a, <32 x i8> zeroinitializer, <32 x i32> <i32 1, i32 9, i32 0, i32 3, i32 11, i32 2, i32 5, i32 13, i32 4, i32 7, i32 15, i32 6, i32 17, i32 25, i32 16, i32 19, i32 27, i32 18, i32 21, i32 29, i32 20, i32 23, i32 31, i32 22, i32 32, i32 32, i32 32, i32 32, i32 48, i32 48, i32 48, i32 48>
+ ret <32 x i8> %r
+}
+
define <32 x i8> @shuffle_v32i8_47_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30(<32 x i8> %a, <32 x i8> %b) {
; AVX1-LABEL: shuffle_v32i8_47_00_01_02_03_04_05_06_07_08_09_10_11_12_13_14_63_16_17_18_19_20_21_22_23_24_25_26_27_28_29_30:
; AVX1: # %bb.0:
diff --git a/llvm/test/DebugInfo/NVPTX/debug-addr-class.ll b/llvm/test/DebugInfo/NVPTX/debug-addr-class.ll
index 26ad597..82301e4 100644
--- a/llvm/test/DebugInfo/NVPTX/debug-addr-class.ll
+++ b/llvm/test/DebugInfo/NVPTX/debug-addr-class.ll
@@ -4,7 +4,7 @@
@GLOBAL = addrspace(1) externally_initialized global i32 0, align 4, !dbg !0
@SHARED = addrspace(3) externally_initialized global i32 undef, align 4, !dbg !6
-define void @test(float, ptr, ptr, i32) !dbg !17 {
+define ptx_kernel void @test(float, ptr, ptr, i32) !dbg !17 {
%5 = alloca float, align 4
%6 = alloca ptr, align 8
%7 = alloca ptr, align 8
@@ -38,7 +38,6 @@ define void @test(float, ptr, ptr, i32) !dbg !17 {
declare void @llvm.dbg.declare(metadata, metadata, metadata)
!llvm.dbg.cu = !{!2}
-!nvvm.annotations = !{!10}
!llvm.module.flags = !{!11, !12, !13, !14, !15}
!llvm.ident = !{!16}
@@ -52,7 +51,6 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata)
!7 = distinct !DIGlobalVariable(name: "SHARED", scope: !2, file: !8, line: 4, type: !9, isLocal: false, isDefinition: true)
!8 = !DIFile(filename: "test.cu", directory: "/tmp")
!9 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
-!10 = !{ptr @test, !"kernel", i32 1}
!11 = !{i32 2, !"Dwarf Version", i32 2}
!12 = !{i32 2, !"Debug Info Version", i32 3}
!13 = !{i32 1, !"wchar_size", i32 4}
diff --git a/llvm/test/DebugInfo/NVPTX/debug-info.ll b/llvm/test/DebugInfo/NVPTX/debug-info.ll
index 55c81ca..c926229 100644
--- a/llvm/test/DebugInfo/NVPTX/debug-info.ll
+++ b/llvm/test/DebugInfo/NVPTX/debug-info.ll
@@ -59,7 +59,7 @@
; CHECK: }
; Function Attrs: nounwind
-define void @_Z5saxpyifPfS_(i32 %n, float %a, ptr nocapture readonly %x, ptr nocapture %y) local_unnamed_addr #0 !dbg !566 {
+define ptx_kernel void @_Z5saxpyifPfS_(i32 %n, float %a, ptr nocapture readonly %x, ptr nocapture %y) local_unnamed_addr #0 !dbg !566 {
entry:
call void @llvm.dbg.value(metadata i32 %n, metadata !570, metadata !DIExpression()), !dbg !575
call void @llvm.dbg.value(metadata float %a, metadata !571, metadata !DIExpression()), !dbg !576
@@ -8496,7 +8496,6 @@ attributes #2 = { nounwind readnone speculatable }
attributes #3 = { nounwind }
!llvm.dbg.cu = !{!0}
-!nvvm.annotations = !{!555, !556, !557, !556, !558, !558, !558, !558, !559, !559, !558}
!llvm.module.flags = !{!560, !561, !562, !563}
!llvm.ident = !{!564}
!nvvm.internalize.after.link = !{}
@@ -9057,11 +9056,6 @@ attributes #3 = { nounwind }
!552 = !DISubprogram(name: "tgammaf", linkageName: "_ZL7tgammaff", scope: !444, file: !444, line: 1592, type: !13, isLocal: true, isDefinition: false, flags: DIFlagPrototyped, isOptimized: true)
!553 = !DIImportedEntity(tag: DW_TAG_imported_declaration, scope: !5, entity: !554, file: !445, line: 459)
!554 = !DISubprogram(name: "truncf", linkageName: "_ZL6truncff", scope: !462, file: !462, line: 662, type: !13, isLocal: true, isDefinition: false, flags: DIFlagPrototyped, isOptimized: true)
-!555 = !{ptr @_Z5saxpyifPfS_, !"kernel", i32 1}
-!556 = !{null, !"align", i32 8}
-!557 = !{null, !"align", i32 8, !"align", i32 65544, !"align", i32 131080}
-!558 = !{null, !"align", i32 16}
-!559 = !{null, !"align", i32 16, !"align", i32 65552, !"align", i32 131088}
!560 = !{i32 2, !"Dwarf Version", i32 2}
!561 = !{i32 2, !"Debug Info Version", i32 3}
!562 = !{i32 1, !"wchar_size", i32 4}
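Both NVPTX debug-info tests change for the same reason: a kernel is now identified by the ptx_kernel calling convention on its definition rather than by an !nvvm.annotations entry, so the annotation metadata is dropped from these tests. A minimal sketch of the two spellings (the kernel name is made up):

  ; old style: plain function plus annotation metadata
  define void @kern(ptr %p) { ret void }
  !nvvm.annotations = !{!0}
  !0 = !{ptr @kern, !"kernel", i32 1}

  ; new style: the calling convention carries the same information
  define ptx_kernel void @kern(ptr %p) { ret void }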
diff --git a/llvm/test/DebugInfo/X86/dwarf5-debug-names-addr-tu-to-non-tu.ll b/llvm/test/DebugInfo/X86/dwarf5-debug-names-addr-tu-to-non-tu.ll
new file mode 100644
index 0000000..a836b2a
--- /dev/null
+++ b/llvm/test/DebugInfo/X86/dwarf5-debug-names-addr-tu-to-non-tu.ll
@@ -0,0 +1,83 @@
+; RUN: llc -filetype=obj -O0 -generate-type-units -mtriple=x86_64-unknown-linux-gnu < %s \
+; RUN: | llvm-dwarfdump -debug-info -debug-names - \
+; RUN: | FileCheck %s
+
+;; Test that an entry in the debug names table gets created for a top-level DIE when the creation of a TU fails.
+
+;; clang++ -O0 main.cpp -gdwarf-5 -fdebug-types-section -gpubnames -S -emit-llvm -glldb -o main.ll
+;; int foo;
+;; namespace {
+;; struct t1 {};
+;; } // namespace
+;; template <int *> struct t2 {
+;; t1 v1;
+;; };
+;; struct t3 {
+;; t2<&foo> v1;
+;; };
+;; t3 v1;
+
+; CHECK: [[OFFSET:0x[0-9a-f]*]]: DW_TAG_structure_type
+; CHECK: [[OFFSET1:0x[0-9a-f]*]]: DW_TAG_structure_type
+
+; CHECK: Bucket 0 [
+; CHECK-NEXT: Name 1 {
+; CHECK-NEXT: Hash: {{.+}}
+; CHECK-NEXT: String: {{.+}} "t3"
+; CHECK-NEXT: Entry @ {{.+}} {
+; CHECK-NEXT: Abbrev: 0x1
+; CHECK-NEXT: Tag: DW_TAG_structure_type
+; CHECK-NEXT: DW_IDX_die_offset: [[OFFSET]]
+; CHECK-NEXT: DW_IDX_parent: <parent not indexed>
+
+; CHECK: Name 5 {
+; CHECK-NEXT: Hash: {{.+}}
+; CHECK-NEXT: String: {{.+}} "t2<&foo>"
+; CHECK-NEXT: Entry @ 0xe1 {
+; CHECK-NEXT: Abbrev: 0x1
+; CHECK-NEXT: Tag: DW_TAG_structure_type
+; CHECK-NEXT: DW_IDX_die_offset: [[OFFSET1]]
+; CHECK-NEXT: DW_IDX_parent: <parent not indexed>
+
+; ModuleID = 'main.cpp'
+source_filename = "main.cpp"
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+%struct.t3 = type { i8 }
+
+@foo = dso_local global i32 0, align 4, !dbg !0
+@v1 = dso_local global %struct.t3 zeroinitializer, align 1, !dbg !5
+
+!llvm.dbg.cu = !{!2}
+!llvm.module.flags = !{!20, !21, !22, !23, !24, !25, !26}
+!llvm.ident = !{!27}
+
+!0 = !DIGlobalVariableExpression(var: !1, expr: !DIExpression())
+!1 = distinct !DIGlobalVariable(name: "foo", scope: !2, file: !3, line: 1, type: !19, isLocal: false, isDefinition: true)
+!2 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !3, producer: "clang version 20.0.0git (git@github.com:llvm/llvm-project.git ba373096e8ac83a7136fc44bc4e71a7bc53417a6)", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, globals: !4, splitDebugInlining: false, sysroot: "/")
+!3 = !DIFile(filename: "main.cpp", directory: "/StructuredType", checksumkind: CSK_MD5, checksum: "f91f8d905197b1c0309da9526bc4776e")
+!4 = !{!0, !5}
+!5 = !DIGlobalVariableExpression(var: !6, expr: !DIExpression())
+!6 = distinct !DIGlobalVariable(name: "v1", scope: !2, file: !3, line: 11, type: !7, isLocal: false, isDefinition: true)
+!7 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "t3", file: !3, line: 8, size: 8, flags: DIFlagTypePassByValue, elements: !8, identifier: "_ZTS2t3")
+!8 = !{!9}
+!9 = !DIDerivedType(tag: DW_TAG_member, name: "v1", scope: !7, file: !3, line: 9, baseType: !10, size: 8)
+!10 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "t2<&foo>", file: !3, line: 5, size: 8, flags: DIFlagTypePassByValue, elements: !11, templateParams: !16, identifier: "_ZTS2t2IXadL_Z3fooEEE")
+!11 = !{!12}
+!12 = !DIDerivedType(tag: DW_TAG_member, name: "v1", scope: !10, file: !3, line: 6, baseType: !13, size: 8)
+!13 = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "t1", scope: !14, file: !3, line: 3, size: 8, flags: DIFlagTypePassByValue, elements: !15)
+!14 = !DINamespace(scope: null)
+!15 = !{}
+!16 = !{!17}
+!17 = !DITemplateValueParameter(type: !18, value: ptr @foo)
+!18 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !19, size: 64)
+!19 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!20 = !{i32 7, !"Dwarf Version", i32 5}
+!21 = !{i32 2, !"Debug Info Version", i32 3}
+!22 = !{i32 1, !"wchar_size", i32 4}
+!23 = !{i32 8, !"PIC Level", i32 2}
+!24 = !{i32 7, !"PIE Level", i32 2}
+!25 = !{i32 7, !"uwtable", i32 2}
+!26 = !{i32 7, !"frame-pointer", i32 2}
+!27 = !{!"clang version 20.0.0git (git@github.com:llvm/llvm-project.git ba373096e8ac83a7136fc44bc4e71a7bc53417a6)"}
diff --git a/llvm/test/ExecutionEngine/JITLink/AArch32/ELF_data_alignment.s b/llvm/test/ExecutionEngine/JITLink/AArch32/ELF_data_alignment.s
index b4f6e04..9296f04 100644
--- a/llvm/test/ExecutionEngine/JITLink/AArch32/ELF_data_alignment.s
+++ b/llvm/test/ExecutionEngine/JITLink/AArch32/ELF_data_alignment.s
@@ -1,16 +1,18 @@
# REQUIRES: asserts
# RUN: llvm-mc -triple=armv7-linux-gnueabi -arm-add-build-attributes -filetype=obj -o %t_armv7.o %s
# RUN: llvm-objdump -s --section=.rodata %t_armv7.o | FileCheck --check-prefix=CHECK-OBJ %s
-# RUN: llvm-jitlink -noexec -slab-address 0x76ff0000 -slab-allocate 10Kb \
-# RUN: -slab-page-size 4096 %t_armv7.o -debug-only=jitlink 2>&1 \
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
+# RUN: -slab-address 0x76ff0000 -slab-allocate 10Kb \
+# RUN: -slab-page-size 4096 %t_armv7.o 2>&1 \
# RUN: | FileCheck --check-prefix=CHECK-LG %s
# RUN: llvm-jitlink -noexec -slab-address 0x76ff0000 -slab-allocate 10Kb \
# RUN: -slab-page-size 4096 %t_armv7.o -check %s
# RUN: llvm-mc -triple=thumbv7-linux-gnueabi -arm-add-build-attributes -filetype=obj -o %t_thumbv7.o %s
# RUN: llvm-objdump -s --section=.rodata %t_thumbv7.o | FileCheck --check-prefix=CHECK-OBJ %s
-# RUN: llvm-jitlink -noexec -slab-address 0x76ff0000 -slab-allocate 10Kb \
-# RUN: -slab-page-size 4096 %t_thumbv7.o -debug-only=jitlink 2>&1 \
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
+# RUN: -slab-address 0x76ff0000 -slab-allocate 10Kb \
+# RUN: -slab-page-size 4096 %t_thumbv7.o 2>&1 \
# RUN: | FileCheck --check-prefix=CHECK-LG %s
# RUN: llvm-jitlink -noexec -slab-address 0x76ff0000 -slab-allocate 10Kb \
# RUN: -slab-page-size 4096 %t_thumbv7.o -check %s
diff --git a/llvm/test/ExecutionEngine/JITLink/AArch64/ELF_ehframe.s b/llvm/test/ExecutionEngine/JITLink/AArch64/ELF_ehframe.s
index 151a041..b25ffee 100644
--- a/llvm/test/ExecutionEngine/JITLink/AArch64/ELF_ehframe.s
+++ b/llvm/test/ExecutionEngine/JITLink/AArch64/ELF_ehframe.s
@@ -1,7 +1,7 @@
# REQUIRES: asserts
# RUN: llvm-mc -triple=aarch64-linux-gnu -filetype=obj -o %t %s
-# RUN: llvm-jitlink -noexec -phony-externals -debug-only=jitlink %t 2>&1 | \
-# RUN: FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
+# RUN: -phony-externals %t 2>&1 | FileCheck %s
#
# Check that splitting of eh-frame sections works.
#
diff --git a/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_compact_unwind.s b/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_compact_unwind.s
index 20534d5..b2adb85 100644
--- a/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_compact_unwind.s
+++ b/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_compact_unwind.s
@@ -1,6 +1,7 @@
# REQUIRES: asserts
# RUN: llvm-mc -triple=arm64-apple-ios -filetype=obj -o %t %s
-# RUN: llvm-jitlink -noexec -debug-only=jitlink %t 2>&1 | FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec %t 2>&1 \
+# RUN: | FileCheck %s
#
# Check that splitting of compact-unwind sections works.
#
diff --git a/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_ehframe.s b/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_ehframe.s
index 8d43b0f..4e84518 100644
--- a/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_ehframe.s
+++ b/llvm/test/ExecutionEngine/JITLink/AArch64/MachO_ehframe.s
@@ -1,7 +1,7 @@
# REQUIRES: asserts
# RUN: llvm-mc -triple=arm64-apple-darwin11 -filetype=obj -o %t %s
-# RUN: llvm-jitlink -noexec -phony-externals -debug-only=jitlink %t 2>&1 | \
-# RUN: FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
+# RUN: -phony-externals %t 2>&1 | FileCheck %s
#
# Check that splitting of eh-frame sections works.
#
diff --git a/llvm/test/ExecutionEngine/JITLink/LoongArch/ELF_loongarch64_ehframe.s b/llvm/test/ExecutionEngine/JITLink/LoongArch/ELF_loongarch64_ehframe.s
index cc54585..806cdcf 100644
--- a/llvm/test/ExecutionEngine/JITLink/LoongArch/ELF_loongarch64_ehframe.s
+++ b/llvm/test/ExecutionEngine/JITLink/LoongArch/ELF_loongarch64_ehframe.s
@@ -1,7 +1,7 @@
# REQUIRES: asserts
-# RUN: llvm-mc --triple=loongarch64-linux-gnu --filetype=obj -o %t %s
-# RUN: llvm-jitlink --noexec --phony-externals --debug-only=jitlink %t 2>&1 | \
-# RUN: FileCheck %s
+# RUN: llvm-mc -triple=loongarch64-linux-gnu -filetype=obj -o %t %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
+# RUN: -phony-externals %t 2>&1 | FileCheck %s
## Check that splitting of eh-frame sections works.
diff --git a/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call.s b/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call.s
index 480fbb8..2b5c9e3 100644
--- a/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call.s
+++ b/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call.s
@@ -1,15 +1,15 @@
# REQUIRES: asserts
# RUN: llvm-mc -triple=riscv32 -mattr=+relax -filetype=obj -o %t.rv32 %s
-# RUN: llvm-jitlink -noexec \
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
# RUN: -slab-allocate 100Kb -slab-address 0x1000 -slab-page-size 4096 \
-# RUN: -debug-only=jitlink -check %s %t.rv32 \
-# RUN: 2>&1 | FileCheck %s
+# RUN: -check %s %t.rv32 2>&1 \
+# RUN: | FileCheck %s
# RUN: llvm-mc -triple=riscv64 -mattr=+relax -filetype=obj -o %t.rv64 %s
-# RUN: llvm-jitlink -noexec \
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
# RUN: -slab-allocate 100Kb -slab-address 0x1000 -slab-page-size 4096 \
-# RUN: -debug-only=jitlink -check %s %t.rv64 \
-# RUN: 2>&1 | FileCheck %s
+# RUN: -check %s %t.rv64 2>&1 \
+# RUN: | FileCheck %s
.text
diff --git a/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call_rvc.s b/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call_rvc.s
index e8a2928..3bbfd55 100644
--- a/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call_rvc.s
+++ b/llvm/test/ExecutionEngine/JITLink/RISCV/ELF_relax_call_rvc.s
@@ -1,43 +1,43 @@
# REQUIRES: asserts
# RUN: llvm-mc -triple=riscv32 -mattr=+relax,+c -filetype=obj -o %t.rv32 %s
-# RUN: llvm-jitlink -noexec \
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
# RUN: -slab-allocate 100Kb -slab-address 0x1000 -slab-page-size 4096 \
-# RUN: -debug-only=jitlink -check %s %t.rv32 \
-# RUN: 2>&1 | FileCheck %s
-# RUN: llvm-jitlink -noexec \
+# RUN: -check %s %t.rv32 2>&1 \
+# RUN: | FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
# RUN: -slab-allocate 100Kb -slab-address 0x1000 -slab-page-size 4096 \
-# RUN: -debug-only=jitlink -check %s -check-name=jitlink-check-rv32 %t.rv32 \
-# RUN: 2>&1 | FileCheck -check-prefix=CHECK-RV32 %s
+# RUN: -check %s -check-name=jitlink-check-rv32 %t.rv32 2>&1 \
+# RUN: | FileCheck -check-prefix=CHECK-RV32 %s
# RUN: llvm-mc -triple=riscv64 -mattr=+relax,+c -filetype=obj -o %t.rv64 %s
-# RUN: llvm-jitlink -noexec \
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
# RUN: -slab-allocate 100Kb -slab-address 0x1000 -slab-page-size 4096 \
-# RUN: -debug-only=jitlink -check %s %t.rv64 \
-# RUN: 2>&1 | FileCheck %s
-# RUN: llvm-jitlink -noexec \
+# RUN: -check %s %t.rv64 2>&1 \
+# RUN: | FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
# RUN: -slab-allocate 100Kb -slab-address 0x1000 -slab-page-size 4096 \
-# RUN: -debug-only=jitlink -check %s -check-name=jitlink-check-rv64 %t.rv64 \
-# RUN: 2>&1 | FileCheck -check-prefix=CHECK-RV64 %s
+# RUN: -check %s -check-name=jitlink-check-rv64 %t.rv64 2>&1 \
+# RUN: | FileCheck -check-prefix=CHECK-RV64 %s
# RUN: llvm-mc -triple=riscv32 -mattr=+relax,+zca -filetype=obj -o %t.rv32zca %s
-# RUN: llvm-jitlink -noexec \
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
# RUN: -slab-allocate 100Kb -slab-address 0x1000 -slab-page-size 4096 \
-# RUN: -debug-only=jitlink -check %s %t.rv32zca \
-# RUN: 2>&1 | FileCheck %s
-# RUN: llvm-jitlink -noexec \
+# RUN: -check %s %t.rv32zca 2>&1 \
+# RUN: | FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
# RUN: -slab-allocate 100Kb -slab-address 0x1000 -slab-page-size 4096 \
-# RUN: -debug-only=jitlink -check %s -check-name=jitlink-check-rv32 %t.rv32zca \
-# RUN: 2>&1 | FileCheck -check-prefix=CHECK-RV32 %s
+# RUN: -check %s -check-name=jitlink-check-rv32 %t.rv32zca 2>&1 \
+# RUN: | FileCheck -check-prefix=CHECK-RV32 %s
# RUN: llvm-mc -triple=riscv64 -mattr=+relax,+c -filetype=obj -o %t.rv64 %s
-# RUN: llvm-jitlink -noexec \
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
# RUN: -slab-allocate 100Kb -slab-address 0x1000 -slab-page-size 4096 \
-# RUN: -debug-only=jitlink -check %s %t.rv64 \
-# RUN: 2>&1 | FileCheck %s
-# RUN: llvm-jitlink -noexec \
+# RUN: -check %s %t.rv64 2>&1 \
+# RUN: | FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
# RUN: -slab-allocate 100Kb -slab-address 0x1000 -slab-page-size 4096 \
-# RUN: -debug-only=jitlink -check %s -check-name=jitlink-check-rv64 %t.rv64 \
-# RUN: 2>&1 | FileCheck -check-prefix=CHECK-RV64 %s
+# RUN: -check %s -check-name=jitlink-check-rv64 %t.rv64 2>&1 \
+# RUN: | FileCheck -check-prefix=CHECK-RV64 %s
.text
diff --git a/llvm/test/ExecutionEngine/JITLink/RISCV/anonymous_symbol.s b/llvm/test/ExecutionEngine/JITLink/RISCV/anonymous_symbol.s
index e7114e4..a1badfd 100644
--- a/llvm/test/ExecutionEngine/JITLink/RISCV/anonymous_symbol.s
+++ b/llvm/test/ExecutionEngine/JITLink/RISCV/anonymous_symbol.s
@@ -1,6 +1,7 @@
# REQUIRES: asserts
# RUN: llvm-mc -triple=riscv64 -filetype=obj -o %t %s
-# RUN: llvm-jitlink -debug-only=jitlink -noexec %t 2>&1 | FileCheck %s
+# RUN: llvm-jitlink -debug-only=jitlink -num-threads=0 -noexec %t 2>&1 \
+# RUN: | FileCheck %s
#
# Because of the exist of cfi directive, sections like eh_frame section will be emitted
# in llvm's object code emission phase. Anonymous symbols will also be emitted to indicate
diff --git a/llvm/test/ExecutionEngine/JITLink/ppc64/ELF_ppc64_ehframe.s b/llvm/test/ExecutionEngine/JITLink/ppc64/ELF_ppc64_ehframe.s
index 9e9b340..75f09ff 100644
--- a/llvm/test/ExecutionEngine/JITLink/ppc64/ELF_ppc64_ehframe.s
+++ b/llvm/test/ExecutionEngine/JITLink/ppc64/ELF_ppc64_ehframe.s
@@ -1,10 +1,12 @@
# REQUIRES: asserts
# RUN: llvm-mc -triple=powerpc64le-unknown-linux-gnu -filetype=obj -o %t %s
-# RUN: llvm-jitlink -noexec -phony-externals -debug-only=jitlink %t 2>&1 | \
-# RUN: FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec -phony-externals \
+# RUN: %t 2>&1 \
+# RUN: | FileCheck %s
# RUN: llvm-mc -triple=powerpc64-unknown-linux-gnu -filetype=obj -o %t %s
-# RUN: llvm-jitlink -noexec -phony-externals -debug-only=jitlink %t 2>&1 | \
-# RUN: FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec -phony-externals \
+# RUN: %t 2>&1 \
+# RUN: | FileCheck %s
#
# Check that splitting of eh-frame sections works.
#
diff --git a/llvm/test/ExecutionEngine/JITLink/ppc64/external_weak.s b/llvm/test/ExecutionEngine/JITLink/ppc64/external_weak.s
index 0bc9090..7021a27 100644
--- a/llvm/test/ExecutionEngine/JITLink/ppc64/external_weak.s
+++ b/llvm/test/ExecutionEngine/JITLink/ppc64/external_weak.s
@@ -4,8 +4,9 @@
# RUN: %t/external_weak.o %S/Inputs/external_weak.s
# RUN: llvm-mc -triple=powerpc64le-unknown-linux-gnu -filetype=obj -o \
# RUN: %t/external_weak_main.o %S/Inputs/external_weak_main.s
-# RUN: llvm-jitlink -noexec -debug-only=jitlink %t/external_weak.o \
-# RUN: %t/external_weak_main.o 2>&1 | FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
+# RUN: %t/external_weak.o %t/external_weak_main.o 2>&1 \
+# RUN: | FileCheck %s
# CHECK: Created ELFLinkGraphBuilder for "{{.*}}external_weak_main.o"
# CHECK: Creating defined graph symbol for ELF symbol "foo"
# CHECK: External symbols:
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_abs.s b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_abs.s
index 830a2e0..d69dbbd 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_abs.s
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_abs.s
@@ -1,6 +1,7 @@
# REQUIRES: asserts
# RUN: llvm-mc -filetype=obj -triple=x86_64-windows-msvc %s -o %t
-# RUN: llvm-jitlink --debug-only=jitlink -noexec %t 2>&1 | FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec %t 2>&1 \
+# RUN: | FileCheck %s
#
# Check absolute symbol is created with a correct value.
#
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_any.test b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_any.test
index 10f1182..b117674 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_any.test
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_any.test
@@ -1,7 +1,8 @@
# REQUIRES: asserts
# RUN: yaml2obj %s -o %t
-# RUN: llvm-jitlink -noexec --debug-only=jitlink -noexec %t 2>&1 | FileCheck %s
-#
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec %t 2>&1 \
+# RUN: | FileCheck %s
+#
# Check a weak symbol is created for a COMDAT symbol with IMAGE_COMDAT_SELECT_ANY selection type.
#
# CHECK: Creating graph symbols...
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_associative.test b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_associative.test
index 7dfb4c7..8915d04 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_associative.test
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_associative.test
@@ -1,16 +1,19 @@
# REQUIRES: asserts
# RUN: yaml2obj %s -o %t
-# RUN: llvm-jitlink -noexec --debug-only=jitlink -noexec %t 2>&1
-#
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec %t 2>&1 \
+# RUN: | FileCheck %s
+#
# Check COMDAT associative symbol is emitted as local symbol.
#
-# CHECK: Creating graph symbols...
-# CHECK: 2: Creating defined graph symbol for COFF symbol ".text" in .text (index: 2)
-# CHECK-NEXT: 0x0 (block + 0x00000000): size: 0x00000001, linkage: strong, scope: local, dead - <anonymous symbol>
-# CHECK-NEXT: 4: Exporting COMDAT graph symbol for COFF symbol "func" in section 2
-# CHECK-NEXT: 0x0 (block + 0x00000000): size: 0x00000001, linkage: weak, scope: default, dead - func
-# CHECK-NEXT: 5: Creating defined graph symbol for COFF symbol ".xdata" in .xdata (index: 3)
-# CHECK-NEXT: 0x0 (block + 0x00000000): size: 0x00000000, linkage: strong, scope: local, dead - .xdata
+# CHECK: Creating graph symbols...
+# CHECK: 0: Creating defined graph symbol for COFF symbol ".text" in .text (index: 1)
+# CHECK-NEXT: 0x0 (block + 0x00000000): size: 0x00000000, linkage: strong, scope: local, dead - .text
+# CHECK-NEXT: 4: Exporting COMDAT graph symbol for COFF symbol "func" in section 2
+# CHECK-NEXT: 0x0 (block + 0x00000000): size: 0x00000000, linkage: weak, scope: default, dead - func
+# CHECK-NEXT: 4: Creating defined graph symbol for COFF symbol "func" in .text (index: 2)
+# CHECK-NEXT: 0x0 (block + 0x00000000): size: 0x00000000, linkage: weak, scope: default, dead - func
+# CHECK-NEXT: 5: Creating defined graph symbol for COFF symbol ".xdata" in .xdata (index: 3)
+# CHECK-NEXT: 0x0 (block + 0x00000000): size: 0x00000000, linkage: strong, scope: local, dead - .xdata
--- !COFF
header:
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_exact_match.test b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_exact_match.test
index f757271..76a0ac4 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_exact_match.test
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_exact_match.test
@@ -1,7 +1,8 @@
# REQUIRES: asserts
# RUN: yaml2obj %s -o %t
-# RUN: llvm-jitlink -noexec --debug-only=jitlink -noexec %t 2>&1 | FileCheck %s
-#
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec %t 2>&1 \
+# RUN: | FileCheck %s
+#
# Check a weak symbol is created for a COMDAT symbol with IMAGE_COMDAT_SELECT_EXACT_MATCH selection type.
# Doesn't check the content validation.
#
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_intervene.test b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_intervene.test
index 11a1825..79f4b15 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_intervene.test
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_intervene.test
@@ -1,7 +1,8 @@
# REQUIRES: asserts
# RUN: yaml2obj %s -o %t
-# RUN: llvm-jitlink -noexec --debug-only=jitlink -noexec %t 2>&1 | FileCheck %s
-#
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec %t 2>&1 \
+# RUN: | FileCheck %s
+#
# Check a comdat export is done correctly even if second symbol of comdat sequences appear out of order
#
# CHECK: Creating graph symbols...
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_largest.test b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_largest.test
index 86d809d..dc05297 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_largest.test
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_largest.test
@@ -1,7 +1,8 @@
# REQUIRES: asserts
# RUN: yaml2obj %s -o %t
-# RUN: llvm-jitlink -noexec --debug-only=jitlink -noexec %t 2>&1
-#
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec %t 2>&1 \
+# RUN: | FileCheck %s
+#
# Check jitlink handles largest selection type as plain weak symbol.
#
# CHECK: Creating graph symbols...
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_noduplicate.test b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_noduplicate.test
index 53b2c81..0c5313e 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_noduplicate.test
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_noduplicate.test
@@ -1,7 +1,8 @@
# REQUIRES: asserts
# RUN: yaml2obj %s -o %t
-# RUN: llvm-jitlink -noexec --debug-only=jitlink -noexec %t 2>&1 | FileCheck %s
-#
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec %t 2>&1 \
+# RUN: | FileCheck %s
+#
# Check a strong symbol is created for a COMDAT symbol with IMAGE_COMDAT_SELECT_NODUPLICATES selection type.
#
# CHECK: Creating graph symbols...
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_offset.test b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_offset.test
index 97467fd..6cd8ff9 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_offset.test
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_offset.test
@@ -1,7 +1,8 @@
# REQUIRES: asserts
# RUN: yaml2obj %s -o %t
-# RUN: llvm-jitlink -noexec --debug-only=jitlink -noexec %t 2>&1 | FileCheck %s
-#
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec %t 2>&1 \
+# RUN: | FileCheck %s
+#
# Check a COMDAT symbol with an offset is handled correctly.
#
# CHECK: Creating graph symbols...
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_same_size.test b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_same_size.test
index ef0f84a..e1d955f 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_same_size.test
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_same_size.test
@@ -1,7 +1,8 @@
# REQUIRES: asserts
# RUN: yaml2obj %s -o %t
-# RUN: llvm-jitlink -noexec --debug-only=jitlink -noexec %t 2>&1 | FileCheck %s
-#
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec %t 2>&1 \
+# RUN: | FileCheck %s
+#
# Check a weak symbol is created for a COMDAT symbol with IMAGE_COMDAT_SELECT_SAME_SIZE selection type.
# Doesn't check the size validation.
#
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_weak.s b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_weak.s
index 79ac75f..8fa8ba0 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_weak.s
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_comdat_weak.s
@@ -1,6 +1,7 @@
# REQUIRES: asserts
# RUN: llvm-mc -filetype=obj -triple=x86_64-windows-msvc %s -o %t
-# RUN: llvm-jitlink --debug-only=jitlink -noexec %t 2>&1 | FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec %t 2>&1 \
+# RUN: | FileCheck %s
#
# Check a COMDAT any symbol is exported as a weak symbol.
#
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_common_symbol.s b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_common_symbol.s
index 2d4ad30..2788a9b 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_common_symbol.s
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_common_symbol.s
@@ -1,6 +1,7 @@
# REQUIRES: asserts
# RUN: llvm-mc -filetype=obj -triple=x86_64-windows-msvc %s -o %t
-# RUN: llvm-jitlink --debug-only=jitlink -noexec %t 2>&1 | FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec %t 2>&1 \
+# RUN: | FileCheck %s
#
# Check a common symbol is created.
#
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_duplicate_externals.test b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_duplicate_externals.test
index e929c01..ebce795 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_duplicate_externals.test
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_duplicate_externals.test
@@ -1,10 +1,10 @@
# REQUIRES: asserts
# RUN: yaml2obj %s -o %t
-# RUN: llvm-jitlink -noexec -abs __ImageBase=0xfff00000 \
-# RUN: --debug-only=jitlink \
-# RUN: -slab-allocate 100Kb -slab-address 0xfff00000 -slab-page-size 4096 \
-# RUN: %t 2>&1 | FileCheck %s
-#
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
+# RUN: -abs __ImageBase=0xfff00000 -slab-allocate 100Kb \
+# RUN: -slab-address 0xfff00000 -slab-page-size 4096 %t 2>&1 \
+# RUN: | FileCheck %s
+#
# Check duplicate undefined external symbols are handled correctly.
#
# CHECK: Creating graph symbols...
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_file_debug.s b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_file_debug.s
index 3980f81..ac1ef2d 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_file_debug.s
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_file_debug.s
@@ -1,6 +1,8 @@
# REQUIRES: asserts
# RUN: llvm-mc -filetype=obj -triple=x86_64-windows-msvc %s -o %t
-# RUN: llvm-jitlink -abs func=0xcafef00d --debug-only=jitlink -noexec %t 2>&1 | FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
+# RUN: -abs func=0xcafef00d %t 2>&1 \
+# RUN: | FileCheck %s
#
# Check a file debug symbol is skipped.
#
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_static_var.s b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_static_var.s
index 5275c7d..dce0c1e 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_static_var.s
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_static_var.s
@@ -1,6 +1,8 @@
# REQUIRES: asserts
# RUN: llvm-mc -filetype=obj -triple=x86_64-windows-msvc %s -o %t
-# RUN: llvm-jitlink -abs var=0xcafef00d --debug-only=jitlink -noexec %t 2>&1 | FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
+# RUN: -abs var=0xcafef00d %t 2>&1 \
+# RUN: | FileCheck %s
#
# Check a local symbol is created for a static variable.
#
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_weak_external.s b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_weak_external.s
index c750d75..d49d561 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_weak_external.s
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/COFF_weak_external.s
@@ -1,6 +1,8 @@
# REQUIRES: asserts
# RUN: llvm-mc -filetype=obj -triple=x86_64-windows-msvc %s -o %t
-# RUN: llvm-jitlink -abs var=0xcafef00d --debug-only=jitlink -noexec %t 2>&1 | FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
+# RUN: -abs var=0xcafef00d %t 2>&1 | \
+# RUN: FileCheck %s
#
# Check a default symbol is aliased as a weak external symbol.
#
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/ELF_debug_section_lifetime_is_NoAlloc.yaml b/llvm/test/ExecutionEngine/JITLink/x86-64/ELF_debug_section_lifetime_is_NoAlloc.yaml
index 0afcda4..09dda47 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/ELF_debug_section_lifetime_is_NoAlloc.yaml
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/ELF_debug_section_lifetime_is_NoAlloc.yaml
@@ -1,6 +1,7 @@
# REQUIRES: asserts
# RUN: yaml2obj -o %t.o %s
-# RUN: llvm-jitlink -debug-only=jitlink -noexec %t.o 2>&1 | FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec %t.o 2>&1 \
+# RUN: | FileCheck %s
#
# Check that debug sections get NoAlloc lifetimes.
#
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/ELF_ehframe_basic.s b/llvm/test/ExecutionEngine/JITLink/x86-64/ELF_ehframe_basic.s
index c01ced5..9339f07 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/ELF_ehframe_basic.s
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/ELF_ehframe_basic.s
@@ -2,8 +2,9 @@
# UNSUPPORTED: system-windows
# RUN: llvm-mc -triple=x86_64-unknown-linux -position-independent \
# RUN: -filetype=obj -o %t %s
-# RUN: llvm-jitlink -debug-only=jitlink -abs bar=0x01 \
-# RUN: -abs _ZTIi=0x02 -noexec %t 2>&1 | FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
+# RUN: -abs bar=0x01 -abs _ZTIi=0x02 %t 2>&1 \
+# RUN: | FileCheck %s
#
# FIXME: This test should run on windows. Investigate spurious
# 'note: command had no output on stdout or stderr' errors, then re-enable.
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/ELF_ehframe_large_static_personality_encodings.s b/llvm/test/ExecutionEngine/JITLink/x86-64/ELF_ehframe_large_static_personality_encodings.s
index 64990b5..98fc5f4 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/ELF_ehframe_large_static_personality_encodings.s
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/ELF_ehframe_large_static_personality_encodings.s
@@ -2,8 +2,9 @@
# UNSUPPORTED: system-windows
# RUN: llvm-mc -triple=x86_64-pc-linux-gnu -large-code-model \
# RUN: -filetype=obj -o %t %s
-# RUN: llvm-jitlink -debug-only=jitlink -noexec -phony-externals %t 2>&1 | \
-# RUN: FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
+# RUN: -phony-externals %t 2>&1 \
+# RUN: | FileCheck %s
#
# Check handling of pointer encodings for personality functions when compiling
# with `-mcmodel=large -static`.
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/LocalDependencyPropagation.s b/llvm/test/ExecutionEngine/JITLink/x86-64/LocalDependencyPropagation.s
index 0898ad8..83d71cd 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/LocalDependencyPropagation.s
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/LocalDependencyPropagation.s
@@ -1,7 +1,8 @@
# REQUIRES: asserts
# RUN: llvm-mc -triple=x86_64-apple-macosx10.9 -filetype=obj -o %t %s
-# RUN: llvm-jitlink -debug-only=orc -num-threads=0 -noexec \
-# RUN: -abs _external_func=0x1 -entry=_foo %t 2>&1 | FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=orc -noexec \
+# RUN: -abs _external_func=0x1 -entry=_foo %t 2>&1 \
+# RUN: | FileCheck %s
#
# Check that simplification eliminates dependencies on symbols in this unit,
# and correctly propagates dependencies on symbols outside the unit (including
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/MachO-check-dwarf-filename.s b/llvm/test/ExecutionEngine/JITLink/x86-64/MachO-check-dwarf-filename.s
index df44ce9..81ea18f 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/MachO-check-dwarf-filename.s
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/MachO-check-dwarf-filename.s
@@ -1,6 +1,7 @@
# RUN: llvm-mc -triple=x86_64-apple-macosx10.9 -filetype=obj -o %t.o %s
-# RUN: llvm-jitlink -debug-only=orc -noexec -debugger-support %t.o 2>&1 | \
-# RUN: FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=orc -noexec -debugger-support \
+# RUN: %t.o 2>&1 \
+# RUN: | FileCheck %s
#
# REQUIRES: asserts && system-darwin
#
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_compact_unwind.s b/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_compact_unwind.s
index e5783141..3852207 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_compact_unwind.s
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_compact_unwind.s
@@ -1,6 +1,7 @@
# REQUIRES: asserts
# RUN: llvm-mc -triple=x86_64-apple-darwin11 -filetype=obj -o %t %s
-# RUN: llvm-jitlink -noexec -debug-only=jitlink %t 2>&1 | FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec %t 2>&1 \
+# RUN: | FileCheck %s
#
# Check that splitting of compact-unwind sections works.
#
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_cstring_section_alignment.s b/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_cstring_section_alignment.s
index 5a8cef5..3859a35 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_cstring_section_alignment.s
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_cstring_section_alignment.s
@@ -1,6 +1,7 @@
# REQUIRES: asserts
# RUN: llvm-mc -triple=x86_64-apple-macos10.9 -filetype=obj -o %t %s
-# RUN: llvm-jitlink -debug-only=jitlink -noexec %t 2>&1 | FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec %t 2>&1 \
+# RUN: | FileCheck %s
#
# Verify that PC-begin candidate symbols have been sorted correctly when adding
# PC-begin edges for FDEs. In this test both _main and _X are at address zero,
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_cstring_section_splitting.s b/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_cstring_section_splitting.s
index a5baf56..0d68a10 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_cstring_section_splitting.s
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_cstring_section_splitting.s
@@ -1,7 +1,8 @@
# REQUIRES: asserts
# RUN: llvm-mc -triple=x86_64-apple-macosx10.9 -filetype=obj -o %t %s
-# RUN: llvm-jitlink -debug-only=jitlink -noexec -entry hook %t 2>&1 | \
-# RUN: FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec \
+# RUN: -entry hook %t 2>&1 \
+# RUN: | FileCheck %s
#
# Verify that we split C string literals on null-terminators, rather than on
# symbol boundaries. We expect four dead-stripped symbols: l_str.0, l_str.2,
diff --git a/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_non_subsections_via_symbols.s b/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_non_subsections_via_symbols.s
index e1adb3b..66fcb47 100644
--- a/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_non_subsections_via_symbols.s
+++ b/llvm/test/ExecutionEngine/JITLink/x86-64/MachO_non_subsections_via_symbols.s
@@ -4,7 +4,8 @@
#
# REQUIRES: asserts
# RUN: llvm-mc -triple=x86_64-apple-macosx10.9 -filetype=obj -o %t %s
-# RUN: llvm-jitlink -debug-only=jitlink -noexec %t 2>&1 | FileCheck %s
+# RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec %t 2>&1 \
+# RUN: | FileCheck %s
# CHECK: Creating graph symbols...
# CHECK: Graphifying regular section __DATA,__data...
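
Editor note: every llvm-jitlink RUN line touched in the JITLink tests above is normalized to the same shape, with -num-threads=0 first, then the -debug-only channel, then -noexec, and the FileCheck pipe on its own continuation line. A hedged template of that shape, with %t and the debug channel as placeholders rather than values taken from any single test:

    # RUN: llvm-jitlink -num-threads=0 -debug-only=jitlink -noexec %t 2>&1 \
    # RUN:   | FileCheck %s
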
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/zero-ptr.ll b/llvm/test/Instrumentation/HWAddressSanitizer/zero-ptr.ll
new file mode 100644
index 0000000..a201174
--- /dev/null
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/zero-ptr.ll
@@ -0,0 +1,35 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt < %s -passes=hwasan -S | FileCheck %s
+; RUN: opt < %s -passes=hwasan -hwasan-recover=0 -hwasan-mapping-offset=0 -S | FileCheck %s --check-prefixes=ABORT-ZERO-BASED-SHADOW
+
+; This shows that HWASan will emit a memaccess check when dereferencing a null
+; pointer.
+; The output is used as the source for llvm/test/CodeGen/AArch64/hwasan-zero-ptr.ll.
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-android10000"
+
+define void @test_store_to_zeroptr() sanitize_hwaddress {
+; CHECK-LABEL: define void @test_store_to_zeroptr
+; CHECK-SAME: () #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
+; CHECK-NEXT: [[B:%.*]] = inttoptr i64 0 to ptr
+; CHECK-NEXT: call void @llvm.hwasan.check.memaccess.shortgranules(ptr [[DOTHWASAN_SHADOW]], ptr [[B]], i32 19)
+; CHECK-NEXT: store i64 42, ptr [[B]], align 8
+; CHECK-NEXT: ret void
+;
+; ABORT-ZERO-BASED-SHADOW-LABEL: define void @test_store_to_zeroptr
+; ABORT-ZERO-BASED-SHADOW-SAME: () #[[ATTR0:[0-9]+]] {
+; ABORT-ZERO-BASED-SHADOW-NEXT: entry:
+; ABORT-ZERO-BASED-SHADOW-NEXT: [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr null)
+; ABORT-ZERO-BASED-SHADOW-NEXT: [[B:%.*]] = inttoptr i64 0 to ptr
+; ABORT-ZERO-BASED-SHADOW-NEXT: call void @llvm.hwasan.check.memaccess.shortgranules.fixedshadow(ptr [[B]], i32 19, i64 0)
+; ABORT-ZERO-BASED-SHADOW-NEXT: store i64 42, ptr [[B]], align 8
+; ABORT-ZERO-BASED-SHADOW-NEXT: ret void
+;
+entry:
+ %b = inttoptr i64 0 to i64*
+ store i64 42, ptr %b
+ ret void
+}
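
Editor note: the comment in the new test says its opt output is the source of llvm/test/CodeGen/AArch64/hwasan-zero-ptr.ll. A minimal sketch of how that companion input could be regenerated, assuming the same hwasan pass invocation as the RUN lines above and llc for the follow-on CodeGen check; the exact workflow the author used is an assumption, and the output file name here is hypothetical:

    opt -passes=hwasan -S llvm/test/Instrumentation/HWAddressSanitizer/zero-ptr.ll -o zero-ptr.hwasan.ll
    llc -mtriple=aarch64--linux-android10000 zero-ptr.hwasan.ll -o -
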
diff --git a/llvm/test/Instrumentation/TypeSanitizer/access-with-offset.ll b/llvm/test/Instrumentation/TypeSanitizer/access-with-offset.ll
index 78f3816..56cf3f5 100644
--- a/llvm/test/Instrumentation/TypeSanitizer/access-with-offset.ll
+++ b/llvm/test/Instrumentation/TypeSanitizer/access-with-offset.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
-; RUN: opt -passes='tysan-module,tysan' -S %s | FileCheck %s
+; RUN: opt -passes='tysan' -S %s | FileCheck %s
;.
; CHECK: @llvm.global_ctors = appending global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 0, ptr @tysan.module_ctor, ptr null }]
diff --git a/llvm/test/Instrumentation/TypeSanitizer/alloca-only.ll b/llvm/test/Instrumentation/TypeSanitizer/alloca-only.ll
index 1aa47cac..117cd1a 100644
--- a/llvm/test/Instrumentation/TypeSanitizer/alloca-only.ll
+++ b/llvm/test/Instrumentation/TypeSanitizer/alloca-only.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
; Test basic type sanitizer instrumentation.
;
-; RUN: opt -passes='tysan-module,tysan' -S %s | FileCheck %s
+; RUN: opt -passes='tysan' -S %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/llvm/test/Instrumentation/TypeSanitizer/alloca.ll b/llvm/test/Instrumentation/TypeSanitizer/alloca.ll
index 94098bd..ea5adf6 100644
--- a/llvm/test/Instrumentation/TypeSanitizer/alloca.ll
+++ b/llvm/test/Instrumentation/TypeSanitizer/alloca.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; Test basic type sanitizer instrumentation.
;
-; RUN: opt -passes='tysan-module,tysan' -S %s | FileCheck %s
+; RUN: opt -passes='tysan' -S %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/llvm/test/Instrumentation/TypeSanitizer/anon.ll b/llvm/test/Instrumentation/TypeSanitizer/anon.ll
index ce4f0c1b..37de1b7 100644
--- a/llvm/test/Instrumentation/TypeSanitizer/anon.ll
+++ b/llvm/test/Instrumentation/TypeSanitizer/anon.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
; Test basic type sanitizer instrumentation.
;
-; RUN: opt -passes='tysan-module,tysan' -S %s | FileCheck %s
+; RUN: opt -passes='tysan' -S %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/llvm/test/Instrumentation/TypeSanitizer/basic-nosan.ll b/llvm/test/Instrumentation/TypeSanitizer/basic-nosan.ll
index 9b9522f..8ddc573 100644
--- a/llvm/test/Instrumentation/TypeSanitizer/basic-nosan.ll
+++ b/llvm/test/Instrumentation/TypeSanitizer/basic-nosan.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals --include-generated-funcs
; Test basic type sanitizer instrumentation.
-; RUN: opt -passes='tysan-module,tysan' -S %s | FileCheck %s
+; RUN: opt -passes='tysan' -S %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/llvm/test/Instrumentation/TypeSanitizer/basic.ll b/llvm/test/Instrumentation/TypeSanitizer/basic.ll
index 8873a40..704c188 100644
--- a/llvm/test/Instrumentation/TypeSanitizer/basic.ll
+++ b/llvm/test/Instrumentation/TypeSanitizer/basic.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
; Test basic type sanitizer instrumentation.
;
-; RUN: opt -passes='tysan-module,tysan' -S %s | FileCheck %s
+; RUN: opt -passes='tysan' -S %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/llvm/test/Instrumentation/TypeSanitizer/byval.ll b/llvm/test/Instrumentation/TypeSanitizer/byval.ll
index 23ed1b0..6ae343d 100644
--- a/llvm/test/Instrumentation/TypeSanitizer/byval.ll
+++ b/llvm/test/Instrumentation/TypeSanitizer/byval.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals --include-generated-funcs
; Test basic type sanitizer instrumentation.
-; RUN: opt -passes='tysan-module,tysan' -S %s | FileCheck %s
+; RUN: opt -passes='tysan' -S %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/llvm/test/Instrumentation/TypeSanitizer/globals.ll b/llvm/test/Instrumentation/TypeSanitizer/globals.ll
index 1f57c2a..a73599e 100644
--- a/llvm/test/Instrumentation/TypeSanitizer/globals.ll
+++ b/llvm/test/Instrumentation/TypeSanitizer/globals.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-globals --include-generated-funcs
-; RUN: opt -passes='tysan-module,tysan' -S %s | FileCheck %s
+; RUN: opt -passes='tysan' -S %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/llvm/test/Instrumentation/TypeSanitizer/invalid-metadata.ll b/llvm/test/Instrumentation/TypeSanitizer/invalid-metadata.ll
index e7de62e..0c99c0f 100644
--- a/llvm/test/Instrumentation/TypeSanitizer/invalid-metadata.ll
+++ b/llvm/test/Instrumentation/TypeSanitizer/invalid-metadata.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-globals --include-generated-funcs
-; RUN: opt -passes='tysan-module,tysan' -S %s | FileCheck %s
+; RUN: opt -passes='tysan' -S %s | FileCheck %s
!llvm.tysan.globals = !{!0}
diff --git a/llvm/test/Instrumentation/TypeSanitizer/memintrinsics.ll b/llvm/test/Instrumentation/TypeSanitizer/memintrinsics.ll
index 26f7c18..65a30bd 100644
--- a/llvm/test/Instrumentation/TypeSanitizer/memintrinsics.ll
+++ b/llvm/test/Instrumentation/TypeSanitizer/memintrinsics.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; Test basic type sanitizer instrumentation.
;
-; RUN: opt -passes='tysan-module,tysan' -S %s | FileCheck %s
+; RUN: opt -passes='tysan' -S %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/llvm/test/Instrumentation/TypeSanitizer/nosanitize.ll b/llvm/test/Instrumentation/TypeSanitizer/nosanitize.ll
index 7b07a42..c7c153e 100644
--- a/llvm/test/Instrumentation/TypeSanitizer/nosanitize.ll
+++ b/llvm/test/Instrumentation/TypeSanitizer/nosanitize.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
; Test basic type sanitizer instrumentation.
;
-; RUN: opt -passes='tysan-module,tysan' -S %s | FileCheck %s
+; RUN: opt -passes='tysan' -S %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/llvm/test/Instrumentation/TypeSanitizer/sanitize-no-tbaa.ll b/llvm/test/Instrumentation/TypeSanitizer/sanitize-no-tbaa.ll
index 3cb7b83..060f031 100644
--- a/llvm/test/Instrumentation/TypeSanitizer/sanitize-no-tbaa.ll
+++ b/llvm/test/Instrumentation/TypeSanitizer/sanitize-no-tbaa.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; Test basic type sanitizer instrumentation.
;
-; RUN: opt -passes='tysan-module,tysan' -S %s | FileCheck %s
+; RUN: opt -passes='tysan' -S %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/llvm/test/Instrumentation/TypeSanitizer/swifterror.ll b/llvm/test/Instrumentation/TypeSanitizer/swifterror.ll
index 5711fb4..dc83a02 100644
--- a/llvm/test/Instrumentation/TypeSanitizer/swifterror.ll
+++ b/llvm/test/Instrumentation/TypeSanitizer/swifterror.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; Test basic type sanitizer instrumentation.
;
-; RUN: opt -passes='tysan-module,tysan' -S %s | FileCheck %s
+; RUN: opt -passes='tysan' -S %s | FileCheck %s
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
diff --git a/llvm/test/MC/AArch64/armv9.6a-rme-gpc3.s b/llvm/test/MC/AArch64/armv9.6a-rme-gpc3.s
index baf05f1..093101b 100644
--- a/llvm/test/MC/AArch64/armv9.6a-rme-gpc3.s
+++ b/llvm/test/MC/AArch64/armv9.6a-rme-gpc3.s
@@ -2,10 +2,18 @@
// RUN: llvm-mc -triple aarch64 -show-encoding %s | FileCheck %s
.func:
apas x0
+ apas x1
+ apas x2
+ apas x17
+ apas x30
mrs x3, GPCBW_EL3
msr GPCBW_EL3, x4
# CHECK: .func:
-# CHECK-NEXT: apas x0 // encoding: [0x1f,0x70,0x0e,0xd5]
+# CHECK-NEXT: apas x0 // encoding: [0x00,0x70,0x0e,0xd5]
+# CHECK-NEXT: apas x1 // encoding: [0x01,0x70,0x0e,0xd5]
+# CHECK-NEXT: apas x2 // encoding: [0x02,0x70,0x0e,0xd5]
+# CHECK-NEXT: apas x17 // encoding: [0x11,0x70,0x0e,0xd5]
+# CHECK-NEXT: apas x30 // encoding: [0x1e,0x70,0x0e,0xd5]
# CHECK-NEXT: mrs x3, GPCBW_EL3 // encoding: [0xa3,0x21,0x3e,0xd5]
# CHECK-NEXT: msr GPCBW_EL3, x4 // encoding: [0xa4,0x21,0x1e,0xd5]
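
Editor note: the corrected CHECK lines are consistent with apas encoding its GPR operand directly in the low five bits of the first encoding byte; the old 0x1f value corresponds to an Rt field forced to 0b11111. A small shell sketch, assuming that reading of the bytes above, reproduces the expected first byte for each register added to the test:

    for rt in 0 1 2 17 30; do printf 'apas x%-2d -> byte0 0x%02x\n' "$rt" "$rt"; done
    # prints 0x00, 0x01, 0x02, 0x11, 0x1e, matching the updated encodings
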
diff --git a/llvm/test/MC/AMDGPU/gfx1030_err.s b/llvm/test/MC/AMDGPU/gfx1030_err.s
index 87a0987..a0565dc 100644
--- a/llvm/test/MC/AMDGPU/gfx1030_err.s
+++ b/llvm/test/MC/AMDGPU/gfx1030_err.s
@@ -573,3 +573,9 @@ v_dot8_u32_u4 v0, v1, v2, v3 op_sel:[1,1] op_sel_hi:[1,0]
v_dot8_u32_u4 v0, v1, v2, v3 op_sel:[1,1] op_sel_hi:[1,1]
// GFX10: :[[@LINE-1]]:{{[0-9]+}}: error: not a valid operand.
+
+image_bvh_intersect_ray v[4:7], v[9:19], null
+// GFX10: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+image_bvh64_intersect_ray v[4:7], v[9:20], null
+// GFX10: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
diff --git a/llvm/test/MC/AMDGPU/gfx10_asm_smem_err.s b/llvm/test/MC/AMDGPU/gfx10_asm_smem_err.s
index 670e973..74c283c 100644
--- a/llvm/test/MC/AMDGPU/gfx10_asm_smem_err.s
+++ b/llvm/test/MC/AMDGPU/gfx10_asm_smem_err.s
@@ -84,3 +84,5 @@ s_buffer_load_dwordx16 s[4:19], null, s101
s_buffer_store_dword s4, null, s101
// NOGFX10: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+s_atc_probe_buffer 7, null, s2
+// NOGFX10: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_mimg_err.s b/llvm/test/MC/AMDGPU/gfx11_asm_mimg_err.s
index 9c61445..2586198 100644
--- a/llvm/test/MC/AMDGPU/gfx11_asm_mimg_err.s
+++ b/llvm/test/MC/AMDGPU/gfx11_asm_mimg_err.s
@@ -517,3 +517,9 @@ image_sample_o v[5:6], v[1:2], null, s[12:15] dmask:0x3 dim:SQ_RSRC_IMG_1D
image_sample_o v[5:6], v[1:2], s[8:15], null dmask:0x3 dim:SQ_RSRC_IMG_1D
// NOGFX11: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+image_bvh_intersect_ray v[4:7], v[9:19], null
+// NOGFX11: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+image_bvh64_intersect_ray v[4:7], v[9:20], null
+// NOGFX11: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_smem_err.s b/llvm/test/MC/AMDGPU/gfx11_asm_smem_err.s
index da195b4..7dd6ded 100644
--- a/llvm/test/MC/AMDGPU/gfx11_asm_smem_err.s
+++ b/llvm/test/MC/AMDGPU/gfx11_asm_smem_err.s
@@ -29,3 +29,6 @@ s_buffer_load_dwordx8 s[4:11], null, s101
s_buffer_load_dwordx16 s[4:19], null, s101
// NOGFX11: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+s_atc_probe_buffer 7, null, s2
+// NOGFX11: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop3.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop3.s
index fc8d2bd..6bc92bc 100644
--- a/llvm/test/MC/AMDGPU/gfx11_asm_vop3.s
+++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop3.s
@@ -3746,50 +3746,62 @@ v_max_u16 v5.l, v255.l, v255.h
v_max_u16 v255.h, 0xfe0b, vcc_hi
// GFX11: v_max_u16 v255.h, 0xfe0b, vcc_hi op_sel:[0,0,1] ; encoding: [0xff,0x40,0x09,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
-v_maxmin_f16 v5, v1, v2, s3
-// GFX11: v_maxmin_f16 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x60,0xd6,0x01,0x05,0x0e,0x00]
+v_maxmin_f16 v5.l, v1.l, v2.l, s3
+// GFX11: v_maxmin_f16 v5.l, v1.l, v2.l, s3 ; encoding: [0x05,0x00,0x60,0xd6,0x01,0x05,0x0e,0x00]
-v_maxmin_f16 v5, v255, s2, s105
-// GFX11: v_maxmin_f16 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x60,0xd6,0xff,0x05,0xa4,0x01]
+v_maxmin_f16 v5.l, v255.l, s2, s105
+// GFX11: v_maxmin_f16 v5.l, v255.l, s2, s105 ; encoding: [0x05,0x00,0x60,0xd6,0xff,0x05,0xa4,0x01]
-v_maxmin_f16 v5, s1, v255, exec_hi
-// GFX11: v_maxmin_f16 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x60,0xd6,0x01,0xfe,0xff,0x01]
+v_maxmin_f16 v5.l, s1, v255.l, exec_hi
+// GFX11: v_maxmin_f16 v5.l, s1, v255.l, exec_hi ; encoding: [0x05,0x00,0x60,0xd6,0x01,0xfe,0xff,0x01]
-v_maxmin_f16 v5, s105, s105, exec_lo
-// GFX11: v_maxmin_f16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x60,0xd6,0x69,0xd2,0xf8,0x01]
+v_maxmin_f16 v5.l, s105, s105, exec_lo
+// GFX11: v_maxmin_f16 v5.l, s105, s105, exec_lo ; encoding: [0x05,0x00,0x60,0xd6,0x69,0xd2,0xf8,0x01]
-v_maxmin_f16 v5, vcc_lo, ttmp15, v3
-// GFX11: v_maxmin_f16 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x60,0xd6,0x6a,0xf6,0x0c,0x04]
+v_maxmin_f16 v5.l, vcc_lo, ttmp15, v3.l
+// GFX11: v_maxmin_f16 v5.l, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x60,0xd6,0x6a,0xf6,0x0c,0x04]
-v_maxmin_f16 v5, vcc_hi, 0xfe0b, v255
-// GFX11: v_maxmin_f16 v5, vcc_hi, 0xfe0b, v255 ; encoding: [0x05,0x00,0x60,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+v_maxmin_f16 v5.l, vcc_hi, 0xfe0b, v255.l
+// GFX11: v_maxmin_f16 v5.l, vcc_hi, 0xfe0b, v255.l ; encoding: [0x05,0x00,0x60,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
-v_maxmin_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
-// GFX11: v_maxmin_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x60,0xd6,0x7b,0xfa,0xed,0xe1]
+v_maxmin_f16 v5.l, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: v_maxmin_f16 v5.l, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x60,0xd6,0x7b,0xfa,0xed,0xe1]
-v_maxmin_f16 v5, m0, 0.5, m0
-// GFX11: v_maxmin_f16 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x60,0xd6,0x7d,0xe0,0xf5,0x01]
+v_maxmin_f16 v5.l, m0, 0.5, m0
+// GFX11: v_maxmin_f16 v5.l, m0, 0.5, m0 ; encoding: [0x05,0x00,0x60,0xd6,0x7d,0xe0,0xf5,0x01]
-v_maxmin_f16 v5, |exec_lo|, -1, vcc_hi
-// GFX11: v_maxmin_f16 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x60,0xd6,0x7e,0x82,0xad,0x01]
+v_maxmin_f16 v5.l, |exec_lo|, -1, vcc_hi
+// GFX11: v_maxmin_f16 v5.l, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x60,0xd6,0x7e,0x82,0xad,0x01]
-v_maxmin_f16 v5, -|exec_hi|, null, -|vcc_lo|
-// GFX11: v_maxmin_f16 v5, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x60,0xd6,0x7f,0xf8,0xa8,0xa1]
+v_maxmin_f16 v5.l, -|exec_hi|, null, -|vcc_lo|
+// GFX11: v_maxmin_f16 v5.l, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x60,0xd6,0x7f,0xf8,0xa8,0xa1]
-v_maxmin_f16 v5, null, exec_lo, -|0xfe0b|
-// GFX11: v_maxmin_f16 v5, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x60,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+v_maxmin_f16 v5.l, null, exec_lo, -|0xfe0b|
+// GFX11: v_maxmin_f16 v5.l, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x60,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
-v_maxmin_f16 v5, -1, -|exec_hi|, -|src_scc|
-// GFX11: v_maxmin_f16 v5, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x60,0xd6,0xc1,0xfe,0xf4,0xc3]
+v_maxmin_f16 v5.l, -1, -|exec_hi|, -|src_scc|
+// GFX11: v_maxmin_f16 v5.l, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x60,0xd6,0xc1,0xfe,0xf4,0xc3]
-v_maxmin_f16 v5, 0.5, -m0, 0.5 mul:2
-// GFX11: v_maxmin_f16 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x60,0xd6,0xf0,0xfa,0xc0,0x4b]
+v_maxmin_f16 v5.l, 0.5, -m0, 0.5 mul:2
+// GFX11: v_maxmin_f16 v5.l, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x60,0xd6,0xf0,0xfa,0xc0,0x4b]
-v_maxmin_f16 v5, -src_scc, |vcc_lo|, -1 mul:4
-// GFX11: v_maxmin_f16 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x60,0xd6,0xfd,0xd4,0x04,0x33]
+v_maxmin_f16 v5.l, -src_scc, |vcc_lo|, -1 mul:4
+// GFX11: v_maxmin_f16 v5.l, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x60,0xd6,0xfd,0xd4,0x04,0x33]
-v_maxmin_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2
-// GFX11: v_maxmin_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x60,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+v_maxmin_f16 v255.l, -|0xfe0b|, -|vcc_hi|, null clamp div:2
+// GFX11: v_maxmin_f16 v255.l, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x60,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+
+v_maxmin_f16 v5.l, v255.h, s2, s105
+// GFX11: v_maxmin_f16 v5.l, v255.h, s2, s105 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x60,0xd6,0xff,0x05,0xa4,0x01]
+
+v_maxmin_f16 v5.l, s1, v255.h, exec_hi
+// GFX11: v_maxmin_f16 v5.l, s1, v255.h, exec_hi op_sel:[0,1,0,0] ; encoding: [0x05,0x10,0x60,0xd6,0x01,0xfe,0xff,0x01]
+
+v_maxmin_f16 v5.l, vcc_hi, 0xfe0b, v255.h
+// GFX11: v_maxmin_f16 v5.l, vcc_hi, 0xfe0b, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x60,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_maxmin_f16 v255.h, -|0xfe0b|, -|vcc_hi|, null clamp div:2
+// GFX11: v_maxmin_f16 v255.h, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp div:2 ; encoding: [0xff,0xc3,0x60,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
v_maxmin_f32 v5, v1, v2, s3
// GFX11: v_maxmin_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x5e,0xd6,0x01,0x05,0x0e,0x00]
@@ -4823,50 +4835,62 @@ v_min_u16 v5.l, v255.l, v255.h
v_min_u16 v255.h, 0xfe0b, vcc_hi
// GFX11: v_min_u16 v255.h, 0xfe0b, vcc_hi op_sel:[0,0,1] ; encoding: [0xff,0x40,0x0b,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
-v_minmax_f16 v5, v1, v2, s3
-// GFX11: v_minmax_f16 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x61,0xd6,0x01,0x05,0x0e,0x00]
+v_minmax_f16 v5.l, v1.l, v2.l, s3
+// GFX11: v_minmax_f16 v5.l, v1.l, v2.l, s3 ; encoding: [0x05,0x00,0x61,0xd6,0x01,0x05,0x0e,0x00]
+
+v_minmax_f16 v5.l, v255.l, s2, s105
+// GFX11: v_minmax_f16 v5.l, v255.l, s2, s105 ; encoding: [0x05,0x00,0x61,0xd6,0xff,0x05,0xa4,0x01]
+
+v_minmax_f16 v5.l, s1, v255.l, exec_hi
+// GFX11: v_minmax_f16 v5.l, s1, v255.l, exec_hi ; encoding: [0x05,0x00,0x61,0xd6,0x01,0xfe,0xff,0x01]
+
+v_minmax_f16 v5.l, s105, s105, exec_lo
+// GFX11: v_minmax_f16 v5.l, s105, s105, exec_lo ; encoding: [0x05,0x00,0x61,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_minmax_f16 v5.l, vcc_lo, ttmp15, v3.l
+// GFX11: v_minmax_f16 v5.l, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x61,0xd6,0x6a,0xf6,0x0c,0x04]
-v_minmax_f16 v5, v255, s2, s105
-// GFX11: v_minmax_f16 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x61,0xd6,0xff,0x05,0xa4,0x01]
+v_minmax_f16 v5.l, vcc_hi, 0xfe0b, v255.l
+// GFX11: v_minmax_f16 v5.l, vcc_hi, 0xfe0b, v255.l ; encoding: [0x05,0x00,0x61,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
-v_minmax_f16 v5, s1, v255, exec_hi
-// GFX11: v_minmax_f16 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x61,0xd6,0x01,0xfe,0xff,0x01]
+v_minmax_f16 v5.l, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX11: v_minmax_f16 v5.l, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x61,0xd6,0x7b,0xfa,0xed,0xe1]
-v_minmax_f16 v5, s105, s105, exec_lo
-// GFX11: v_minmax_f16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x61,0xd6,0x69,0xd2,0xf8,0x01]
+v_minmax_f16 v5.l, m0, 0.5, m0
+// GFX11: v_minmax_f16 v5.l, m0, 0.5, m0 ; encoding: [0x05,0x00,0x61,0xd6,0x7d,0xe0,0xf5,0x01]
-v_minmax_f16 v5, vcc_lo, ttmp15, v3
-// GFX11: v_minmax_f16 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x61,0xd6,0x6a,0xf6,0x0c,0x04]
+v_minmax_f16 v5.l, |exec_lo|, -1, vcc_hi
+// GFX11: v_minmax_f16 v5.l, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x61,0xd6,0x7e,0x82,0xad,0x01]
-v_minmax_f16 v5, vcc_hi, 0xfe0b, v255
-// GFX11: v_minmax_f16 v5, vcc_hi, 0xfe0b, v255 ; encoding: [0x05,0x00,0x61,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+v_minmax_f16 v5.l, -|exec_hi|, null, -|vcc_lo|
+// GFX11: v_minmax_f16 v5.l, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x61,0xd6,0x7f,0xf8,0xa8,0xa1]
-v_minmax_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
-// GFX11: v_minmax_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x61,0xd6,0x7b,0xfa,0xed,0xe1]
+v_minmax_f16 v5.l, null, exec_lo, -|0xfe0b|
+// GFX11: v_minmax_f16 v5.l, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x61,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
-v_minmax_f16 v5, m0, 0.5, m0
-// GFX11: v_minmax_f16 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x61,0xd6,0x7d,0xe0,0xf5,0x01]
+v_minmax_f16 v5.l, -1, -|exec_hi|, -|src_scc|
+// GFX11: v_minmax_f16 v5.l, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x61,0xd6,0xc1,0xfe,0xf4,0xc3]
-v_minmax_f16 v5, |exec_lo|, -1, vcc_hi
-// GFX11: v_minmax_f16 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x61,0xd6,0x7e,0x82,0xad,0x01]
+v_minmax_f16 v5.l, 0.5, -m0, 0.5 mul:2
+// GFX11: v_minmax_f16 v5.l, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x61,0xd6,0xf0,0xfa,0xc0,0x4b]
-v_minmax_f16 v5, -|exec_hi|, null, -|vcc_lo|
-// GFX11: v_minmax_f16 v5, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x61,0xd6,0x7f,0xf8,0xa8,0xa1]
+v_minmax_f16 v5.l, -src_scc, |vcc_lo|, -1 mul:4
+// GFX11: v_minmax_f16 v5.l, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x61,0xd6,0xfd,0xd4,0x04,0x33]
-v_minmax_f16 v5, null, exec_lo, -|0xfe0b|
-// GFX11: v_minmax_f16 v5, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x61,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+v_minmax_f16 v255.l, -|0xfe0b|, -|vcc_hi|, null clamp div:2
+// GFX11: v_minmax_f16 v255.l, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x61,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
-v_minmax_f16 v5, -1, -|exec_hi|, -|src_scc|
-// GFX11: v_minmax_f16 v5, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x61,0xd6,0xc1,0xfe,0xf4,0xc3]
+v_minmax_f16 v5.l, v255.h, s2, s105
+// GFX11: v_minmax_f16 v5.l, v255.h, s2, s105 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x61,0xd6,0xff,0x05,0xa4,0x01]
-v_minmax_f16 v5, 0.5, -m0, 0.5 mul:2
-// GFX11: v_minmax_f16 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x61,0xd6,0xf0,0xfa,0xc0,0x4b]
+v_minmax_f16 v5.l, s1, v255.h, exec_hi
+// GFX11: v_minmax_f16 v5.l, s1, v255.h, exec_hi op_sel:[0,1,0,0] ; encoding: [0x05,0x10,0x61,0xd6,0x01,0xfe,0xff,0x01]
-v_minmax_f16 v5, -src_scc, |vcc_lo|, -1 mul:4
-// GFX11: v_minmax_f16 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x61,0xd6,0xfd,0xd4,0x04,0x33]
+v_minmax_f16 v5.l, vcc_hi, 0xfe0b, v255.h
+// GFX11: v_minmax_f16 v5.l, vcc_hi, 0xfe0b, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x61,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
-v_minmax_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2
-// GFX11: v_minmax_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x61,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+v_minmax_f16 v255.h, -|0xfe0b|, -|vcc_hi|, null clamp div:2
+// GFX11: v_minmax_f16 v255.h, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp div:2 ; encoding: [0xff,0xc3,0x61,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
v_minmax_f32 v5, v1, v2, s3
// GFX11: v_minmax_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x5f,0xd6,0x01,0x05,0x0e,0x00]
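
Editor note: in the v_maxmin_f16/v_minmax_f16 updates above, the new .h operand forms differ from the matching .l forms only in the second encoding byte, and the printed op_sel values track it exactly: selecting the high half of src0, src1, src2, or the destination sets bit 3, 4, 5, or 6 of that byte respectively. A hedged arithmetic check against the encodings shown in this file, assuming that bit layout, which is inferred from the CHECK lines rather than stated in the patch:

    printf '0x%02x 0x%02x 0x%02x\n' $((1<<3)) $((1<<4)) $((1<<5))   # 0x08 0x10 0x20: the src0.h, src1.h, src2.h cases
    printf '0x%02x\n' $(( 0x83 | (1<<6) ))                          # 0xc3: the clamp/neg byte 0x83 with a .h destination
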
diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s
index f715694..5fa1334 100644
--- a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s
+++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp16.s
@@ -2660,47 +2660,92 @@ v_max_u16_e64_dpp v5.l, v1.l, v2.h row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_
v_max_u16_e64_dpp v255.h, v255.l, v255.l row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
// GFX11: v_max_u16_e64_dpp v255.h, v255.l, v255.l op_sel:[0,0,1] row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x40,0x09,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
-v_maxmin_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
-// GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
-v_maxmin_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
-// GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
-v_maxmin_f16_e64_dpp v5, v1, v2, v3 row_mirror
-// GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v3.l row_mirror row_mask:0xf bank_mask:0xf
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
-v_maxmin_f16_e64_dpp v5, v1, v2, v255 row_half_mirror
-// GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v255.l row_half_mirror row_mask:0xf bank_mask:0xf
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v255.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
-v_maxmin_f16_e64_dpp v5, v1, v2, s105 row_shl:1
-// GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_shl:1 row_mask:0xf bank_mask:0xf
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
-v_maxmin_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
-// GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi row_shl:15
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
-v_maxmin_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
-// GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo row_shr:1
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
-v_maxmin_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
-// GFX11: v_maxmin_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x60,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+v_maxmin_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf
+// GFX11: v_maxmin_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x60,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
-v_maxmin_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
-// GFX11: v_maxmin_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x60,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+v_maxmin_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x60,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
-v_maxmin_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
-// GFX11: v_maxmin_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x60,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+v_maxmin_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX11: v_maxmin_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x60,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
-v_maxmin_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
-// GFX11: v_maxmin_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x60,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+v_maxmin_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x60,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
-v_maxmin_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX11: v_maxmin_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x60,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+v_maxmin_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x60,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
-v_maxmin_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX11: v_maxmin_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x06,0x60,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+v_maxmin_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x06,0x60,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
-v_maxmin_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX11: v_maxmin_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x87,0x60,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+v_maxmin_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: v_maxmin_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x87,0x60,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v255.l quad_perm:[0,1,2,3]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v255.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
+
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, s3 row_mirror
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, s3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x40,0x01,0xff]
+
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_half_mirror
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x41,0x01,0xff]
+
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, ttmp15 row_shl:1
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, ttmp15 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xee,0x01,0x01,0x01,0x01,0xff]
+
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, -|m0| row_shr:15
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, -|m0| row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x60,0xd6,0xfa,0x04,0xf6,0x81,0x01,0x1f,0x01,0xff]
+
+v_maxmin_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|exec_hi| row_ror:1
+// GFX11: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|exec_hi| row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x05,0x60,0xd6,0xfa,0x04,0xfe,0xa1,0x01,0x21,0x01,0xff]
+
+v_maxmin_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|exec_lo| row_ror:15
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x06,0x60,0xd6,0xfa,0x04,0xfa,0xc1,0x01,0x2f,0x01,0xff]
+
+v_maxmin_f16_e64_dpp v5.l, |v1.l|, -v2.l, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: v_maxmin_f16_e64_dpp v5.l, |v1.l|, -v2.l, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x60,0xd6,0xfa,0x04,0xf2,0x41,0x01,0x50,0x01,0xff]
+
+v_maxmin_f16_e64_dpp v5.l, -v1.l, |v2.l|, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: v_maxmin_f16_e64_dpp v5.l, -v1.l, |v2.l|, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x02,0x60,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+
+v_maxmin_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, 0.5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, 0.5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x03,0x60,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x09,0x13]
+
+v_maxmin_f16_e64_dpp v5.h, v1.h, v2.h, v3.h quad_perm:[3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v255.h quad_perm:[0,1,2,3]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x60,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
+
+v_maxmin_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: v_maxmin_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x0a,0x60,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+
+v_maxmin_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x13,0x60,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x09,0x13]
+
+v_maxmin_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: v_maxmin_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0xc7,0x60,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
v_maxmin_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
// GFX11: v_maxmin_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
@@ -3704,47 +3749,92 @@ v_min_u16_e64_dpp v5.l, v1.l, v2.h row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_
v_min_u16_e64_dpp v255.h, v255.l, v255.l row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
// GFX11: v_min_u16_e64_dpp v255.h, v255.l, v255.l op_sel:[0,0,1] row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x40,0x0b,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
-v_minmax_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
-// GFX11: v_minmax_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v3.l row_mirror row_mask:0xf bank_mask:0xf
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v255.l row_half_mirror row_mask:0xf bank_mask:0xf
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v255.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_shl:1 row_mask:0xf bank_mask:0xf
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi row_shl:15
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo row_shr:1
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_minmax_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf
+// GFX11: v_minmax_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x61,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_minmax_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x61,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_minmax_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf
+// GFX11: v_minmax_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x61,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_minmax_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: v_minmax_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x61,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_minmax_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: v_minmax_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x61,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_minmax_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x06,0x61,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_minmax_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: v_minmax_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x87,0x61,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v255.l quad_perm:[0,1,2,3]
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v255.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
+
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, s3 row_mirror
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, s3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x40,0x01,0xff]
-v_minmax_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
-// GFX11: v_minmax_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_half_mirror
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x41,0x01,0xff]
-v_minmax_f16_e64_dpp v5, v1, v2, v3 row_mirror
-// GFX11: v_minmax_f16_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, ttmp15 row_shl:1
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, ttmp15 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xee,0x01,0x01,0x01,0x01,0xff]
-v_minmax_f16_e64_dpp v5, v1, v2, v255 row_half_mirror
-// GFX11: v_minmax_f16_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, -|m0| row_shr:15
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, -|m0| row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x61,0xd6,0xfa,0x04,0xf6,0x81,0x01,0x1f,0x01,0xff]
-v_minmax_f16_e64_dpp v5, v1, v2, s105 row_shl:1
-// GFX11: v_minmax_f16_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+v_minmax_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|exec_hi| row_ror:1
+// GFX11: v_minmax_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|exec_hi| row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x05,0x61,0xd6,0xfa,0x04,0xfe,0xa1,0x01,0x21,0x01,0xff]
-v_minmax_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
-// GFX11: v_minmax_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+v_minmax_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|exec_lo| row_ror:15
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x06,0x61,0xd6,0xfa,0x04,0xfa,0xc1,0x01,0x2f,0x01,0xff]
-v_minmax_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
-// GFX11: v_minmax_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+v_minmax_f16_e64_dpp v5.l, |v1.l|, -v2.l, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX11: v_minmax_f16_e64_dpp v5.l, |v1.l|, -v2.l, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x61,0xd6,0xfa,0x04,0xf2,0x41,0x01,0x50,0x01,0xff]
-v_minmax_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
-// GFX11: v_minmax_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x61,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+v_minmax_f16_e64_dpp v5.l, -v1.l, |v2.l|, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: v_minmax_f16_e64_dpp v5.l, -v1.l, |v2.l|, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x02,0x61,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
-v_minmax_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
-// GFX11: v_minmax_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x61,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+v_minmax_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, 0.5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: v_minmax_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, 0.5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x03,0x61,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x09,0x13]
-v_minmax_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
-// GFX11: v_minmax_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x61,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+v_minmax_f16_e64_dpp v5.h, v1.h, v2.h, v3.h quad_perm:[3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
-v_minmax_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
-// GFX11: v_minmax_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x61,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v255.h quad_perm:[0,1,2,3]
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x61,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
-v_minmax_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX11: v_minmax_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x61,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+v_minmax_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX11: v_minmax_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x0a,0x61,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
-v_minmax_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX11: v_minmax_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x06,0x61,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+v_minmax_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX11: v_minmax_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x13,0x61,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x09,0x13]
-v_minmax_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX11: v_minmax_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x87,0x61,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+v_minmax_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX11: v_minmax_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0xc7,0x61,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
v_minmax_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
// GFX11: v_minmax_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
diff --git a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s
index 2ececc0..2fc0206 100644
--- a/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s
+++ b/llvm/test/MC/AMDGPU/gfx11_asm_vop3_dpp8.s
@@ -1660,41 +1660,80 @@ v_max_u16_e64_dpp v5.l, v1.l, v2.h dpp8:[7,6,5,4,3,2,1,0] fi:1
v_max_u16_e64_dpp v255.h, v255.l, v255.l dpp8:[0,0,0,0,0,0,0,0] fi:0
// GFX11: v_max_u16_e64_dpp v255.h, v255.l, v255.l op_sel:[0,0,1] dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x40,0x09,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
-v_maxmin_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v3.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
-v_maxmin_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v255.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
-v_maxmin_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
-v_maxmin_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
-v_maxmin_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
-v_maxmin_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_maxmin_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x60,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+v_maxmin_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x60,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
-v_maxmin_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_maxmin_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x60,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+v_maxmin_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x60,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
-v_maxmin_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_maxmin_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x60,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+v_maxmin_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x60,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
-v_maxmin_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_maxmin_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x60,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+v_maxmin_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x60,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
-v_maxmin_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_maxmin_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x60,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+v_maxmin_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x60,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
-v_maxmin_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX11: v_maxmin_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x06,0x60,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+v_maxmin_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x06,0x60,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
-v_maxmin_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX11: v_maxmin_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x87,0x60,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+v_maxmin_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: v_maxmin_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x87,0x60,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, s3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, s3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, -|m0| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, -|m0| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x60,0xd6,0xe9,0x04,0xf6,0x81,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|exec_hi| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|exec_hi| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x60,0xd6,0xe9,0x04,0xfe,0xa1,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x60,0xd6,0xe9,0x04,0xfa,0xc1,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v5.l, |v1.l|, -v2.l, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, |v1.l|, -v2.l, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x60,0xd6,0xe9,0x04,0xf2,0x41,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v5.l, -v1.l, |v2.l|, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, -v1.l, |v2.l|, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x60,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, 0.5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, 0.5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x03,0x60,0xd6,0xea,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v5.h, v1.h, v2.h, v3.h dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v255.h dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x60,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_maxmin_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0a,0x60,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x13,0x60,0xd6,0xea,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
+
+v_maxmin_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: v_maxmin_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0xc7,0x60,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
v_maxmin_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
// GFX11: v_maxmin_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
@@ -2434,41 +2473,80 @@ v_min_u16_e64_dpp v5.l, v1.l, v2.h dpp8:[7,6,5,4,3,2,1,0] fi:1
v_min_u16_e64_dpp v255.h, v255.l, v255.l dpp8:[0,0,0,0,0,0,0,0] fi:0
// GFX11: v_min_u16_e64_dpp v255.h, v255.l, v255.l op_sel:[0,0,1] dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x40,0x0b,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
-v_minmax_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_minmax_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v3.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v255.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x61,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x61,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x61,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x61,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x61,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x06,0x61,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: v_minmax_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x87,0x61,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, s3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, s3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
-v_minmax_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_minmax_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, -|m0| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, -|m0| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x61,0xd6,0xe9,0x04,0xf6,0x81,0x01,0x77,0x39,0x05]
-v_minmax_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_minmax_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+v_minmax_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|exec_hi| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|exec_hi| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x61,0xd6,0xe9,0x04,0xfe,0xa1,0x01,0x77,0x39,0x05]
-v_minmax_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_minmax_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+v_minmax_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x61,0xd6,0xe9,0x04,0xfa,0xc1,0x01,0x77,0x39,0x05]
-v_minmax_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_minmax_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+v_minmax_f16_e64_dpp v5.l, |v1.l|, -v2.l, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.l, |v1.l|, -v2.l, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x61,0xd6,0xe9,0x04,0xf2,0x41,0x01,0x77,0x39,0x05]
-v_minmax_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_minmax_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x61,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+v_minmax_f16_e64_dpp v5.l, -v1.l, |v2.l|, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.l, -v1.l, |v2.l|, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x61,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
-v_minmax_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_minmax_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x61,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+v_minmax_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, 0.5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: v_minmax_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, 0.5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x03,0x61,0xd6,0xea,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
-v_minmax_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_minmax_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x61,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+v_minmax_f16_e64_dpp v5.h, v1.h, v2.h, v3.h dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
-v_minmax_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_minmax_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x61,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v255.h dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x61,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
-v_minmax_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX11: v_minmax_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x61,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+v_minmax_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX11: v_minmax_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0a,0x61,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
-v_minmax_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX11: v_minmax_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x06,0x61,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+v_minmax_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX11: v_minmax_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x13,0x61,0xd6,0xea,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
-v_minmax_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
-// GFX11: v_minmax_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x87,0x61,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+v_minmax_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX11: v_minmax_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0xc7,0x61,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
v_minmax_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
// GFX11: v_minmax_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_mimg_err.s b/llvm/test/MC/AMDGPU/gfx12_asm_mimg_err.s
index 0f2cfc3..ee82fa3 100644
--- a/llvm/test/MC/AMDGPU/gfx12_asm_mimg_err.s
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_mimg_err.s
@@ -374,3 +374,8 @@ image_sample_o v[5:6], [v1, v2], null, s[12:15] dmask:0x3 dim:SQ_RSRC_IMG_1D
image_sample_o v[5:6], [v1, v2], s[8:15], null dmask:0x3 dim:SQ_RSRC_IMG_1D
// NOGFX12: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+image_bvh_intersect_ray v[4:7], [v9, v10, v[11:13], v[14:16], v[17:19]], null
+// NOGFX12: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+image_bvh64_intersect_ray v[4:7], [v[9:10], v11, v[12:14], v[15:17], v[18:20]], null
+// NOGFX12: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_smem_err.s b/llvm/test/MC/AMDGPU/gfx12_asm_smem_err.s
index 0f62c8b..49d7c72 100644
--- a/llvm/test/MC/AMDGPU/gfx12_asm_smem_err.s
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_smem_err.s
@@ -29,3 +29,21 @@ s_buffer_load_dwordx8 s[4:11], null, s101
s_buffer_load_dwordx16 s[4:19], null, s101
// NOGFX12: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+s_atc_probe_buffer 7, null, s2
+// NOGFX12: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+s_buffer_prefetch_data null, 100, s10, 7
+// NOGFX12: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+s_buffer_load_i8 s5, null, s0
+// NOGFX12: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+s_buffer_load_u8 s5, null, s0
+// NOGFX12: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+s_buffer_load_i16 s5, null, s0
+// NOGFX12: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
+
+s_buffer_load_u16 s5, null, s0
+// NOGFX12: :[[@LINE-1]]:{{[0-9]+}}: error: invalid operand for instruction
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop3.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop3.s
index c2db5b9..3e7b7d2 100644
--- a/llvm/test/MC/AMDGPU/gfx12_asm_vop3.s
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop3.s
@@ -3590,50 +3590,62 @@ v_max_u16 v255.l, 0xfe0b, vcc_hi
v_max_u16 v255.h, 0xfe0b, vcc_hi
// GFX12: v_max_u16 v255.h, 0xfe0b, vcc_hi op_sel:[0,0,1] ; encoding: [0xff,0x40,0x09,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
-v_maxmin_num_f16 v5, v1, v2, s3
-// GFX12: v_maxmin_num_f16 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x6b,0xd6,0x01,0x05,0x0e,0x00]
+v_maxmin_num_f16 v5.l, v1.l, v2.l, s3
+// GFX12: v_maxmin_num_f16 v5.l, v1.l, v2.l, s3 ; encoding: [0x05,0x00,0x6b,0xd6,0x01,0x05,0x0e,0x00]
-v_maxmin_num_f16 v5, v255, s2, s105
-// GFX12: v_maxmin_num_f16 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x6b,0xd6,0xff,0x05,0xa4,0x01]
+v_maxmin_num_f16 v5.l, v255.l, s2, s105
+// GFX12: v_maxmin_num_f16 v5.l, v255.l, s2, s105 ; encoding: [0x05,0x00,0x6b,0xd6,0xff,0x05,0xa4,0x01]
-v_maxmin_num_f16 v5, s1, v255, exec_hi
-// GFX12: v_maxmin_num_f16 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x6b,0xd6,0x01,0xfe,0xff,0x01]
+v_maxmin_num_f16 v5.l, s1, v255.l, exec_hi
+// GFX12: v_maxmin_num_f16 v5.l, s1, v255.l, exec_hi ; encoding: [0x05,0x00,0x6b,0xd6,0x01,0xfe,0xff,0x01]
-v_maxmin_num_f16 v5, s105, s105, exec_lo
-// GFX12: v_maxmin_num_f16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6b,0xd6,0x69,0xd2,0xf8,0x01]
+v_maxmin_num_f16 v5.l, s105, s105, exec_lo
+// GFX12: v_maxmin_num_f16 v5.l, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6b,0xd6,0x69,0xd2,0xf8,0x01]
-v_maxmin_num_f16 v5, vcc_lo, ttmp15, v3
-// GFX12: v_maxmin_num_f16 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x6b,0xd6,0x6a,0xf6,0x0c,0x04]
+v_maxmin_num_f16 v5.l, vcc_lo, ttmp15, v3.l
+// GFX12: v_maxmin_num_f16 v5.l, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x6b,0xd6,0x6a,0xf6,0x0c,0x04]
-v_maxmin_num_f16 v5, vcc_hi, 0xfe0b, v255
-// GFX12: v_maxmin_num_f16 v5, vcc_hi, 0xfe0b, v255 ; encoding: [0x05,0x00,0x6b,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+v_maxmin_num_f16 v5.l, vcc_hi, 0xfe0b, v255.l
+// GFX12: v_maxmin_num_f16 v5.l, vcc_hi, 0xfe0b, v255.l ; encoding: [0x05,0x00,0x6b,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
-v_maxmin_num_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
-// GFX12: v_maxmin_num_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x6b,0xd6,0x7b,0xfa,0xed,0xe1]
+v_maxmin_num_f16 v5.l, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: v_maxmin_num_f16 v5.l, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x6b,0xd6,0x7b,0xfa,0xed,0xe1]
-v_maxmin_num_f16 v5, m0, 0.5, m0
-// GFX12: v_maxmin_num_f16 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6b,0xd6,0x7d,0xe0,0xf5,0x01]
+v_maxmin_num_f16 v5.l, m0, 0.5, m0
+// GFX12: v_maxmin_num_f16 v5.l, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6b,0xd6,0x7d,0xe0,0xf5,0x01]
-v_maxmin_num_f16 v5, |exec_lo|, -1, vcc_hi
-// GFX12: v_maxmin_num_f16 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6b,0xd6,0x7e,0x82,0xad,0x01]
+v_maxmin_num_f16 v5.l, |exec_lo|, -1, vcc_hi
+// GFX12: v_maxmin_num_f16 v5.l, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6b,0xd6,0x7e,0x82,0xad,0x01]
-v_maxmin_num_f16 v5, -|exec_hi|, null, -|vcc_lo|
-// GFX12: v_maxmin_num_f16 v5, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x6b,0xd6,0x7f,0xf8,0xa8,0xa1]
+v_maxmin_num_f16 v5.l, -|exec_hi|, null, -|vcc_lo|
+// GFX12: v_maxmin_num_f16 v5.l, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x6b,0xd6,0x7f,0xf8,0xa8,0xa1]
-v_maxmin_num_f16 v5, null, exec_lo, -|0xfe0b|
-// GFX12: v_maxmin_num_f16 v5, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x6b,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+v_maxmin_num_f16 v5.l, null, exec_lo, -|0xfe0b|
+// GFX12: v_maxmin_num_f16 v5.l, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x6b,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
-v_maxmin_num_f16 v5, -1, -|exec_hi|, -|src_scc|
-// GFX12: v_maxmin_num_f16 v5, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x6b,0xd6,0xc1,0xfe,0xf4,0xc3]
+v_maxmin_num_f16 v5.l, -1, -|exec_hi|, -|src_scc|
+// GFX12: v_maxmin_num_f16 v5.l, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x6b,0xd6,0xc1,0xfe,0xf4,0xc3]
-v_maxmin_num_f16 v5, 0.5, -m0, 0.5 mul:2
-// GFX12: v_maxmin_num_f16 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x6b,0xd6,0xf0,0xfa,0xc0,0x4b]
+v_maxmin_num_f16 v5.l, 0.5, -m0, 0.5 mul:2
+// GFX12: v_maxmin_num_f16 v5.l, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x6b,0xd6,0xf0,0xfa,0xc0,0x4b]
-v_maxmin_num_f16 v5, -src_scc, |vcc_lo|, -1 mul:4
-// GFX12: v_maxmin_num_f16 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6b,0xd6,0xfd,0xd4,0x04,0x33]
+v_maxmin_num_f16 v5.l, -src_scc, |vcc_lo|, -1 mul:4
+// GFX12: v_maxmin_num_f16 v5.l, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6b,0xd6,0xfd,0xd4,0x04,0x33]
-v_maxmin_num_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2
-// GFX12: v_maxmin_num_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6b,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+v_maxmin_num_f16 v255.l, -|0xfe0b|, -|vcc_hi|, null clamp div:2
+// GFX12: v_maxmin_num_f16 v255.l, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6b,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+
+v_maxmin_num_f16 v5.l, v255.h, s2, s105
+// GFX12: v_maxmin_num_f16 v5.l, v255.h, s2, s105 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x6b,0xd6,0xff,0x05,0xa4,0x01]
+
+v_maxmin_num_f16 v5.l, s1, v255.h, exec_hi
+// GFX12: v_maxmin_num_f16 v5.l, s1, v255.h, exec_hi op_sel:[0,1,0,0] ; encoding: [0x05,0x10,0x6b,0xd6,0x01,0xfe,0xff,0x01]
+
+v_maxmin_num_f16 v5.l, vcc_hi, 0xfe0b, v255.h
+// GFX12: v_maxmin_num_f16 v5.l, vcc_hi, 0xfe0b, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x6b,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+v_maxmin_num_f16 v255.h, -|0xfe0b|, -|vcc_hi|, null clamp div:2
+// GFX12: v_maxmin_num_f16 v255.h, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp div:2 ; encoding: [0xff,0xc3,0x6b,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
v_maxmin_num_f32 v5, v1, v2, s3
// GFX12: v_maxmin_num_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x69,0xd6,0x01,0x05,0x0e,0x00]
@@ -4580,50 +4592,62 @@ v_min_u16 v255.l, 0xfe0b, vcc_hi
v_min_u16 v255.h, 0xfe0b, vcc_hi
// GFX12: v_min_u16 v255.h, 0xfe0b, vcc_hi op_sel:[0,0,1] ; encoding: [0xff,0x40,0x0b,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
-v_minmax_num_f16 v5, v1, v2, s3
-// GFX12: v_minmax_num_f16 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x6a,0xd6,0x01,0x05,0x0e,0x00]
+v_minmax_num_f16 v5.l, v1.l, v2.l, s3
+// GFX12: v_minmax_num_f16 v5.l, v1.l, v2.l, s3 ; encoding: [0x05,0x00,0x6a,0xd6,0x01,0x05,0x0e,0x00]
+
+v_minmax_num_f16 v5.l, v255.l, s2, s105
+// GFX12: v_minmax_num_f16 v5.l, v255.l, s2, s105 ; encoding: [0x05,0x00,0x6a,0xd6,0xff,0x05,0xa4,0x01]
+
+v_minmax_num_f16 v5.l, s1, v255.l, exec_hi
+// GFX12: v_minmax_num_f16 v5.l, s1, v255.l, exec_hi ; encoding: [0x05,0x00,0x6a,0xd6,0x01,0xfe,0xff,0x01]
+
+v_minmax_num_f16 v5.l, s105, s105, exec_lo
+// GFX12: v_minmax_num_f16 v5.l, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6a,0xd6,0x69,0xd2,0xf8,0x01]
+
+v_minmax_num_f16 v5.l, vcc_lo, ttmp15, v3.l
+// GFX12: v_minmax_num_f16 v5.l, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x6a,0xd6,0x6a,0xf6,0x0c,0x04]
-v_minmax_num_f16 v5, v255, s2, s105
-// GFX12: v_minmax_num_f16 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x6a,0xd6,0xff,0x05,0xa4,0x01]
+v_minmax_num_f16 v5.l, vcc_hi, 0xfe0b, v255.l
+// GFX12: v_minmax_num_f16 v5.l, vcc_hi, 0xfe0b, v255.l ; encoding: [0x05,0x00,0x6a,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
-v_minmax_num_f16 v5, s1, v255, exec_hi
-// GFX12: v_minmax_num_f16 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x6a,0xd6,0x01,0xfe,0xff,0x01]
+v_minmax_num_f16 v5.l, -|ttmp15|, -|src_scc|, -|ttmp15|
+// GFX12: v_minmax_num_f16 v5.l, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x6a,0xd6,0x7b,0xfa,0xed,0xe1]
-v_minmax_num_f16 v5, s105, s105, exec_lo
-// GFX12: v_minmax_num_f16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6a,0xd6,0x69,0xd2,0xf8,0x01]
+v_minmax_num_f16 v5.l, m0, 0.5, m0
+// GFX12: v_minmax_num_f16 v5.l, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6a,0xd6,0x7d,0xe0,0xf5,0x01]
-v_minmax_num_f16 v5, vcc_lo, ttmp15, v3
-// GFX12: v_minmax_num_f16 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x6a,0xd6,0x6a,0xf6,0x0c,0x04]
+v_minmax_num_f16 v5.l, |exec_lo|, -1, vcc_hi
+// GFX12: v_minmax_num_f16 v5.l, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6a,0xd6,0x7e,0x82,0xad,0x01]
-v_minmax_num_f16 v5, vcc_hi, 0xfe0b, v255
-// GFX12: v_minmax_num_f16 v5, vcc_hi, 0xfe0b, v255 ; encoding: [0x05,0x00,0x6a,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+v_minmax_num_f16 v5.l, -|exec_hi|, null, -|vcc_lo|
+// GFX12: v_minmax_num_f16 v5.l, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x6a,0xd6,0x7f,0xf8,0xa8,0xa1]
-v_minmax_num_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15|
-// GFX12: v_minmax_num_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x6a,0xd6,0x7b,0xfa,0xed,0xe1]
+v_minmax_num_f16 v5.l, null, exec_lo, -|0xfe0b|
+// GFX12: v_minmax_num_f16 v5.l, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x6a,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
-v_minmax_num_f16 v5, m0, 0.5, m0
-// GFX12: v_minmax_num_f16 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6a,0xd6,0x7d,0xe0,0xf5,0x01]
+v_minmax_num_f16 v5.l, -1, -|exec_hi|, -|src_scc|
+// GFX12: v_minmax_num_f16 v5.l, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x6a,0xd6,0xc1,0xfe,0xf4,0xc3]
-v_minmax_num_f16 v5, |exec_lo|, -1, vcc_hi
-// GFX12: v_minmax_num_f16 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6a,0xd6,0x7e,0x82,0xad,0x01]
+v_minmax_num_f16 v5.l, 0.5, -m0, 0.5 mul:2
+// GFX12: v_minmax_num_f16 v5.l, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x6a,0xd6,0xf0,0xfa,0xc0,0x4b]
-v_minmax_num_f16 v5, -|exec_hi|, null, -|vcc_lo|
-// GFX12: v_minmax_num_f16 v5, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x6a,0xd6,0x7f,0xf8,0xa8,0xa1]
+v_minmax_num_f16 v5.l, -src_scc, |vcc_lo|, -1 mul:4
+// GFX12: v_minmax_num_f16 v5.l, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6a,0xd6,0xfd,0xd4,0x04,0x33]
-v_minmax_num_f16 v5, null, exec_lo, -|0xfe0b|
-// GFX12: v_minmax_num_f16 v5, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x6a,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+v_minmax_num_f16 v255.l, -|0xfe0b|, -|vcc_hi|, null clamp div:2
+// GFX12: v_minmax_num_f16 v255.l, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6a,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
-v_minmax_num_f16 v5, -1, -|exec_hi|, -|src_scc|
-// GFX12: v_minmax_num_f16 v5, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x6a,0xd6,0xc1,0xfe,0xf4,0xc3]
+v_minmax_num_f16 v5.l, v255.h, s2, s105
+// GFX12: v_minmax_num_f16 v5.l, v255.h, s2, s105 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x6a,0xd6,0xff,0x05,0xa4,0x01]
-v_minmax_num_f16 v5, 0.5, -m0, 0.5 mul:2
-// GFX12: v_minmax_num_f16 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x6a,0xd6,0xf0,0xfa,0xc0,0x4b]
+v_minmax_num_f16 v5.l, s1, v255.h, exec_hi
+// GFX12: v_minmax_num_f16 v5.l, s1, v255.h, exec_hi op_sel:[0,1,0,0] ; encoding: [0x05,0x10,0x6a,0xd6,0x01,0xfe,0xff,0x01]
-v_minmax_num_f16 v5, -src_scc, |vcc_lo|, -1 mul:4
-// GFX12: v_minmax_num_f16 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6a,0xd6,0xfd,0xd4,0x04,0x33]
+v_minmax_num_f16 v5.l, vcc_hi, 0xfe0b, v255.h
+// GFX12: v_minmax_num_f16 v5.l, vcc_hi, 0xfe0b, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x6a,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
-v_minmax_num_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2
-// GFX12: v_minmax_num_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6a,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+v_minmax_num_f16 v255.h, -|0xfe0b|, -|vcc_hi|, null clamp div:2
+// GFX12: v_minmax_num_f16 v255.h, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp div:2 ; encoding: [0xff,0xc3,0x6a,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
v_minmax_num_f32 v5, v1, v2, s3
// GFX12: v_minmax_num_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x68,0xd6,0x01,0x05,0x0e,0x00]
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_aliases.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_aliases.s
index ee4561f..ffcf651 100644
--- a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_aliases.s
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_aliases.s
@@ -24,11 +24,11 @@ v_minmax_f32_e64_dpp v0, -v1, -v2, -v3 dpp8:[0,1,2,3,4,5,6,7]
v_maxmin_f32_e64_dpp v0, v1, v2, v3 clamp dpp8:[0,1,2,3,4,5,6,7]
// GFX12: v_maxmin_num_f32_e64_dpp v0, v1, v2, v3 clamp dpp8:[0,1,2,3,4,5,6,7] ; encoding: [0x00,0x80,0x69,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x88,0xc6,0xfa]
-v_minmax_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
-// GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
-v_maxmin_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
-// GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
v_mad_i64_i32 v[5:6], s12, v1, v2, v[3:4]
// GFX12: v_mad_co_i64_i32 v[5:6], s12, v1, v2, v[3:4] ; encoding: [0x05,0x0c,0xff,0xd6,0x01,0x05,0x0e,0x04]
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s
index 623e668..aa804cc 100644
--- a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp16.s
@@ -2921,53 +2921,98 @@ v_max_u16_e64_dpp v5.l, v1.l, v2.h row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_
v_max_u16_e64_dpp v255.h, v255.l, v255.l row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
// GFX12: v_max_u16_e64_dpp v255.h, v255.l, v255.l op_sel:[0,0,1] row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x40,0x09,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
-v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
-// GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
-v_maxmin_num_f16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
-// GFX12: v_maxmin_num_f16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, s2, v3.l quad_perm:[3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, s2, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
-v_maxmin_num_f16_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
-// GFX12: v_maxmin_num_f16_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, 2.0, v3.l quad_perm:[3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, 2.0, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
-v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
-// GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[0,1,2,3]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
-v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 row_mirror
-// GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l row_mirror
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
-v_maxmin_num_f16_e64_dpp v5, v1, v2, v255 row_half_mirror
-// GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.l row_half_mirror
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
-v_maxmin_num_f16_e64_dpp v5, v1, v2, s105 row_shl:1
-// GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_shl:1
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
-v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
-// GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi row_shl:15
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
-v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
-// GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo row_shr:1
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
-v_maxmin_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
-// GFX12: v_maxmin_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x6b,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+v_maxmin_num_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 row_shr:15
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x6b,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
-v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
-// GFX12: v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6b,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi row_ror:1
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6b,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
-v_maxmin_num_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
-// GFX12: v_maxmin_num_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x6b,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+v_maxmin_num_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| row_ror:15
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x6b,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
-v_maxmin_num_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
-// GFX12: v_maxmin_num_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6b,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6b,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
-v_maxmin_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX12: v_maxmin_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x6b,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x6b,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
-v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX12: v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x06,0x6b,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x06,0x6b,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
-v_maxmin_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX12: v_maxmin_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x87,0x6b,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+v_maxmin_num_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: v_maxmin_num_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x87,0x6b,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.l quad_perm:[0,1,2,3]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
+
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, s3 row_mirror
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, s3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x40,0x01,0xff]
+
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_half_mirror
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x41,0x01,0xff]
+
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, ttmp15 row_shl:1
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, ttmp15 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xee,0x01,0x01,0x01,0x01,0xff]
+
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, -|m0| row_shr:15
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, -|m0| row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x6b,0xd6,0xfa,0x04,0xf6,0x81,0x01,0x1f,0x01,0xff]
+
+v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|exec_hi| row_ror:1
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|exec_hi| row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x05,0x6b,0xd6,0xfa,0x04,0xfe,0xa1,0x01,0x21,0x01,0xff]
+
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|exec_lo| row_ror:15
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x06,0x6b,0xd6,0xfa,0x04,0xfa,0xc1,0x01,0x2f,0x01,0xff]
+
+v_maxmin_num_f16_e64_dpp v5.l, |v1.l|, -v2.l, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, |v1.l|, -v2.l, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x6b,0xd6,0xfa,0x04,0xf2,0x41,0x01,0x50,0x01,0xff]
+
+v_maxmin_num_f16_e64_dpp v5.l, -v1.l, |v2.l|, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, -v1.l, |v2.l|, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x02,0x6b,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+
+v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, 0.5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, 0.5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x03,0x6b,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x09,0x13]
+
+v_maxmin_num_f16_e64_dpp v5.h, v1.h, v2.h, v3.h quad_perm:[3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.h quad_perm:[0,1,2,3]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x6b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
+
+v_maxmin_num_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x0a,0x6b,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+
+v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x13,0x6b,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x09,0x13]
+
+v_maxmin_num_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: v_maxmin_num_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0xc7,0x6b,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
v_maxmin_num_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
// GFX12: v_maxmin_num_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x69,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
@@ -3956,53 +4001,98 @@ v_min_u16_e64_dpp v5.l, v1.l, v2.h row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_
v_min_u16_e64_dpp v255.h, v255.l, v255.l row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
// GFX12: v_min_u16_e64_dpp v255.h, v255.l, v255.l op_sel:[0,0,1] row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x40,0x0b,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x05,0x30]
-v_minmax_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
-// GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+v_minmax_num_f16_e64_dpp v5.l, v1.l, s2, v3.l quad_perm:[3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, s2, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+
+v_minmax_num_f16_e64_dpp v5.l, v1.l, 2.0, v3.l quad_perm:[3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, 2.0, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[0,1,2,3]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l row_mirror
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.l row_half_mirror
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_shl:1
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi row_shl:15
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo row_shr:1
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+
+v_minmax_num_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 row_shr:15
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x6a,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+
+v_minmax_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi row_ror:1
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6a,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+
+v_minmax_num_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| row_ror:15
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x6a,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+
+v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6a,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+
+v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x6a,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+
+v_minmax_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x06,0x6a,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+
+v_minmax_num_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: v_minmax_num_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x87,0x6a,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
-v_minmax_num_f16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0]
-// GFX12: v_minmax_num_f16_e64_dpp v5, v1, s2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0c,0x04,0x01,0x1b,0x00,0xff]
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.l quad_perm:[0,1,2,3]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
-v_minmax_num_f16_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0]
-// GFX12: v_minmax_num_f16_e64_dpp v5, v1, 2.0, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0xe8,0x0d,0x04,0x01,0x1b,0x00,0xff]
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, s3 row_mirror
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, s3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x00,0x01,0x40,0x01,0xff]
-v_minmax_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3]
-// GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_half_mirror
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x41,0x01,0xff]
-v_minmax_num_f16_e64_dpp v5, v1, v2, v3 row_mirror
-// GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, ttmp15 row_shl:1
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, ttmp15 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xee,0x01,0x01,0x01,0x01,0xff]
-v_minmax_num_f16_e64_dpp v5, v1, v2, v255 row_half_mirror
-// GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, -|m0| row_shr:15
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, -|m0| row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x6a,0xd6,0xfa,0x04,0xf6,0x81,0x01,0x1f,0x01,0xff]
-v_minmax_num_f16_e64_dpp v5, v1, v2, s105 row_shl:1
-// GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|exec_hi| row_ror:1
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|exec_hi| row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x05,0x6a,0xd6,0xfa,0x04,0xfe,0xa1,0x01,0x21,0x01,0xff]
-v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15
-// GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+v_minmax_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|exec_lo| row_ror:15
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x06,0x6a,0xd6,0xfa,0x04,0xfa,0xc1,0x01,0x2f,0x01,0xff]
-v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1
-// GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+v_minmax_num_f16_e64_dpp v5.l, |v1.l|, -v2.l, null row_share:0 row_mask:0xf bank_mask:0xf
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, |v1.l|, -v2.l, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x6a,0xd6,0xfa,0x04,0xf2,0x41,0x01,0x50,0x01,0xff]
-v_minmax_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15
-// GFX12: v_minmax_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x6a,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+v_minmax_num_f16_e64_dpp v5.l, -v1.l, |v2.l|, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, -v1.l, |v2.l|, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x02,0x6a,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
-v_minmax_num_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1
-// GFX12: v_minmax_num_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6a,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, 0.5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, 0.5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x03,0x6a,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x09,0x13]
-v_minmax_num_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15
-// GFX12: v_minmax_num_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x6a,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+v_minmax_num_f16_e64_dpp v5.h, v1.h, v2.h, v3.h quad_perm:[3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
-v_minmax_num_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf
-// GFX12: v_minmax_num_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6a,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.h quad_perm:[0,1,2,3]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x6a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
-v_minmax_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
-// GFX12: v_minmax_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x6a,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+v_minmax_num_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 mul:2 row_share:15 row_mask:0x0 bank_mask:0x1
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x0a,0x6a,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
-v_minmax_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
-// GFX12: v_minmax_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x06,0x6a,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x09,0x13]
+v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 fi:0
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 bound_ctrl:1 ; encoding: [0x05,0x13,0x6a,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x09,0x13]
-v_minmax_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
-// GFX12: v_minmax_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0x87,0x6a,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
+v_minmax_num_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:0 fi:1
+// GFX12: v_minmax_num_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 fi:1 ; encoding: [0xff,0xc7,0x6a,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x05,0x30]
v_minmax_num_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0]
// GFX12: v_minmax_num_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x68,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
diff --git a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s
index 056ea80..e93a65e 100644
--- a/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s
+++ b/llvm/test/MC/AMDGPU/gfx12_asm_vop3_dpp8.s
@@ -1878,47 +1878,86 @@ v_max_u16_e64_dpp v5.l, v1.l, v2.h dpp8:[7,6,5,4,3,2,1,0] fi:1
v_max_u16_e64_dpp v255.h, v255.l, v255.l dpp8:[0,0,0,0,0,0,0,0] fi:0
// GFX12: v_max_u16_e64_dpp v255.h, v255.l, v255.l op_sel:[0,0,1] dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x40,0x09,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
-v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
-v_maxmin_num_f16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_maxmin_num_f16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, s2, v3.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, s2, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
-v_maxmin_num_f16_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_maxmin_num_f16_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, 2.0, v3.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, 2.0, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
-v_maxmin_num_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
-v_maxmin_num_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
-v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
-v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
-v_maxmin_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_maxmin_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6b,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+v_maxmin_num_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6b,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
-v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6b,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6b,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
-v_maxmin_num_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_maxmin_num_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x6b,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+v_maxmin_num_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x6b,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
-v_maxmin_num_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_maxmin_num_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6b,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6b,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
-v_maxmin_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_maxmin_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x6b,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x6b,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
-v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX12: v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x06,0x6b,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x06,0x6b,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
-v_maxmin_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0]
-// GFX12: v_maxmin_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x87,0x6b,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+v_maxmin_num_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x87,0x6b,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, s3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, s3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, -|m0| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, -|m0| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x6b,0xd6,0xe9,0x04,0xf6,0x81,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|exec_hi| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|exec_hi| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x6b,0xd6,0xe9,0x04,0xfe,0xa1,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x6b,0xd6,0xe9,0x04,0xfa,0xc1,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5.l, |v1.l|, -v2.l, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, |v1.l|, -v2.l, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6b,0xd6,0xe9,0x04,0xf2,0x41,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5.l, -v1.l, |v2.l|, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, -v1.l, |v2.l|, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6b,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, 0.5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, 0.5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x03,0x6b,0xd6,0xea,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5.h, v1.h, v2.h, v3.h dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x6b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.h dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x6b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0a,0x6b,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x13,0x6b,0xd6,0xea,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
+
+v_maxmin_num_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: v_maxmin_num_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0xc7,0x6b,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
v_maxmin_num_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
// GFX12: v_maxmin_num_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x69,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
@@ -2673,47 +2712,86 @@ v_min_u16_e64_dpp v5.l, v1.l, v2.h dpp8:[7,6,5,4,3,2,1,0] fi:1
v_min_u16_e64_dpp v255.h, v255.l, v255.l dpp8:[0,0,0,0,0,0,0,0] fi:0
// GFX12: v_min_u16_e64_dpp v255.h, v255.l, v255.l op_sel:[0,0,1] dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x40,0x0b,0xd7,0xe9,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
-v_minmax_num_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5.l, v1.l, s2, v3.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, s2, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5.l, v1.l, 2.0, v3.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, 2.0, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.l dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, s105 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6a,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6a,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x6a,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6a,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x6a,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x06,0x6a,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+
+v_minmax_num_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0]
+// GFX12: v_minmax_num_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x87,0x6a,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
-v_minmax_num_f16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_minmax_num_f16_e64_dpp v5, v1, s2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0x0c,0x04,0x01,0x77,0x39,0x05]
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, s3 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, s3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0x0e,0x00,0x01,0x77,0x39,0x05]
-v_minmax_num_f16_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_minmax_num_f16_e64_dpp v5, v1, 2.0, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0xe8,0x0d,0x04,0x01,0x77,0x39,0x05]
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, ttmp15 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xee,0x01,0x01,0x77,0x39,0x05]
-v_minmax_num_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, -|m0| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, -|m0| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x6a,0xd6,0xe9,0x04,0xf6,0x81,0x01,0x77,0x39,0x05]
-v_minmax_num_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|exec_hi| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|exec_hi| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x6a,0xd6,0xe9,0x04,0xfe,0xa1,0x01,0x77,0x39,0x05]
-v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+v_minmax_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|exec_lo| dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x6a,0xd6,0xe9,0x04,0xfa,0xc1,0x01,0x77,0x39,0x05]
-v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+v_minmax_num_f16_e64_dpp v5.l, |v1.l|, -v2.l, null dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, |v1.l|, -v2.l, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6a,0xd6,0xe9,0x04,0xf2,0x41,0x01,0x77,0x39,0x05]
-v_minmax_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_minmax_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6a,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+v_minmax_num_f16_e64_dpp v5.l, -v1.l, |v2.l|, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, -v1.l, |v2.l|, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6a,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
-v_minmax_num_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_minmax_num_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6a,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, 0.5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, 0.5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x03,0x6a,0xd6,0xea,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
-v_minmax_num_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_minmax_num_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x6a,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+v_minmax_num_f16_e64_dpp v5.h, v1.h, v2.h, v3.h dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x6a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
-v_minmax_num_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_minmax_num_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6a,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.h dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x6a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
-v_minmax_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0]
-// GFX12: v_minmax_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x6a,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+v_minmax_num_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 mul:2 dpp8:[7,6,5,4,3,2,1,0]
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0a,0x6a,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
-v_minmax_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
-// GFX12: v_minmax_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x06,0x6a,0xd6,0xea,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1
+// GFX12: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 dpp8:[7,6,5,4,3,2,1,0] fi:1 ; encoding: [0x05,0x13,0x6a,0xd6,0xea,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
-v_minmax_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0]
-// GFX12: v_minmax_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0x87,0x6a,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+v_minmax_num_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:0
+// GFX12: v_minmax_num_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 dpp8:[0,0,0,0,0,0,0,0] ; encoding: [0xff,0xc7,0x6a,0xd6,0xe9,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
v_minmax_num_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0]
// GFX12: v_minmax_num_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x68,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
diff --git a/llvm/test/MC/Disassembler/AArch64/armv9.6a-rme-gpc3.txt b/llvm/test/MC/Disassembler/AArch64/armv9.6a-rme-gpc3.txt
index c5d074b..d198771 100644
--- a/llvm/test/MC/Disassembler/AArch64/armv9.6a-rme-gpc3.txt
+++ b/llvm/test/MC/Disassembler/AArch64/armv9.6a-rme-gpc3.txt
@@ -1,10 +1,18 @@
# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
# RUN: llvm-mc -triple aarch64 -disassemble %s | FileCheck %s
-[0x1f,0x70,0x0e,0xd5]
+[0x00,0x70,0x0e,0xd5]
+[0x01,0x70,0x0e,0xd5]
+[0x02,0x70,0x0e,0xd5]
+[0x11,0x70,0x0e,0xd5]
+[0x1e,0x70,0x0e,0xd5]
[0xa3,0x21,0x3e,0xd5]
[0xa4,0x21,0x1e,0xd5]
-# CHECK: sys #6, c7, c0, #0
+# CHECK: sys #6, c7, c0, #0, x0
+# CHECK-NEXT: sys #6, c7, c0, #0, x1
+# CHECK-NEXT: sys #6, c7, c0, #0, x2
+# CHECK-NEXT: sys #6, c7, c0, #0, x17
+# CHECK-NEXT: sys #6, c7, c0, #0, x30
# CHECK-NEXT: mrs x3, GPCBW_EL3
# CHECK-NEXT: msr GPCBW_EL3, x4
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt
index f9e2369..adcca58 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3.txt
@@ -4361,49 +4361,118 @@
# W64-FAKE16: v_max_u16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x09,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
0x05,0x00,0x60,0xd6,0x01,0x05,0x0e,0x00
-# GFX11: v_maxmin_f16 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x60,0xd6,0x01,0x05,0x0e,0x00]
+# W32-REAL16: v_maxmin_f16 v5.l, v1.l, v2.l, s3 ; encoding: [0x05,0x00,0x60,0xd6,0x01,0x05,0x0e,0x00]
+# W32-FAKE16: v_maxmin_f16 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x60,0xd6,0x01,0x05,0x0e,0x00]
+# W64-REAL16: v_maxmin_f16 v5.l, v1.l, v2.l, s3 ; encoding: [0x05,0x00,0x60,0xd6,0x01,0x05,0x0e,0x00]
+# W64-FAKE16: v_maxmin_f16 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x60,0xd6,0x01,0x05,0x0e,0x00]
0x05,0x00,0x60,0xd6,0xff,0x05,0xa4,0x01
-# GFX11: v_maxmin_f16 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x60,0xd6,0xff,0x05,0xa4,0x01]
+# W32-REAL16: v_maxmin_f16 v5.l, v255.l, s2, s105 ; encoding: [0x05,0x00,0x60,0xd6,0xff,0x05,0xa4,0x01]
+# W32-FAKE16: v_maxmin_f16 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x60,0xd6,0xff,0x05,0xa4,0x01]
+# W64-REAL16: v_maxmin_f16 v5.l, v255.l, s2, s105 ; encoding: [0x05,0x00,0x60,0xd6,0xff,0x05,0xa4,0x01]
+# W64-FAKE16: v_maxmin_f16 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x60,0xd6,0xff,0x05,0xa4,0x01]
0x05,0x00,0x60,0xd6,0x01,0xfe,0xff,0x01
-# GFX11: v_maxmin_f16 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x60,0xd6,0x01,0xfe,0xff,0x01]
+# W32-REAL16: v_maxmin_f16 v5.l, s1, v255.l, exec_hi ; encoding: [0x05,0x00,0x60,0xd6,0x01,0xfe,0xff,0x01]
+# W32-FAKE16: v_maxmin_f16 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x60,0xd6,0x01,0xfe,0xff,0x01]
+# W64-REAL16: v_maxmin_f16 v5.l, s1, v255.l, exec_hi ; encoding: [0x05,0x00,0x60,0xd6,0x01,0xfe,0xff,0x01]
+# W64-FAKE16: v_maxmin_f16 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x60,0xd6,0x01,0xfe,0xff,0x01]
0x05,0x00,0x60,0xd6,0x69,0xd2,0xf8,0x01
-# GFX11: v_maxmin_f16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x60,0xd6,0x69,0xd2,0xf8,0x01]
+# W32-REAL16: v_maxmin_f16 v5.l, s105, s105, exec_lo ; encoding: [0x05,0x00,0x60,0xd6,0x69,0xd2,0xf8,0x01]
+# W32-FAKE16: v_maxmin_f16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x60,0xd6,0x69,0xd2,0xf8,0x01]
+# W64-REAL16: v_maxmin_f16 v5.l, s105, s105, exec_lo ; encoding: [0x05,0x00,0x60,0xd6,0x69,0xd2,0xf8,0x01]
+# W64-FAKE16: v_maxmin_f16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x60,0xd6,0x69,0xd2,0xf8,0x01]
0x05,0x00,0x60,0xd6,0x6a,0xf6,0x0c,0x04
-# GFX11: v_maxmin_f16 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x60,0xd6,0x6a,0xf6,0x0c,0x04]
+# W32-REAL16: v_maxmin_f16 v5.l, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x60,0xd6,0x6a,0xf6,0x0c,0x04]
+# W32-FAKE16: v_maxmin_f16 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x60,0xd6,0x6a,0xf6,0x0c,0x04]
+# W64-REAL16: v_maxmin_f16 v5.l, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x60,0xd6,0x6a,0xf6,0x0c,0x04]
+# W64-FAKE16: v_maxmin_f16 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x60,0xd6,0x6a,0xf6,0x0c,0x04]
0x05,0x00,0x60,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00
-# GFX11: v_maxmin_f16 v5, vcc_hi, 0xfe0b, v255 ; encoding: [0x05,0x00,0x60,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W32-REAL16: v_maxmin_f16 v5.l, vcc_hi, 0xfe0b, v255.l ; encoding: [0x05,0x00,0x60,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_maxmin_f16 v5, vcc_hi, 0xfe0b, v255 ; encoding: [0x05,0x00,0x60,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_maxmin_f16 v5.l, vcc_hi, 0xfe0b, v255.l ; encoding: [0x05,0x00,0x60,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_maxmin_f16 v5, vcc_hi, 0xfe0b, v255 ; encoding: [0x05,0x00,0x60,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
0x05,0x07,0x60,0xd6,0x7b,0xfa,0xed,0xe1
-# GFX11: v_maxmin_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x60,0xd6,0x7b,0xfa,0xed,0xe1]
+# W32-REAL16: v_maxmin_f16 v5.l, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x60,0xd6,0x7b,0xfa,0xed,0xe1]
+# W32-FAKE16: v_maxmin_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x60,0xd6,0x7b,0xfa,0xed,0xe1]
+# W64-REAL16: v_maxmin_f16 v5.l, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x60,0xd6,0x7b,0xfa,0xed,0xe1]
+# W64-FAKE16: v_maxmin_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x60,0xd6,0x7b,0xfa,0xed,0xe1]
0x05,0x00,0x60,0xd6,0x7d,0xe0,0xf5,0x01
-# GFX11: v_maxmin_f16 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x60,0xd6,0x7d,0xe0,0xf5,0x01]
+# W32-REAL16: v_maxmin_f16 v5.l, m0, 0.5, m0 ; encoding: [0x05,0x00,0x60,0xd6,0x7d,0xe0,0xf5,0x01]
+# W32-FAKE16: v_maxmin_f16 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x60,0xd6,0x7d,0xe0,0xf5,0x01]
+# W64-REAL16: v_maxmin_f16 v5.l, m0, 0.5, m0 ; encoding: [0x05,0x00,0x60,0xd6,0x7d,0xe0,0xf5,0x01]
+# W64-FAKE16: v_maxmin_f16 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x60,0xd6,0x7d,0xe0,0xf5,0x01]
0x05,0x01,0x60,0xd6,0x7e,0x82,0xad,0x01
-# GFX11: v_maxmin_f16 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x60,0xd6,0x7e,0x82,0xad,0x01]
+# W32-REAL16: v_maxmin_f16 v5.l, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x60,0xd6,0x7e,0x82,0xad,0x01]
+# W32-FAKE16: v_maxmin_f16 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x60,0xd6,0x7e,0x82,0xad,0x01]
+# W64-REAL16: v_maxmin_f16 v5.l, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x60,0xd6,0x7e,0x82,0xad,0x01]
+# W64-FAKE16: v_maxmin_f16 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x60,0xd6,0x7e,0x82,0xad,0x01]
0x05,0x05,0x60,0xd6,0x7f,0xf8,0xa8,0xa1
-# GFX11: v_maxmin_f16 v5, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x60,0xd6,0x7f,0xf8,0xa8,0xa1]
+# W32-REAL16: v_maxmin_f16 v5.l, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x60,0xd6,0x7f,0xf8,0xa8,0xa1]
+# W32-FAKE16: v_maxmin_f16 v5, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x60,0xd6,0x7f,0xf8,0xa8,0xa1]
+# W64-REAL16: v_maxmin_f16 v5.l, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x60,0xd6,0x7f,0xf8,0xa8,0xa1]
+# W64-FAKE16: v_maxmin_f16 v5, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x60,0xd6,0x7f,0xf8,0xa8,0xa1]
0x05,0x04,0x60,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00
-# GFX11: v_maxmin_f16 v5, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x60,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+# W32-REAL16: v_maxmin_f16 v5.l, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x60,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_maxmin_f16 v5, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x60,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_maxmin_f16 v5.l, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x60,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_maxmin_f16 v5, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x60,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
0x05,0x06,0x60,0xd6,0xc1,0xfe,0xf4,0xc3
-# GFX11: v_maxmin_f16 v5, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x60,0xd6,0xc1,0xfe,0xf4,0xc3]
+# W32-REAL16: v_maxmin_f16 v5.l, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x60,0xd6,0xc1,0xfe,0xf4,0xc3]
+# W32-FAKE16: v_maxmin_f16 v5, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x60,0xd6,0xc1,0xfe,0xf4,0xc3]
+# W64-REAL16: v_maxmin_f16 v5.l, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x60,0xd6,0xc1,0xfe,0xf4,0xc3]
+# W64-FAKE16: v_maxmin_f16 v5, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x60,0xd6,0xc1,0xfe,0xf4,0xc3]
0x05,0x00,0x60,0xd6,0xf0,0xfa,0xc0,0x4b
-# GFX11: v_maxmin_f16 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x60,0xd6,0xf0,0xfa,0xc0,0x4b]
+# W32-REAL16: v_maxmin_f16 v5.l, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x60,0xd6,0xf0,0xfa,0xc0,0x4b]
+# W32-FAKE16: v_maxmin_f16 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x60,0xd6,0xf0,0xfa,0xc0,0x4b]
+# W64-REAL16: v_maxmin_f16 v5.l, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x60,0xd6,0xf0,0xfa,0xc0,0x4b]
+# W64-FAKE16: v_maxmin_f16 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x60,0xd6,0xf0,0xfa,0xc0,0x4b]
0x05,0x02,0x60,0xd6,0xfd,0xd4,0x04,0x33
-# GFX11: v_maxmin_f16 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x60,0xd6,0xfd,0xd4,0x04,0x33]
+# W32-REAL16: v_maxmin_f16 v5.l, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x60,0xd6,0xfd,0xd4,0x04,0x33]
+# W32-FAKE16: v_maxmin_f16 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x60,0xd6,0xfd,0xd4,0x04,0x33]
+# W64-REAL16: v_maxmin_f16 v5.l, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x60,0xd6,0xfd,0xd4,0x04,0x33]
+# W64-FAKE16: v_maxmin_f16 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x60,0xd6,0xfd,0xd4,0x04,0x33]
0xff,0x83,0x60,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00
-# GFX11: v_maxmin_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x60,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W32-REAL16: v_maxmin_f16 v255.l, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x60,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_maxmin_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x60,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_maxmin_f16 v255.l, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x60,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_maxmin_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x60,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+
+0x05,0x08,0x60,0xd6,0xff,0x05,0xa4,0x01
+# W32-REAL16: v_maxmin_f16 v5.l, v255.h, s2, s105 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x60,0xd6,0xff,0x05,0xa4,0x01]
+# W32-FAKE16: v_maxmin_f16 v5, v255, s2, s105 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x60,0xd6,0xff,0x05,0xa4,0x01]
+# W64-REAL16: v_maxmin_f16 v5.l, v255.h, s2, s105 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x60,0xd6,0xff,0x05,0xa4,0x01]
+# W64-FAKE16: v_maxmin_f16 v5, v255, s2, s105 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x60,0xd6,0xff,0x05,0xa4,0x01]
+
+0x05,0x10,0x60,0xd6,0x01,0xfe,0xff,0x01
+# W32-REAL16: v_maxmin_f16 v5.l, s1, v255.h, exec_hi op_sel:[0,1,0,0] ; encoding: [0x05,0x10,0x60,0xd6,0x01,0xfe,0xff,0x01]
+# W32-FAKE16: v_maxmin_f16 v5, s1, v255, exec_hi op_sel:[0,1,0,0] ; encoding: [0x05,0x10,0x60,0xd6,0x01,0xfe,0xff,0x01]
+# W64-REAL16: v_maxmin_f16 v5.l, s1, v255.h, exec_hi op_sel:[0,1,0,0] ; encoding: [0x05,0x10,0x60,0xd6,0x01,0xfe,0xff,0x01]
+# W64-FAKE16: v_maxmin_f16 v5, s1, v255, exec_hi op_sel:[0,1,0,0] ; encoding: [0x05,0x10,0x60,0xd6,0x01,0xfe,0xff,0x01]
+
+0x05,0x20,0x60,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00
+# W32-REAL16: v_maxmin_f16 v5.l, vcc_hi, 0xfe0b, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x60,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_maxmin_f16 v5, vcc_hi, 0xfe0b, v255 op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x60,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_maxmin_f16 v5.l, vcc_hi, 0xfe0b, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x60,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_maxmin_f16 v5, vcc_hi, 0xfe0b, v255 op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x60,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+0xff,0xc3,0x60,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00
+# W32-REAL16: v_maxmin_f16 v255.h, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp div:2 ; encoding: [0xff,0xc3,0x60,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_maxmin_f16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp div:2 ; encoding: [0xff,0xc3,0x60,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_maxmin_f16 v255.h, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp div:2 ; encoding: [0xff,0xc3,0x60,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_maxmin_f16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp div:2 ; encoding: [0xff,0xc3,0x60,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
0x05,0x00,0x5e,0xd6,0x01,0x05,0x0e,0x00
# GFX11: v_maxmin_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x5e,0xd6,0x01,0x05,0x0e,0x00]
@@ -5851,49 +5920,118 @@
# W64-FAKE16: v_min_u16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x0b,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
0x05,0x00,0x61,0xd6,0x01,0x05,0x0e,0x00
-# GFX11: v_minmax_f16 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x61,0xd6,0x01,0x05,0x0e,0x00]
+# W32-REAL16: v_minmax_f16 v5.l, v1.l, v2.l, s3 ; encoding: [0x05,0x00,0x61,0xd6,0x01,0x05,0x0e,0x00]
+# W32-FAKE16: v_minmax_f16 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x61,0xd6,0x01,0x05,0x0e,0x00]
+# W64-REAL16: v_minmax_f16 v5.l, v1.l, v2.l, s3 ; encoding: [0x05,0x00,0x61,0xd6,0x01,0x05,0x0e,0x00]
+# W64-FAKE16: v_minmax_f16 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x61,0xd6,0x01,0x05,0x0e,0x00]
0x05,0x00,0x61,0xd6,0xff,0x05,0xa4,0x01
-# GFX11: v_minmax_f16 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x61,0xd6,0xff,0x05,0xa4,0x01]
+# W32-REAL16: v_minmax_f16 v5.l, v255.l, s2, s105 ; encoding: [0x05,0x00,0x61,0xd6,0xff,0x05,0xa4,0x01]
+# W32-FAKE16: v_minmax_f16 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x61,0xd6,0xff,0x05,0xa4,0x01]
+# W64-REAL16: v_minmax_f16 v5.l, v255.l, s2, s105 ; encoding: [0x05,0x00,0x61,0xd6,0xff,0x05,0xa4,0x01]
+# W64-FAKE16: v_minmax_f16 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x61,0xd6,0xff,0x05,0xa4,0x01]
0x05,0x00,0x61,0xd6,0x01,0xfe,0xff,0x01
-# GFX11: v_minmax_f16 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x61,0xd6,0x01,0xfe,0xff,0x01]
+# W32-REAL16: v_minmax_f16 v5.l, s1, v255.l, exec_hi ; encoding: [0x05,0x00,0x61,0xd6,0x01,0xfe,0xff,0x01]
+# W32-FAKE16: v_minmax_f16 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x61,0xd6,0x01,0xfe,0xff,0x01]
+# W64-REAL16: v_minmax_f16 v5.l, s1, v255.l, exec_hi ; encoding: [0x05,0x00,0x61,0xd6,0x01,0xfe,0xff,0x01]
+# W64-FAKE16: v_minmax_f16 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x61,0xd6,0x01,0xfe,0xff,0x01]
0x05,0x00,0x61,0xd6,0x69,0xd2,0xf8,0x01
-# GFX11: v_minmax_f16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x61,0xd6,0x69,0xd2,0xf8,0x01]
+# W32-REAL16: v_minmax_f16 v5.l, s105, s105, exec_lo ; encoding: [0x05,0x00,0x61,0xd6,0x69,0xd2,0xf8,0x01]
+# W32-FAKE16: v_minmax_f16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x61,0xd6,0x69,0xd2,0xf8,0x01]
+# W64-REAL16: v_minmax_f16 v5.l, s105, s105, exec_lo ; encoding: [0x05,0x00,0x61,0xd6,0x69,0xd2,0xf8,0x01]
+# W64-FAKE16: v_minmax_f16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x61,0xd6,0x69,0xd2,0xf8,0x01]
0x05,0x00,0x61,0xd6,0x6a,0xf6,0x0c,0x04
-# GFX11: v_minmax_f16 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x61,0xd6,0x6a,0xf6,0x0c,0x04]
+# W32-REAL16: v_minmax_f16 v5.l, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x61,0xd6,0x6a,0xf6,0x0c,0x04]
+# W32-FAKE16: v_minmax_f16 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x61,0xd6,0x6a,0xf6,0x0c,0x04]
+# W64-REAL16: v_minmax_f16 v5.l, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x61,0xd6,0x6a,0xf6,0x0c,0x04]
+# W64-FAKE16: v_minmax_f16 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x61,0xd6,0x6a,0xf6,0x0c,0x04]
0x05,0x00,0x61,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00
-# GFX11: v_minmax_f16 v5, vcc_hi, 0xfe0b, v255 ; encoding: [0x05,0x00,0x61,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W32-REAL16: v_minmax_f16 v5.l, vcc_hi, 0xfe0b, v255.l ; encoding: [0x05,0x00,0x61,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_minmax_f16 v5, vcc_hi, 0xfe0b, v255 ; encoding: [0x05,0x00,0x61,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_minmax_f16 v5.l, vcc_hi, 0xfe0b, v255.l ; encoding: [0x05,0x00,0x61,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_minmax_f16 v5, vcc_hi, 0xfe0b, v255 ; encoding: [0x05,0x00,0x61,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
0x05,0x07,0x61,0xd6,0x7b,0xfa,0xed,0xe1
-# GFX11: v_minmax_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x61,0xd6,0x7b,0xfa,0xed,0xe1]
+# W32-REAL16: v_minmax_f16 v5.l, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x61,0xd6,0x7b,0xfa,0xed,0xe1]
+# W32-FAKE16: v_minmax_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x61,0xd6,0x7b,0xfa,0xed,0xe1]
+# W64-REAL16: v_minmax_f16 v5.l, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x61,0xd6,0x7b,0xfa,0xed,0xe1]
+# W64-FAKE16: v_minmax_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x61,0xd6,0x7b,0xfa,0xed,0xe1]
0x05,0x00,0x61,0xd6,0x7d,0xe0,0xf5,0x01
-# GFX11: v_minmax_f16 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x61,0xd6,0x7d,0xe0,0xf5,0x01]
+# W32-REAL16: v_minmax_f16 v5.l, m0, 0.5, m0 ; encoding: [0x05,0x00,0x61,0xd6,0x7d,0xe0,0xf5,0x01]
+# W32-FAKE16: v_minmax_f16 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x61,0xd6,0x7d,0xe0,0xf5,0x01]
+# W64-REAL16: v_minmax_f16 v5.l, m0, 0.5, m0 ; encoding: [0x05,0x00,0x61,0xd6,0x7d,0xe0,0xf5,0x01]
+# W64-FAKE16: v_minmax_f16 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x61,0xd6,0x7d,0xe0,0xf5,0x01]
0x05,0x01,0x61,0xd6,0x7e,0x82,0xad,0x01
-# GFX11: v_minmax_f16 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x61,0xd6,0x7e,0x82,0xad,0x01]
+# W32-REAL16: v_minmax_f16 v5.l, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x61,0xd6,0x7e,0x82,0xad,0x01]
+# W32-FAKE16: v_minmax_f16 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x61,0xd6,0x7e,0x82,0xad,0x01]
+# W64-REAL16: v_minmax_f16 v5.l, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x61,0xd6,0x7e,0x82,0xad,0x01]
+# W64-FAKE16: v_minmax_f16 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x61,0xd6,0x7e,0x82,0xad,0x01]
0x05,0x05,0x61,0xd6,0x7f,0xf8,0xa8,0xa1
-# GFX11: v_minmax_f16 v5, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x61,0xd6,0x7f,0xf8,0xa8,0xa1]
+# W32-REAL16: v_minmax_f16 v5.l, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x61,0xd6,0x7f,0xf8,0xa8,0xa1]
+# W32-FAKE16: v_minmax_f16 v5, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x61,0xd6,0x7f,0xf8,0xa8,0xa1]
+# W64-REAL16: v_minmax_f16 v5.l, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x61,0xd6,0x7f,0xf8,0xa8,0xa1]
+# W64-FAKE16: v_minmax_f16 v5, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x61,0xd6,0x7f,0xf8,0xa8,0xa1]
0x05,0x04,0x61,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00
-# GFX11: v_minmax_f16 v5, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x61,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+# W32-REAL16: v_minmax_f16 v5.l, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x61,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_minmax_f16 v5, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x61,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_minmax_f16 v5.l, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x61,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_minmax_f16 v5, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x61,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
0x05,0x06,0x61,0xd6,0xc1,0xfe,0xf4,0xc3
-# GFX11: v_minmax_f16 v5, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x61,0xd6,0xc1,0xfe,0xf4,0xc3]
+# W32-REAL16: v_minmax_f16 v5.l, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x61,0xd6,0xc1,0xfe,0xf4,0xc3]
+# W32-FAKE16: v_minmax_f16 v5, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x61,0xd6,0xc1,0xfe,0xf4,0xc3]
+# W64-REAL16: v_minmax_f16 v5.l, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x61,0xd6,0xc1,0xfe,0xf4,0xc3]
+# W64-FAKE16: v_minmax_f16 v5, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x61,0xd6,0xc1,0xfe,0xf4,0xc3]
0x05,0x00,0x61,0xd6,0xf0,0xfa,0xc0,0x4b
-# GFX11: v_minmax_f16 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x61,0xd6,0xf0,0xfa,0xc0,0x4b]
+# W32-REAL16: v_minmax_f16 v5.l, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x61,0xd6,0xf0,0xfa,0xc0,0x4b]
+# W32-FAKE16: v_minmax_f16 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x61,0xd6,0xf0,0xfa,0xc0,0x4b]
+# W64-REAL16: v_minmax_f16 v5.l, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x61,0xd6,0xf0,0xfa,0xc0,0x4b]
+# W64-FAKE16: v_minmax_f16 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x61,0xd6,0xf0,0xfa,0xc0,0x4b]
0x05,0x02,0x61,0xd6,0xfd,0xd4,0x04,0x33
-# GFX11: v_minmax_f16 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x61,0xd6,0xfd,0xd4,0x04,0x33]
+# W32-REAL16: v_minmax_f16 v5.l, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x61,0xd6,0xfd,0xd4,0x04,0x33]
+# W32-FAKE16: v_minmax_f16 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x61,0xd6,0xfd,0xd4,0x04,0x33]
+# W64-REAL16: v_minmax_f16 v5.l, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x61,0xd6,0xfd,0xd4,0x04,0x33]
+# W64-FAKE16: v_minmax_f16 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x61,0xd6,0xfd,0xd4,0x04,0x33]
0xff,0x83,0x61,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00
-# GFX11: v_minmax_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x61,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W32-REAL16: v_minmax_f16 v255.l, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x61,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_minmax_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x61,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_minmax_f16 v255.l, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x61,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_minmax_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x61,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+
+0x05,0x08,0x61,0xd6,0xff,0x05,0xa4,0x01
+# W32-REAL16: v_minmax_f16 v5.l, v255.h, s2, s105 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x61,0xd6,0xff,0x05,0xa4,0x01]
+# W32-FAKE16: v_minmax_f16 v5, v255, s2, s105 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x61,0xd6,0xff,0x05,0xa4,0x01]
+# W64-REAL16: v_minmax_f16 v5.l, v255.h, s2, s105 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x61,0xd6,0xff,0x05,0xa4,0x01]
+# W64-FAKE16: v_minmax_f16 v5, v255, s2, s105 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x61,0xd6,0xff,0x05,0xa4,0x01]
+
+0x05,0x10,0x61,0xd6,0x01,0xfe,0xff,0x01
+# W32-REAL16: v_minmax_f16 v5.l, s1, v255.h, exec_hi op_sel:[0,1,0,0] ; encoding: [0x05,0x10,0x61,0xd6,0x01,0xfe,0xff,0x01]
+# W32-FAKE16: v_minmax_f16 v5, s1, v255, exec_hi op_sel:[0,1,0,0] ; encoding: [0x05,0x10,0x61,0xd6,0x01,0xfe,0xff,0x01]
+# W64-REAL16: v_minmax_f16 v5.l, s1, v255.h, exec_hi op_sel:[0,1,0,0] ; encoding: [0x05,0x10,0x61,0xd6,0x01,0xfe,0xff,0x01]
+# W64-FAKE16: v_minmax_f16 v5, s1, v255, exec_hi op_sel:[0,1,0,0] ; encoding: [0x05,0x10,0x61,0xd6,0x01,0xfe,0xff,0x01]
+
+0x05,0x20,0x61,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00
+# W32-REAL16: v_minmax_f16 v5.l, vcc_hi, 0xfe0b, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x61,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_minmax_f16 v5, vcc_hi, 0xfe0b, v255 op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x61,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_minmax_f16 v5.l, vcc_hi, 0xfe0b, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x61,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_minmax_f16 v5, vcc_hi, 0xfe0b, v255 op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x61,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+0xff,0xc3,0x61,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00
+# W32-REAL16: v_minmax_f16 v255.h, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp div:2 ; encoding: [0xff,0xc3,0x61,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_minmax_f16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp div:2 ; encoding: [0xff,0xc3,0x61,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_minmax_f16 v255.h, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp div:2 ; encoding: [0xff,0xc3,0x61,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_minmax_f16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp div:2 ; encoding: [0xff,0xc3,0x61,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
0x05,0x00,0x5f,0xd6,0x01,0x05,0x0e,0x00
# GFX11: v_minmax_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x5f,0xd6,0x01,0x05,0x0e,0x00]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt
index 132fc80..2964360 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp16.txt
@@ -2113,46 +2113,118 @@
# W64-FAKE16: v_max_u16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x09,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
-# GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff
-# GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff
-# GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
0x05,0x00,0x60,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff
-# GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v255.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v255.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
0x05,0x00,0x60,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff
-# GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
0x05,0x00,0x60,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff
-# GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
0x05,0x00,0x60,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff
-# GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x60,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
0x05,0x01,0x60,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff
-# GFX11: v_maxmin_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x60,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x60,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x60,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x60,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x60,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
0x05,0x02,0x60,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff
-# GFX11: v_maxmin_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x60,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x60,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x60,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x60,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x60,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
0x05,0x04,0x60,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff
-# GFX11: v_maxmin_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x60,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x60,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x60,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x60,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x60,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
0x05,0x03,0x60,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff
-# GFX11: v_maxmin_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x60,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x60,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x60,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x60,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x60,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
0x05,0x05,0x60,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01
-# GFX11: v_maxmin_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x60,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x60,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x60,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x60,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x60,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
0x05,0x06,0x60,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13
-# GFX11: v_maxmin_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x06,0x60,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x06,0x60,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x06,0x60,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x06,0x60,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x06,0x60,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13]
0xff,0x87,0x60,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30
-# GFX11: v_maxmin_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x87,0x60,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W32-REAL16: v_maxmin_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x87,0x60,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x87,0x60,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W64-REAL16: v_maxmin_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x87,0x60,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x87,0x60,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+
+0x05,0x78,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, v3 op_sel:[1,1,1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, v3 op_sel:[1,1,1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x60,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+0x05,0x20,0x60,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x60,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x60,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x60,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x60,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
+
+0x05,0x0a,0x60,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x0a,0x60,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, -v1, |v2|, -1 op_sel:[1,0,0,0] mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x0a,0x60,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x0a,0x60,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, -v1, |v2|, -1 op_sel:[1,0,0,0] mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x0a,0x60,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+
+0x05,0x13,0x60,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x01,0x13
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x13,0x60,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x01,0x13]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, -|v1|, -|v2|, 0.5 op_sel:[0,1,0,0] mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x13,0x60,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x01,0x13]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x13,0x60,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x01,0x13]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, -|v1|, -|v2|, 0.5 op_sel:[0,1,0,0] mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x13,0x60,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x01,0x13]
+
+0xff,0xc7,0x60,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30
+# W32-REAL16: v_maxmin_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0xc7,0x60,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0xc7,0x60,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W64-REAL16: v_maxmin_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0xc7,0x60,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0xc7,0x60,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
0x05,0x00,0x5e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
# GFX11: v_maxmin_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5e,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
@@ -2833,46 +2905,118 @@
# W64-FAKE16: v_min_u16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x0b,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
-# GFX11: v_minmax_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff
-# GFX11: v_minmax_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff
-# GFX11: v_minmax_f16_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
0x05,0x00,0x61,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff
-# GFX11: v_minmax_f16_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v255.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v255.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
0x05,0x00,0x61,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff
-# GFX11: v_minmax_f16_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
0x05,0x00,0x61,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff
-# GFX11: v_minmax_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
0x05,0x00,0x61,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff
-# GFX11: v_minmax_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x61,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
0x05,0x01,0x61,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff
-# GFX11: v_minmax_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x61,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x61,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x61,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x61,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x61,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
0x05,0x02,0x61,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff
-# GFX11: v_minmax_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x61,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x61,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x61,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x61,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x61,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
0x05,0x04,0x61,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff
-# GFX11: v_minmax_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x61,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x61,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x61,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x61,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x61,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
0x05,0x03,0x61,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff
-# GFX11: v_minmax_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x61,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x61,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x61,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x61,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x61,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
0x05,0x05,0x61,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01
-# GFX11: v_minmax_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x61,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x61,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x61,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x61,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x61,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
0x05,0x06,0x61,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13
-# GFX11: v_minmax_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x06,0x61,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x06,0x61,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x06,0x61,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x06,0x61,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x06,0x61,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13]
0xff,0x87,0x61,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30
-# GFX11: v_minmax_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x87,0x61,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W32-REAL16: v_minmax_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x87,0x61,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W32-FAKE16: v_minmax_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x87,0x61,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W64-REAL16: v_minmax_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x87,0x61,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W64-FAKE16: v_minmax_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x87,0x61,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+
+0x05,0x78,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
+# W32-REAL16: v_minmax_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, v3 op_sel:[1,1,1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, v3 op_sel:[1,1,1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x61,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+0x05,0x20,0x61,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x61,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x61,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x61,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x61,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
+
+0x05,0x0a,0x61,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x0a,0x61,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, -v1, |v2|, -1 op_sel:[1,0,0,0] mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x0a,0x61,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x0a,0x61,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, -v1, |v2|, -1 op_sel:[1,0,0,0] mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x0a,0x61,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+
+0x05,0x13,0x61,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x01,0x13
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x13,0x61,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x01,0x13]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, -|v1|, -|v2|, 0.5 op_sel:[0,1,0,0] mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x13,0x61,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x01,0x13]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x13,0x61,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x01,0x13]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, -|v1|, -|v2|, 0.5 op_sel:[0,1,0,0] mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x13,0x61,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x01,0x13]
+
+0xff,0xc7,0x61,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30
+# W32-REAL16: v_minmax_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0xc7,0x61,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W32-FAKE16: v_minmax_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0xc7,0x61,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W64-REAL16: v_minmax_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0xc7,0x61,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W64-FAKE16: v_minmax_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0xc7,0x61,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
0x05,0x00,0x5f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
# GFX11: v_minmax_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x5f,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt
index 714fac9..7a81ba2 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx11_dasm_vop3_dpp8.txt
@@ -1141,40 +1141,106 @@
# W64-FAKE16: v_max_u16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x09,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
-# GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
0x05,0x00,0x60,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05
-# GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
0x05,0x00,0x60,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05
-# GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
0x05,0x00,0x60,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05
-# GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
0x05,0x00,0x60,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05
-# GFX11: v_maxmin_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x60,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
0x05,0x01,0x60,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05
-# GFX11: v_maxmin_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x60,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x60,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x60,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x60,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x60,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
0x05,0x02,0x60,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05
-# GFX11: v_maxmin_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x60,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x60,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x60,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x60,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x60,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
0x05,0x04,0x60,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05
-# GFX11: v_maxmin_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x60,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x60,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x60,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x60,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x60,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
0x05,0x03,0x60,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05
-# GFX11: v_maxmin_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x60,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x60,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x60,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x60,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x60,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
0x05,0x05,0x60,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05
-# GFX11: v_maxmin_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x60,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x60,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x60,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x60,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x60,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
0x05,0x06,0x60,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05
-# GFX11: v_maxmin_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x60,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x60,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x60,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x60,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x60,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
0xff,0x87,0x60,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00
-# GFX11: v_maxmin_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x87,0x60,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W32-REAL16: v_maxmin_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x87,0x60,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x87,0x60,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W64-REAL16: v_maxmin_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x87,0x60,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x87,0x60,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+0x05,0x78,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, v3 op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, v3 op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x60,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x20,0x60,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x60,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x60,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x60,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x60,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+0x05,0x0a,0x60,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0a,0x60,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, -v1, |v2|, -1 op_sel:[1,0,0,0] mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0a,0x60,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0a,0x60,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, -v1, |v2|, -1 op_sel:[1,0,0,0] mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0a,0x60,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+
+0x05,0x13,0x60,0xd6,0xe9,0x04,0xc2,0x73,0x01,0x77,0x39,0x05
+# W32-REAL16: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x13,0x60,0xd6,0xe9,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v5, -|v1|, -|v2|, 0.5 op_sel:[0,1,0,0] mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x13,0x60,0xd6,0xe9,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x13,0x60,0xd6,0xe9,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v5, -|v1|, -|v2|, 0.5 op_sel:[0,1,0,0] mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x13,0x60,0xd6,0xe9,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
+
+0xff,0xc7,0x60,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00
+# W32-REAL16: v_maxmin_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0xc7,0x60,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W32-FAKE16: v_maxmin_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0xc7,0x60,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W64-REAL16: v_maxmin_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0xc7,0x60,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W64-FAKE16: v_maxmin_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0xc7,0x60,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
0x05,0x00,0x5e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
# GFX11: v_maxmin_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5e,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
@@ -1585,40 +1651,106 @@
# W64-FAKE16: v_min_u16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x0b,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
-# GFX11: v_minmax_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
0x05,0x00,0x61,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05
-# GFX11: v_minmax_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
0x05,0x00,0x61,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05
-# GFX11: v_minmax_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
0x05,0x00,0x61,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05
-# GFX11: v_minmax_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
0x05,0x00,0x61,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05
-# GFX11: v_minmax_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x61,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
0x05,0x01,0x61,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05
-# GFX11: v_minmax_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x61,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x61,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x61,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x61,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x61,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
0x05,0x02,0x61,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05
-# GFX11: v_minmax_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x61,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x61,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x61,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x61,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x61,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
0x05,0x04,0x61,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05
-# GFX11: v_minmax_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x61,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x61,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x61,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x61,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x61,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
0x05,0x03,0x61,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05
-# GFX11: v_minmax_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x61,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x61,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x61,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x61,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x61,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
0x05,0x05,0x61,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05
-# GFX11: v_minmax_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x61,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x61,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x61,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x61,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x61,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
0x05,0x06,0x61,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05
-# GFX11: v_minmax_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x61,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x61,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x61,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x61,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x61,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
0xff,0x87,0x61,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00
-# GFX11: v_minmax_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x87,0x61,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W32-REAL16: v_minmax_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x87,0x61,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W32-FAKE16: v_minmax_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x87,0x61,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W64-REAL16: v_minmax_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x87,0x61,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W64-FAKE16: v_minmax_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x87,0x61,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+0x05,0x78,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# W32-REAL16: v_minmax_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, v3 op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, v3 op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x61,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x20,0x61,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x61,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x61,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x61,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x61,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+0x05,0x0a,0x61,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0a,0x61,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, -v1, |v2|, -1 op_sel:[1,0,0,0] mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0a,0x61,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0a,0x61,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, -v1, |v2|, -1 op_sel:[1,0,0,0] mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0a,0x61,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+
+0x05,0x13,0x61,0xd6,0xe9,0x04,0xc2,0x73,0x01,0x77,0x39,0x05
+# W32-REAL16: v_minmax_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x13,0x61,0xd6,0xe9,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_f16_e64_dpp v5, -|v1|, -|v2|, 0.5 op_sel:[0,1,0,0] mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x13,0x61,0xd6,0xe9,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x13,0x61,0xd6,0xe9,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_f16_e64_dpp v5, -|v1|, -|v2|, 0.5 op_sel:[0,1,0,0] mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x13,0x61,0xd6,0xe9,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
+
+0xff,0xc7,0x61,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00
+# W32-REAL16: v_minmax_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0xc7,0x61,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W32-FAKE16: v_minmax_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0xc7,0x61,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W64-REAL16: v_minmax_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0xc7,0x61,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W64-FAKE16: v_minmax_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0xc7,0x61,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
0x05,0x00,0x5f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
# GFX11: v_minmax_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x5f,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt
index 6d48440..633d3a4 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3.txt
@@ -4265,49 +4265,120 @@
# W64-FAKE16: v_max_u16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x09,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
0x05,0x00,0x6b,0xd6,0x01,0x05,0x0e,0x00
-# GFX12: v_maxmin_num_f16 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x6b,0xd6,0x01,0x05,0x0e,0x00]
+# W32-REAL16: v_maxmin_num_f16 v5.l, v1.l, v2.l, s3 ; encoding: [0x05,0x00,0x6b,0xd6,0x01,0x05,0x0e,0x00]
+# W32-FAKE16: v_maxmin_num_f16 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x6b,0xd6,0x01,0x05,0x0e,0x00]
+# W64-REAL16: v_maxmin_num_f16 v5.l, v1.l, v2.l, s3 ; encoding: [0x05,0x00,0x6b,0xd6,0x01,0x05,0x0e,0x00]
+# W64-FAKE16: v_maxmin_num_f16 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x6b,0xd6,0x01,0x05,0x0e,0x00]
0x05,0x00,0x6b,0xd6,0xff,0x05,0xa4,0x01
-# GFX12: v_maxmin_num_f16 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x6b,0xd6,0xff,0x05,0xa4,0x01]
+# W32-REAL16: v_maxmin_num_f16 v5.l, v255.l, s2, s105 ; encoding: [0x05,0x00,0x6b,0xd6,0xff,0x05,0xa4,0x01]
+# W32-FAKE16: v_maxmin_num_f16 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x6b,0xd6,0xff,0x05,0xa4,0x01]
+# W64-REAL16: v_maxmin_num_f16 v5.l, v255.l, s2, s105 ; encoding: [0x05,0x00,0x6b,0xd6,0xff,0x05,0xa4,0x01]
+# W64-FAKE16: v_maxmin_num_f16 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x6b,0xd6,0xff,0x05,0xa4,0x01]
0x05,0x00,0x6b,0xd6,0x01,0xfe,0xff,0x01
-# GFX12: v_maxmin_num_f16 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x6b,0xd6,0x01,0xfe,0xff,0x01]
+# W32-REAL16: v_maxmin_num_f16 v5.l, s1, v255.l, exec_hi ; encoding: [0x05,0x00,0x6b,0xd6,0x01,0xfe,0xff,0x01]
+# W32-FAKE16: v_maxmin_num_f16 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x6b,0xd6,0x01,0xfe,0xff,0x01]
+# W64-REAL16: v_maxmin_num_f16 v5.l, s1, v255.l, exec_hi ; encoding: [0x05,0x00,0x6b,0xd6,0x01,0xfe,0xff,0x01]
+# W64-FAKE16: v_maxmin_num_f16 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x6b,0xd6,0x01,0xfe,0xff,0x01]
0x05,0x00,0x6b,0xd6,0x69,0xd2,0xf8,0x01
-# GFX12: v_maxmin_num_f16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6b,0xd6,0x69,0xd2,0xf8,0x01]
+# W32-REAL16: v_maxmin_num_f16 v5.l, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6b,0xd6,0x69,0xd2,0xf8,0x01]
+# W32-FAKE16: v_maxmin_num_f16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6b,0xd6,0x69,0xd2,0xf8,0x01]
+# W64-REAL16: v_maxmin_num_f16 v5.l, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6b,0xd6,0x69,0xd2,0xf8,0x01]
+# W64-FAKE16: v_maxmin_num_f16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6b,0xd6,0x69,0xd2,0xf8,0x01]
0x05,0x00,0x6b,0xd6,0x6a,0xf6,0x0c,0x04
-# GFX12: v_maxmin_num_f16 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x6b,0xd6,0x6a,0xf6,0x0c,0x04]
+# W32-REAL16: v_maxmin_num_f16 v5.l, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x6b,0xd6,0x6a,0xf6,0x0c,0x04]
+# W32-FAKE16: v_maxmin_num_f16 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x6b,0xd6,0x6a,0xf6,0x0c,0x04]
+# W64-REAL16: v_maxmin_num_f16 v5.l, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x6b,0xd6,0x6a,0xf6,0x0c,0x04]
+# W64-FAKE16: v_maxmin_num_f16 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x6b,0xd6,0x6a,0xf6,0x0c,0x04]
0x05,0x00,0x6b,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00
-# GFX12: v_maxmin_num_f16 v5, vcc_hi, 0xfe0b, v255 ; encoding: [0x05,0x00,0x6b,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W32-REAL16: v_maxmin_num_f16 v5.l, vcc_hi, 0xfe0b, v255.l ; encoding: [0x05,0x00,0x6b,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_maxmin_num_f16 v5, vcc_hi, 0xfe0b, v255 ; encoding: [0x05,0x00,0x6b,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_maxmin_num_f16 v5.l, vcc_hi, 0xfe0b, v255.l ; encoding: [0x05,0x00,0x6b,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_maxmin_num_f16 v5, vcc_hi, 0xfe0b, v255 ; encoding: [0x05,0x00,0x6b,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
0x05,0x07,0x6b,0xd6,0x7b,0xfa,0xed,0xe1
-# GFX12: v_maxmin_num_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x6b,0xd6,0x7b,0xfa,0xed,0xe1]
+# W32-REAL16: v_maxmin_num_f16 v5.l, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x6b,0xd6,0x7b,0xfa,0xed,0xe1]
+# W32-FAKE16: v_maxmin_num_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x6b,0xd6,0x7b,0xfa,0xed,0xe1]
+# W64-REAL16: v_maxmin_num_f16 v5.l, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x6b,0xd6,0x7b,0xfa,0xed,0xe1]
+# W64-FAKE16: v_maxmin_num_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x6b,0xd6,0x7b,0xfa,0xed,0xe1]
0x05,0x00,0x6b,0xd6,0x7d,0xe0,0xf5,0x01
-# GFX12: v_maxmin_num_f16 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6b,0xd6,0x7d,0xe0,0xf5,0x01]
+# W32-REAL16: v_maxmin_num_f16 v5.l, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6b,0xd6,0x7d,0xe0,0xf5,0x01]
+# W32-FAKE16: v_maxmin_num_f16 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6b,0xd6,0x7d,0xe0,0xf5,0x01]
+# W64-REAL16: v_maxmin_num_f16 v5.l, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6b,0xd6,0x7d,0xe0,0xf5,0x01]
+# W64-FAKE16: v_maxmin_num_f16 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6b,0xd6,0x7d,0xe0,0xf5,0x01]
0x05,0x01,0x6b,0xd6,0x7e,0x82,0xad,0x01
-# GFX12: v_maxmin_num_f16 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6b,0xd6,0x7e,0x82,0xad,0x01]
+# W32-REAL16: v_maxmin_num_f16 v5.l, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6b,0xd6,0x7e,0x82,0xad,0x01]
+# W32-FAKE16: v_maxmin_num_f16 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6b,0xd6,0x7e,0x82,0xad,0x01]
+# W64-REAL16: v_maxmin_num_f16 v5.l, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6b,0xd6,0x7e,0x82,0xad,0x01]
+# W64-FAKE16: v_maxmin_num_f16 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6b,0xd6,0x7e,0x82,0xad,0x01]
0x05,0x05,0x6b,0xd6,0x7f,0xf8,0xa8,0xa1
-# GFX12: v_maxmin_num_f16 v5, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x6b,0xd6,0x7f,0xf8,0xa8,0xa1]
+# W32-REAL16: v_maxmin_num_f16 v5.l, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x6b,0xd6,0x7f,0xf8,0xa8,0xa1]
+# W32-FAKE16: v_maxmin_num_f16 v5, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x6b,0xd6,0x7f,0xf8,0xa8,0xa1]
+# W64-REAL16: v_maxmin_num_f16 v5.l, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x6b,0xd6,0x7f,0xf8,0xa8,0xa1]
+# W64-FAKE16: v_maxmin_num_f16 v5, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x6b,0xd6,0x7f,0xf8,0xa8,0xa1]
0x05,0x04,0x6b,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00
-# GFX12: v_maxmin_num_f16 v5, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x6b,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+# W32-REAL16: v_maxmin_num_f16 v5.l, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x6b,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_maxmin_num_f16 v5, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x6b,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_maxmin_num_f16 v5.l, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x6b,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_maxmin_num_f16 v5, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x6b,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
0x05,0x06,0x6b,0xd6,0xc1,0xfe,0xf4,0xc3
-# GFX12: v_maxmin_num_f16 v5, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x6b,0xd6,0xc1,0xfe,0xf4,0xc3]
+# W32-REAL16: v_maxmin_num_f16 v5.l, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x6b,0xd6,0xc1,0xfe,0xf4,0xc3]
+# W32-FAKE16: v_maxmin_num_f16 v5, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x6b,0xd6,0xc1,0xfe,0xf4,0xc3]
+# W64-REAL16: v_maxmin_num_f16 v5.l, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x6b,0xd6,0xc1,0xfe,0xf4,0xc3]
+# W64-FAKE16: v_maxmin_num_f16 v5, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x6b,0xd6,0xc1,0xfe,0xf4,0xc3]
0x05,0x00,0x6b,0xd6,0xf0,0xfa,0xc0,0x4b
-# GFX12: v_maxmin_num_f16 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x6b,0xd6,0xf0,0xfa,0xc0,0x4b]
+# W32-REAL16: v_maxmin_num_f16 v5.l, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x6b,0xd6,0xf0,0xfa,0xc0,0x4b]
+# W32-FAKE16: v_maxmin_num_f16 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x6b,0xd6,0xf0,0xfa,0xc0,0x4b]
+# W64-REAL16: v_maxmin_num_f16 v5.l, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x6b,0xd6,0xf0,0xfa,0xc0,0x4b]
+# W64-FAKE16: v_maxmin_num_f16 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x6b,0xd6,0xf0,0xfa,0xc0,0x4b]
0x05,0x02,0x6b,0xd6,0xfd,0xd4,0x04,0x33
-# GFX12: v_maxmin_num_f16 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6b,0xd6,0xfd,0xd4,0x04,0x33]
+# W32-REAL16: v_maxmin_num_f16 v5.l, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6b,0xd6,0xfd,0xd4,0x04,0x33]
+# W32-FAKE16: v_maxmin_num_f16 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6b,0xd6,0xfd,0xd4,0x04,0x33]
+# W64-REAL16: v_maxmin_num_f16 v5.l, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6b,0xd6,0xfd,0xd4,0x04,0x33]
+# W64-FAKE16: v_maxmin_num_f16 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6b,0xd6,0xfd,0xd4,0x04,0x33]
0xff,0x83,0x6b,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00
-# GFX12: v_maxmin_num_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6b,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W32-REAL16: v_maxmin_num_f16 v255.l, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6b,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_maxmin_num_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6b,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_maxmin_num_f16 v255.l, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6b,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_maxmin_num_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6b,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+
+0x05,0x08,0x6b,0xd6,0xff,0x05,0xa4,0x01
+# W32-REAL16: v_maxmin_num_f16 v5.l, v255.h, s2, s105 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x6b,0xd6,0xff,0x05,0xa4,0x01]
+# W32-FAKE16: v_maxmin_num_f16 v5, v255, s2, s105 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x6b,0xd6,0xff,0x05,0xa4,0x01]
+# W64-REAL16: v_maxmin_num_f16 v5.l, v255.h, s2, s105 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x6b,0xd6,0xff,0x05,0xa4,0x01]
+# W64-FAKE16: v_maxmin_num_f16 v5, v255, s2, s105 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x6b,0xd6,0xff,0x05,0xa4,0x01]
+
+
+0x05,0x10,0x6b,0xd6,0x01,0xfe,0xff,0x01
+# W32-REAL16: v_maxmin_num_f16 v5.l, s1, v255.h, exec_hi op_sel:[0,1,0,0] ; encoding: [0x05,0x10,0x6b,0xd6,0x01,0xfe,0xff,0x01]
+# W32-FAKE16: v_maxmin_num_f16 v5, s1, v255, exec_hi op_sel:[0,1,0,0] ; encoding: [0x05,0x10,0x6b,0xd6,0x01,0xfe,0xff,0x01]
+# W64-REAL16: v_maxmin_num_f16 v5.l, s1, v255.h, exec_hi op_sel:[0,1,0,0] ; encoding: [0x05,0x10,0x6b,0xd6,0x01,0xfe,0xff,0x01]
+# W64-FAKE16: v_maxmin_num_f16 v5, s1, v255, exec_hi op_sel:[0,1,0,0] ; encoding: [0x05,0x10,0x6b,0xd6,0x01,0xfe,0xff,0x01]
+
+
+0x05,0x20,0x6b,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00
+# W32-REAL16: v_maxmin_num_f16 v5.l, vcc_hi, 0xfe0b, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x6b,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_maxmin_num_f16 v5, vcc_hi, 0xfe0b, v255 op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x6b,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_maxmin_num_f16 v5.l, vcc_hi, 0xfe0b, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x6b,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_maxmin_num_f16 v5, vcc_hi, 0xfe0b, v255 op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x6b,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+0xff,0xc3,0x6b,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00
+# W32-REAL16: v_maxmin_num_f16 v255.h, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp div:2 ; encoding: [0xff,0xc3,0x6b,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_maxmin_num_f16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp div:2 ; encoding: [0xff,0xc3,0x6b,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_maxmin_num_f16 v255.h, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp div:2 ; encoding: [0xff,0xc3,0x6b,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_maxmin_num_f16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp div:2 ; encoding: [0xff,0xc3,0x6b,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
0x05,0x00,0x69,0xd6,0x01,0x05,0x0e,0x00
# GFX12: v_maxmin_num_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x69,0xd6,0x01,0x05,0x0e,0x00]
@@ -5693,49 +5764,120 @@
# W64-FAKE16: v_min_u16 v255, 0xfe0b, vcc_hi ; encoding: [0xff,0x00,0x0b,0xd7,0xff,0xd6,0x00,0x00,0x0b,0xfe,0x00,0x00]
0x05,0x00,0x6a,0xd6,0x01,0x05,0x0e,0x00
-# GFX12: v_minmax_num_f16 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x6a,0xd6,0x01,0x05,0x0e,0x00]
+# W32-REAL16: v_minmax_num_f16 v5.l, v1.l, v2.l, s3 ; encoding: [0x05,0x00,0x6a,0xd6,0x01,0x05,0x0e,0x00]
+# W32-FAKE16: v_minmax_num_f16 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x6a,0xd6,0x01,0x05,0x0e,0x00]
+# W64-REAL16: v_minmax_num_f16 v5.l, v1.l, v2.l, s3 ; encoding: [0x05,0x00,0x6a,0xd6,0x01,0x05,0x0e,0x00]
+# W64-FAKE16: v_minmax_num_f16 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x6a,0xd6,0x01,0x05,0x0e,0x00]
0x05,0x00,0x6a,0xd6,0xff,0x05,0xa4,0x01
-# GFX12: v_minmax_num_f16 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x6a,0xd6,0xff,0x05,0xa4,0x01]
+# W32-REAL16: v_minmax_num_f16 v5.l, v255.l, s2, s105 ; encoding: [0x05,0x00,0x6a,0xd6,0xff,0x05,0xa4,0x01]
+# W32-FAKE16: v_minmax_num_f16 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x6a,0xd6,0xff,0x05,0xa4,0x01]
+# W64-REAL16: v_minmax_num_f16 v5.l, v255.l, s2, s105 ; encoding: [0x05,0x00,0x6a,0xd6,0xff,0x05,0xa4,0x01]
+# W64-FAKE16: v_minmax_num_f16 v5, v255, s2, s105 ; encoding: [0x05,0x00,0x6a,0xd6,0xff,0x05,0xa4,0x01]
0x05,0x00,0x6a,0xd6,0x01,0xfe,0xff,0x01
-# GFX12: v_minmax_num_f16 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x6a,0xd6,0x01,0xfe,0xff,0x01]
+# W32-REAL16: v_minmax_num_f16 v5.l, s1, v255.l, exec_hi ; encoding: [0x05,0x00,0x6a,0xd6,0x01,0xfe,0xff,0x01]
+# W32-FAKE16: v_minmax_num_f16 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x6a,0xd6,0x01,0xfe,0xff,0x01]
+# W64-REAL16: v_minmax_num_f16 v5.l, s1, v255.l, exec_hi ; encoding: [0x05,0x00,0x6a,0xd6,0x01,0xfe,0xff,0x01]
+# W64-FAKE16: v_minmax_num_f16 v5, s1, v255, exec_hi ; encoding: [0x05,0x00,0x6a,0xd6,0x01,0xfe,0xff,0x01]
0x05,0x00,0x6a,0xd6,0x69,0xd2,0xf8,0x01
-# GFX12: v_minmax_num_f16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6a,0xd6,0x69,0xd2,0xf8,0x01]
+# W32-REAL16: v_minmax_num_f16 v5.l, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6a,0xd6,0x69,0xd2,0xf8,0x01]
+# W32-FAKE16: v_minmax_num_f16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6a,0xd6,0x69,0xd2,0xf8,0x01]
+# W64-REAL16: v_minmax_num_f16 v5.l, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6a,0xd6,0x69,0xd2,0xf8,0x01]
+# W64-FAKE16: v_minmax_num_f16 v5, s105, s105, exec_lo ; encoding: [0x05,0x00,0x6a,0xd6,0x69,0xd2,0xf8,0x01]
0x05,0x00,0x6a,0xd6,0x6a,0xf6,0x0c,0x04
-# GFX12: v_minmax_num_f16 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x6a,0xd6,0x6a,0xf6,0x0c,0x04]
+# W32-REAL16: v_minmax_num_f16 v5.l, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x6a,0xd6,0x6a,0xf6,0x0c,0x04]
+# W32-FAKE16: v_minmax_num_f16 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x6a,0xd6,0x6a,0xf6,0x0c,0x04]
+# W64-REAL16: v_minmax_num_f16 v5.l, vcc_lo, ttmp15, v3.l ; encoding: [0x05,0x00,0x6a,0xd6,0x6a,0xf6,0x0c,0x04]
+# W64-FAKE16: v_minmax_num_f16 v5, vcc_lo, ttmp15, v3 ; encoding: [0x05,0x00,0x6a,0xd6,0x6a,0xf6,0x0c,0x04]
0x05,0x00,0x6a,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00
-# GFX12: v_minmax_num_f16 v5, vcc_hi, 0xfe0b, v255 ; encoding: [0x05,0x00,0x6a,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W32-REAL16: v_minmax_num_f16 v5.l, vcc_hi, 0xfe0b, v255.l ; encoding: [0x05,0x00,0x6a,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_minmax_num_f16 v5, vcc_hi, 0xfe0b, v255 ; encoding: [0x05,0x00,0x6a,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_minmax_num_f16 v5.l, vcc_hi, 0xfe0b, v255.l ; encoding: [0x05,0x00,0x6a,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_minmax_num_f16 v5, vcc_hi, 0xfe0b, v255 ; encoding: [0x05,0x00,0x6a,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
0x05,0x07,0x6a,0xd6,0x7b,0xfa,0xed,0xe1
-# GFX12: v_minmax_num_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x6a,0xd6,0x7b,0xfa,0xed,0xe1]
+# W32-REAL16: v_minmax_num_f16 v5.l, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x6a,0xd6,0x7b,0xfa,0xed,0xe1]
+# W32-FAKE16: v_minmax_num_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x6a,0xd6,0x7b,0xfa,0xed,0xe1]
+# W64-REAL16: v_minmax_num_f16 v5.l, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x6a,0xd6,0x7b,0xfa,0xed,0xe1]
+# W64-FAKE16: v_minmax_num_f16 v5, -|ttmp15|, -|src_scc|, -|ttmp15| ; encoding: [0x05,0x07,0x6a,0xd6,0x7b,0xfa,0xed,0xe1]
0x05,0x00,0x6a,0xd6,0x7d,0xe0,0xf5,0x01
-# GFX12: v_minmax_num_f16 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6a,0xd6,0x7d,0xe0,0xf5,0x01]
+# W32-REAL16: v_minmax_num_f16 v5.l, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6a,0xd6,0x7d,0xe0,0xf5,0x01]
+# W32-FAKE16: v_minmax_num_f16 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6a,0xd6,0x7d,0xe0,0xf5,0x01]
+# W64-REAL16: v_minmax_num_f16 v5.l, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6a,0xd6,0x7d,0xe0,0xf5,0x01]
+# W64-FAKE16: v_minmax_num_f16 v5, m0, 0.5, m0 ; encoding: [0x05,0x00,0x6a,0xd6,0x7d,0xe0,0xf5,0x01]
0x05,0x01,0x6a,0xd6,0x7e,0x82,0xad,0x01
-# GFX12: v_minmax_num_f16 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6a,0xd6,0x7e,0x82,0xad,0x01]
+# W32-REAL16: v_minmax_num_f16 v5.l, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6a,0xd6,0x7e,0x82,0xad,0x01]
+# W32-FAKE16: v_minmax_num_f16 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6a,0xd6,0x7e,0x82,0xad,0x01]
+# W64-REAL16: v_minmax_num_f16 v5.l, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6a,0xd6,0x7e,0x82,0xad,0x01]
+# W64-FAKE16: v_minmax_num_f16 v5, |exec_lo|, -1, vcc_hi ; encoding: [0x05,0x01,0x6a,0xd6,0x7e,0x82,0xad,0x01]
0x05,0x05,0x6a,0xd6,0x7f,0xf8,0xa8,0xa1
-# GFX12: v_minmax_num_f16 v5, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x6a,0xd6,0x7f,0xf8,0xa8,0xa1]
+# W32-REAL16: v_minmax_num_f16 v5.l, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x6a,0xd6,0x7f,0xf8,0xa8,0xa1]
+# W32-FAKE16: v_minmax_num_f16 v5, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x6a,0xd6,0x7f,0xf8,0xa8,0xa1]
+# W64-REAL16: v_minmax_num_f16 v5.l, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x6a,0xd6,0x7f,0xf8,0xa8,0xa1]
+# W64-FAKE16: v_minmax_num_f16 v5, -|exec_hi|, null, -|vcc_lo| ; encoding: [0x05,0x05,0x6a,0xd6,0x7f,0xf8,0xa8,0xa1]
0x05,0x04,0x6a,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00
-# GFX12: v_minmax_num_f16 v5, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x6a,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+# W32-REAL16: v_minmax_num_f16 v5.l, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x6a,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_minmax_num_f16 v5, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x6a,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_minmax_num_f16 v5.l, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x6a,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_minmax_num_f16 v5, null, exec_lo, -|0xfe0b| ; encoding: [0x05,0x04,0x6a,0xd6,0x7c,0xfc,0xfc,0x83,0x0b,0xfe,0x00,0x00]
0x05,0x06,0x6a,0xd6,0xc1,0xfe,0xf4,0xc3
-# GFX12: v_minmax_num_f16 v5, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x6a,0xd6,0xc1,0xfe,0xf4,0xc3]
+# W32-REAL16: v_minmax_num_f16 v5.l, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x6a,0xd6,0xc1,0xfe,0xf4,0xc3]
+# W32-FAKE16: v_minmax_num_f16 v5, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x6a,0xd6,0xc1,0xfe,0xf4,0xc3]
+# W64-REAL16: v_minmax_num_f16 v5.l, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x6a,0xd6,0xc1,0xfe,0xf4,0xc3]
+# W64-FAKE16: v_minmax_num_f16 v5, -1, -|exec_hi|, -|src_scc| ; encoding: [0x05,0x06,0x6a,0xd6,0xc1,0xfe,0xf4,0xc3]
0x05,0x00,0x6a,0xd6,0xf0,0xfa,0xc0,0x4b
-# GFX12: v_minmax_num_f16 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x6a,0xd6,0xf0,0xfa,0xc0,0x4b]
+# W32-REAL16: v_minmax_num_f16 v5.l, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x6a,0xd6,0xf0,0xfa,0xc0,0x4b]
+# W32-FAKE16: v_minmax_num_f16 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x6a,0xd6,0xf0,0xfa,0xc0,0x4b]
+# W64-REAL16: v_minmax_num_f16 v5.l, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x6a,0xd6,0xf0,0xfa,0xc0,0x4b]
+# W64-FAKE16: v_minmax_num_f16 v5, 0.5, -m0, 0.5 mul:2 ; encoding: [0x05,0x00,0x6a,0xd6,0xf0,0xfa,0xc0,0x4b]
0x05,0x02,0x6a,0xd6,0xfd,0xd4,0x04,0x33
-# GFX12: v_minmax_num_f16 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6a,0xd6,0xfd,0xd4,0x04,0x33]
+# W32-REAL16: v_minmax_num_f16 v5.l, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6a,0xd6,0xfd,0xd4,0x04,0x33]
+# W32-FAKE16: v_minmax_num_f16 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6a,0xd6,0xfd,0xd4,0x04,0x33]
+# W64-REAL16: v_minmax_num_f16 v5.l, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6a,0xd6,0xfd,0xd4,0x04,0x33]
+# W64-FAKE16: v_minmax_num_f16 v5, -src_scc, |vcc_lo|, -1 mul:4 ; encoding: [0x05,0x02,0x6a,0xd6,0xfd,0xd4,0x04,0x33]
0xff,0x83,0x6a,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00
-# GFX12: v_minmax_num_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6a,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W32-REAL16: v_minmax_num_f16 v255.l, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6a,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_minmax_num_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6a,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_minmax_num_f16 v255.l, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6a,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_minmax_num_f16 v255, -|0xfe0b|, -|vcc_hi|, null clamp div:2 ; encoding: [0xff,0x83,0x6a,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+
+0x05,0x08,0x6a,0xd6,0xff,0x05,0xa4,0x01
+# W32-REAL16: v_minmax_num_f16 v5.l, v255.h, s2, s105 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x6a,0xd6,0xff,0x05,0xa4,0x01]
+# W32-FAKE16: v_minmax_num_f16 v5, v255, s2, s105 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x6a,0xd6,0xff,0x05,0xa4,0x01]
+# W64-REAL16: v_minmax_num_f16 v5.l, v255.h, s2, s105 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x6a,0xd6,0xff,0x05,0xa4,0x01]
+# W64-FAKE16: v_minmax_num_f16 v5, v255, s2, s105 op_sel:[1,0,0,0] ; encoding: [0x05,0x08,0x6a,0xd6,0xff,0x05,0xa4,0x01]
+
+
+0x05,0x10,0x6a,0xd6,0x01,0xfe,0xff,0x01
+# W32-REAL16: v_minmax_num_f16 v5.l, s1, v255.h, exec_hi op_sel:[0,1,0,0] ; encoding: [0x05,0x10,0x6a,0xd6,0x01,0xfe,0xff,0x01]
+# W32-FAKE16: v_minmax_num_f16 v5, s1, v255, exec_hi op_sel:[0,1,0,0] ; encoding: [0x05,0x10,0x6a,0xd6,0x01,0xfe,0xff,0x01]
+# W64-REAL16: v_minmax_num_f16 v5.l, s1, v255.h, exec_hi op_sel:[0,1,0,0] ; encoding: [0x05,0x10,0x6a,0xd6,0x01,0xfe,0xff,0x01]
+# W64-FAKE16: v_minmax_num_f16 v5, s1, v255, exec_hi op_sel:[0,1,0,0] ; encoding: [0x05,0x10,0x6a,0xd6,0x01,0xfe,0xff,0x01]
+
+
+0x05,0x20,0x6a,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00
+# W32-REAL16: v_minmax_num_f16 v5.l, vcc_hi, 0xfe0b, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x6a,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_minmax_num_f16 v5, vcc_hi, 0xfe0b, v255 op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x6a,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_minmax_num_f16 v5.l, vcc_hi, 0xfe0b, v255.h op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x6a,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_minmax_num_f16 v5, vcc_hi, 0xfe0b, v255 op_sel:[0,0,1,0] ; encoding: [0x05,0x20,0x6a,0xd6,0x6b,0xfe,0xfd,0x07,0x0b,0xfe,0x00,0x00]
+
+0xff,0xc3,0x6a,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00
+# W32-REAL16: v_minmax_num_f16 v255.h, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp div:2 ; encoding: [0xff,0xc3,0x6a,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W32-FAKE16: v_minmax_num_f16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp div:2 ; encoding: [0xff,0xc3,0x6a,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W64-REAL16: v_minmax_num_f16 v255.h, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp div:2 ; encoding: [0xff,0xc3,0x6a,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
+# W64-FAKE16: v_minmax_num_f16 v255, -|0xfe0b|, -|vcc_hi|, null op_sel:[0,0,0,1] clamp div:2 ; encoding: [0xff,0xc3,0x6a,0xd6,0xff,0xd6,0xf0,0x79,0x0b,0xfe,0x00,0x00]
0x05,0x00,0x68,0xd6,0x01,0x05,0x0e,0x00
# GFX12: v_minmax_num_f32 v5, v1, v2, s3 ; encoding: [0x05,0x00,0x68,0xd6,0x01,0x05,0x0e,0x00]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt
index 561d3a6..7e30a4a 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp16.txt
@@ -2329,52 +2329,131 @@
# W64-FAKE16: v_max_u16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x09,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
-# GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
0x05,0x00,0x6b,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff
-# GFX12: v_maxmin_num_f16_e64_dpp v5, v1, s3, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, s3, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, s3, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, s3, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, s3, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff]
0x05,0x00,0x6b,0xd6,0xfa,0xea,0x0d,0x04,0x01,0x1b,0x00,0xff
-# GFX12: v_maxmin_num_f16_e64_dpp v5, v1, -2.0, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0xea,0x0d,0x04,0x01,0x1b,0x00,0xff]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, -2.0, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0xea,0x0d,0x04,0x01,0x1b,0x00,0xff]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, -2.0, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0xea,0x0d,0x04,0x01,0x1b,0x00,0xff]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, -2.0, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0xea,0x0d,0x04,0x01,0x1b,0x00,0xff]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, -2.0, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0xea,0x0d,0x04,0x01,0x1b,0x00,0xff]
0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff
-# GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff
-# GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
0x05,0x00,0x6b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff
-# GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
0x05,0x00,0x6b,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff
-# GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
0x05,0x00,0x6b,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff
-# GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
0x05,0x00,0x6b,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff
-# GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6b,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
0x05,0x01,0x6b,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff
-# GFX12: v_maxmin_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x6b,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x6b,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x6b,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x6b,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x6b,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
0x05,0x02,0x6b,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff
-# GFX12: v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6b,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6b,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6b,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6b,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6b,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
0x05,0x04,0x6b,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff
-# GFX12: v_maxmin_num_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x6b,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x6b,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x6b,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x6b,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x6b,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
0x05,0x03,0x6b,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff
-# GFX12: v_maxmin_num_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6b,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6b,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6b,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6b,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6b,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
0x05,0x05,0x6b,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01
-# GFX12: v_maxmin_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x6b,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x6b,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x6b,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x6b,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x6b,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
0x05,0x06,0x6b,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13
-# GFX12: v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x06,0x6b,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x06,0x6b,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x06,0x6b,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x06,0x6b,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x06,0x6b,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13]
0xff,0x87,0x6b,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30
-# GFX12: v_maxmin_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x87,0x6b,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x87,0x6b,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x87,0x6b,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x87,0x6b,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x87,0x6b,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+
+0x05,0x78,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 op_sel:[1,1,1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 op_sel:[1,1,1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x6b,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+
+0x05,0x20,0x6b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x6b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x6b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x6b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x6b,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
+
+0x05,0x0a,0x6b,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x0a,0x6b,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, -v1, |v2|, -1 op_sel:[1,0,0,0] mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x0a,0x6b,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x0a,0x6b,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, -v1, |v2|, -1 op_sel:[1,0,0,0] mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x0a,0x6b,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+
+0x05,0x13,0x6b,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x01,0x13
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x13,0x6b,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x01,0x13]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, -|v1|, -|v2|, 0.5 op_sel:[0,1,0,0] mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x13,0x6b,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x01,0x13]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x13,0x6b,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x01,0x13]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, -|v1|, -|v2|, 0.5 op_sel:[0,1,0,0] mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x13,0x6b,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x01,0x13]
+
+0xff,0xc7,0x6b,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0xc7,0x6b,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0xc7,0x6b,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0xc7,0x6b,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0xc7,0x6b,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
0x05,0x00,0x69,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
# GFX12: v_maxmin_num_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x69,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
@@ -3082,49 +3161,125 @@
# W64-FAKE16: v_min_u16_e64_dpp v255, v255, v255 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x00,0x0b,0xd7,0xfa,0xfe,0x03,0x00,0xff,0x6f,0x0d,0x30]
0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
-# GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
0x05,0x00,0x6a,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff
-# GFX12: v_minmax_num_f16_e64_dpp v5, v1, s3, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, s3, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, s3, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, s3, v3.l quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, s3, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x06,0x0c,0x04,0x01,0x1b,0x00,0xff]
0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff
-# GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, v3 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0xe4,0x00,0xff]
0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff
-# GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, v3 row_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x40,0x01,0xff]
0x05,0x00,0x6a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff
-# GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.l row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, v255 row_half_mirror row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0x41,0x01,0xff]
0x05,0x00,0x6a,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff
-# GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, s105 row_shl:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xa6,0x01,0x01,0x01,0x01,0xff]
0x05,0x00,0x6a,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff
-# GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_hi row_shl:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xae,0x01,0x01,0x0f,0x01,0xff]
0x05,0x00,0x6a,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff
-# GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_lo row_shr:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x6a,0xd6,0xfa,0x04,0xaa,0x01,0x01,0x11,0x01,0xff]
0x05,0x01,0x6a,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff
-# GFX12: v_minmax_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x6a,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x6a,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x6a,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x6a,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 row_shr:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x01,0x6a,0xd6,0xfa,0x04,0xee,0x81,0x01,0x1f,0x01,0xff]
0x05,0x02,0x6a,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff
-# GFX12: v_minmax_num_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6a,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6a,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6a,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6a,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, -|v2|, exec_hi row_ror:1 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x02,0x6a,0xd6,0xfa,0x04,0xfe,0x41,0x01,0x21,0x01,0xff]
0x05,0x04,0x6a,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff
-# GFX12: v_minmax_num_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x6a,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x6a,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x6a,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x6a,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, -v1, v2, |exec_lo| row_ror:15 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x04,0x6a,0xd6,0xfa,0x04,0xfa,0x21,0x01,0x2f,0x01,0xff]
0x05,0x03,0x6a,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff
-# GFX12: v_minmax_num_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6a,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6a,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6a,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6a,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, -|v1|, -|v2|, null row_share:0 row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x03,0x6a,0xd6,0xfa,0x04,0xf2,0x61,0x01,0x50,0x01,0xff]
0x05,0x05,0x6a,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01
-# GFX12: v_minmax_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x6a,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x6a,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x6a,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x6a,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x05,0x6a,0xd6,0xfa,0x04,0x06,0xab,0x01,0x5f,0x01,0x01]
0x05,0x06,0x6a,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13
-# GFX12: v_minmax_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x06,0x6a,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x06,0x6a,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x06,0x6a,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x06,0x6a,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x06,0x6a,0xd6,0xfa,0x04,0xc2,0xd3,0x01,0x60,0x01,0x13]
0xff,0x87,0x6a,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30
-# GFX12: v_minmax_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x87,0x6a,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x87,0x6a,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x87,0x6a,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x87,0x6a,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0x87,0x6a,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+
+0x05,0x78,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, v3 op_sel:[1,1,1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, v3 op_sel:[1,1,1,1] quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x78,0x6a,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
+
+
+0x05,0x20,0x6a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x6a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x6a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x6a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x20,0x6a,0xd6,0xfa,0x04,0xfe,0x07,0x01,0xe4,0x00,0xff]
+
+0x05,0x0a,0x6a,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x0a,0x6a,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, -v1, |v2|, -1 op_sel:[1,0,0,0] mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x0a,0x6a,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x0a,0x6a,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, -v1, |v2|, -1 op_sel:[1,0,0,0] mul:2 row_share:15 row_mask:0x0 bank_mask:0x1 ; encoding: [0x05,0x0a,0x6a,0xd6,0xfa,0x04,0x06,0x2b,0x01,0x5f,0x01,0x01]
+
+0x05,0x13,0x6a,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x01,0x13
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x13,0x6a,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x01,0x13]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, -|v1|, -|v2|, 0.5 op_sel:[0,1,0,0] mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x13,0x6a,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x01,0x13]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x13,0x6a,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x01,0x13]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, -|v1|, -|v2|, 0.5 op_sel:[0,1,0,0] mul:4 row_xmask:0 row_mask:0x1 bank_mask:0x3 ; encoding: [0x05,0x13,0x6a,0xd6,0xfa,0x04,0xc2,0x73,0x01,0x60,0x01,0x13]
+
+0xff,0xc7,0x6a,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30
+# W32-REAL16: v_minmax_num_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0xc7,0x6a,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0xc7,0x6a,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0xc7,0x6a,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 row_xmask:15 row_mask:0x3 bank_mask:0x0 bound_ctrl:1 fi:1 ; encoding: [0xff,0xc7,0x6a,0xd6,0xfa,0xfe,0xf7,0xfb,0xff,0x6f,0x0d,0x30]
0x05,0x00,0x68,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff
# GFX12: v_minmax_num_f32_e64_dpp v5, v1, v2, v3 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf ; encoding: [0x05,0x00,0x68,0xd6,0xfa,0x04,0x0e,0x04,0x01,0x1b,0x00,0xff]
diff --git a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt
index 06b4bfcc..2aaba2a 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/gfx12_dasm_vop3_dpp8.txt
@@ -1294,43 +1294,112 @@
# W64-FAKE16: v_max_u16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x09,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
0x05,0x00,0x6b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
-# GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
0x05,0x00,0x6b,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05
-# GFX12: v_maxmin_num_f16_e64_dpp v5, v1, s3, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, s3, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, s3, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, s3, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, s3, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05]
0x05,0x00,0x6b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05
-# GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
0x05,0x00,0x6b,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05
-# GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
0x05,0x00,0x6b,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05
-# GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
0x05,0x00,0x6b,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05
-# GFX12: v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6b,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
0x05,0x01,0x6b,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05
-# GFX12: v_maxmin_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6b,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6b,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6b,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6b,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6b,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
0x05,0x02,0x6b,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05
-# GFX12: v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6b,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6b,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6b,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6b,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6b,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
0x05,0x04,0x6b,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05
-# GFX12: v_maxmin_num_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x6b,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x6b,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x6b,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x6b,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x6b,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
0x05,0x03,0x6b,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05
-# GFX12: v_maxmin_num_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6b,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6b,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6b,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6b,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6b,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
0x05,0x05,0x6b,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05
-# GFX12: v_maxmin_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x6b,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x6b,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x6b,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x6b,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x6b,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
0x05,0x06,0x6b,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05
-# GFX12: v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x6b,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x6b,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x6b,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x6b,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x6b,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
0xff,0x87,0x6b,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00
-# GFX12: v_maxmin_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x87,0x6b,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x87,0x6b,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x87,0x6b,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x87,0x6b,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x87,0x6b,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+0x05,0x78,0x6b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x6b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x6b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x6b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, v3 op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x6b,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x20,0x6b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x6b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x6b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x6b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x6b,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+0x05,0x0a,0x6b,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0a,0x6b,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, -v1, |v2|, -1 op_sel:[1,0,0,0] mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0a,0x6b,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0a,0x6b,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, -v1, |v2|, -1 op_sel:[1,0,0,0] mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0a,0x6b,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+
+0x05,0x13,0x6b,0xd6,0xe9,0x04,0xc2,0x73,0x01,0x77,0x39,0x05
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x13,0x6b,0xd6,0xe9,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v5, -|v1|, -|v2|, 0.5 op_sel:[0,1,0,0] mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x13,0x6b,0xd6,0xe9,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x13,0x6b,0xd6,0xe9,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v5, -|v1|, -|v2|, 0.5 op_sel:[0,1,0,0] mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x13,0x6b,0xd6,0xe9,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
+
+0xff,0xc7,0x6b,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00
+# W32-REAL16: v_maxmin_num_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0xc7,0x6b,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W32-FAKE16: v_maxmin_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0xc7,0x6b,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W64-REAL16: v_maxmin_num_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0xc7,0x6b,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W64-FAKE16: v_maxmin_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0xc7,0x6b,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
0x05,0x00,0x69,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
# GFX12: v_maxmin_num_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x69,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
@@ -1768,43 +1837,112 @@
# W64-FAKE16: v_min_u16_e64_dpp v255, v255, v255 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x00,0x0b,0xd7,0xea,0xfe,0x03,0x00,0xff,0x00,0x00,0x00]
0x05,0x00,0x6a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
-# GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
0x05,0x00,0x6a,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05
-# GFX12: v_minmax_num_f16_e64_dpp v5, v1, s3, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, s3, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, s3, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, s3, v3.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, s3, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x06,0x0c,0x04,0x01,0x77,0x39,0x05]
0x05,0x00,0x6a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05
-# GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.l dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, v255 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
0x05,0x00,0x6a,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05
-# GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, s105 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xa6,0x01,0x01,0x77,0x39,0x05]
0x05,0x00,0x6a,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05
-# GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xae,0x01,0x01,0x77,0x39,0x05]
0x05,0x00,0x6a,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05
-# GFX12: v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, vcc_lo dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x6a,0xd6,0xe9,0x04,0xaa,0x01,0x01,0x77,0x39,0x05]
0x05,0x01,0x6a,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05
-# GFX12: v_minmax_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6a,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6a,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6a,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, |v1.l|, v2.l, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6a,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, |v1|, v2, -ttmp15 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x01,0x6a,0xd6,0xe9,0x04,0xee,0x81,0x01,0x77,0x39,0x05]
0x05,0x02,0x6a,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05
-# GFX12: v_minmax_num_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6a,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6a,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6a,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6a,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, -|v2|, exec_hi dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x02,0x6a,0xd6,0xe9,0x04,0xfe,0x41,0x01,0x77,0x39,0x05]
0x05,0x04,0x6a,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05
-# GFX12: v_minmax_num_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x6a,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x6a,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x6a,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, -v1.l, v2.l, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x6a,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, -v1, v2, |exec_lo| dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x04,0x6a,0xd6,0xe9,0x04,0xfa,0x21,0x01,0x77,0x39,0x05]
0x05,0x03,0x6a,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05
-# GFX12: v_minmax_num_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6a,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6a,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6a,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.l|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6a,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, -|v1|, -|v2|, null dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x03,0x6a,0xd6,0xe9,0x04,0xf2,0x61,0x01,0x77,0x39,0x05]
0x05,0x05,0x6a,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05
-# GFX12: v_minmax_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x6a,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x6a,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x6a,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, v2.l, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x6a,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, -|v1|, v2, -|-1| mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x05,0x6a,0xd6,0xe9,0x04,0x06,0xab,0x01,0x77,0x39,0x05]
0x05,0x06,0x6a,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05
-# GFX12: v_minmax_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x6a,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x6a,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x6a,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, -|v2.l|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x6a,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, -|v2|, -|0.5| mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x06,0x6a,0xd6,0xe9,0x04,0xc2,0xd3,0x01,0x77,0x39,0x05]
0xff,0x87,0x6a,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00
-# GFX12: v_minmax_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x87,0x6a,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W32-REAL16: v_minmax_num_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x87,0x6a,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x87,0x6a,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v255.l, -|v255.l|, -|v255.l|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x87,0x6a,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0x87,0x6a,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+
+0x05,0x78,0x6a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x6a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, v3 op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x6a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.h, v1.h, v2.h, v3.h op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x6a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, v3 op_sel:[1,1,1,1] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x78,0x6a,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
+
+0x05,0x20,0x6a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x6a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x6a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, v1.l, v2.l, v255.h op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x6a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, v1, v2, v255 op_sel:[0,0,1,0] dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x20,0x6a,0xd6,0xe9,0x04,0xfe,0x07,0x01,0x77,0x39,0x05]
+
+0x05,0x0a,0x6a,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0a,0x6a,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, -v1, |v2|, -1 op_sel:[1,0,0,0] mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0a,0x6a,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, -v1.h, |v2.l|, -1 op_sel:[1,0,0,0] mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0a,0x6a,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, -v1, |v2|, -1 op_sel:[1,0,0,0] mul:2 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x0a,0x6a,0xd6,0xe9,0x04,0x06,0x2b,0x01,0x77,0x39,0x05]
+
+0x05,0x13,0x6a,0xd6,0xe9,0x04,0xc2,0x73,0x01,0x77,0x39,0x05
+# W32-REAL16: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x13,0x6a,0xd6,0xe9,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v5, -|v1|, -|v2|, 0.5 op_sel:[0,1,0,0] mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x13,0x6a,0xd6,0xe9,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v5.l, -|v1.l|, -|v2.h|, 0.5 op_sel:[0,1,0,0] mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x13,0x6a,0xd6,0xe9,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v5, -|v1|, -|v2|, 0.5 op_sel:[0,1,0,0] mul:4 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x13,0x6a,0xd6,0xe9,0x04,0xc2,0x73,0x01,0x77,0x39,0x05]
+
+0xff,0xc7,0x6a,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00
+# W32-REAL16: v_minmax_num_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0xc7,0x6a,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W32-FAKE16: v_minmax_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0xc7,0x6a,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W64-REAL16: v_minmax_num_f16_e64_dpp v255.h, -|v255.l|, -|v255.l|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0xc7,0x6a,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
+# W64-FAKE16: v_minmax_num_f16_e64_dpp v255, -|v255|, -|v255|, -|src_scc| op_sel:[0,0,0,1] clamp div:2 dpp8:[0,0,0,0,0,0,0,0] fi:1 ; encoding: [0xff,0xc7,0x6a,0xd6,0xea,0xfe,0xf7,0xfb,0xff,0x00,0x00,0x00]
0x05,0x00,0x68,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05
# GFX12: v_minmax_num_f32_e64_dpp v5, v1, v2, v3 dpp8:[7,6,5,4,3,2,1,0] ; encoding: [0x05,0x00,0x68,0xd6,0xe9,0x04,0x0e,0x04,0x01,0x77,0x39,0x05]
diff --git a/llvm/test/MC/RISCV/custom_reloc.s b/llvm/test/MC/RISCV/custom_reloc.s
index 4bd4700..cdb8194 100644
--- a/llvm/test/MC/RISCV/custom_reloc.s
+++ b/llvm/test/MC/RISCV/custom_reloc.s
@@ -21,16 +21,33 @@
.reloc ., R_RISCV_VENDOR, VENDOR_NAME
.reloc ., R_RISCV_CUSTOM192, my_foo + 1
addi a0, a0, 0
- # CHECK-ASM: [[L1:.L[^:]+]]:
+ # CHECK-ASM: [[L1:.L[^:]+]]:
# CHECK-ASM-NEXT: .reloc [[L1]], R_RISCV_VENDOR, VENDOR_NAME
# CHECK-ASM-NEXT: [[L2:.L[^:]+]]:
# CHECK-ASM-NEXT: .reloc [[L2]], R_RISCV_CUSTOM192, my_foo+1
# CHECK-ASM-NEXT: mv a0, a0
- # CHECK-OBJ: addi a0, a0, 0
+ # CHECK-OBJ: addi a0, a0, 0
# CHECK-OBJ-NEXT: R_RISCV_VENDOR VENDOR_NAME
# CHECK-OBJ-NEXT: R_RISCV_CUSTOM192 my_foo+0x1
nop
# CHECK-ASM: nop
# CHECK-OBJ: addi zero, zero, 0x0
+
+ .reloc ., R_RISCV_VENDOR, QUALCOMM
+ .reloc ., R_RISCV_QC_ABS20_U, my_bar + 2
+ addi a1, a1, 0
+ # CHECK-ASM: [[L3:.L[^:]+]]:
+ # CHECK-ASM-NEXT: .reloc [[L3]], R_RISCV_VENDOR, QUALCOMM
+ # CHECK-ASM-NEXT: [[L4:.L[^:]+]]:
+ # CHECK-ASM-NEXT: .reloc [[L4]], R_RISCV_QC_ABS20_U, my_bar+2
+ # CHECK-ASM-NEXT: mv a1, a1
+
+ # CHECK-OBJ: addi a1, a1, 0
+ # CHECK-OBJ-NEXT: R_RISCV_VENDOR QUALCOMM
+ # CHECK-OBJ-NEXT: R_RISCV_CUSTOM192 my_bar+0x2
+
+ nop
+ # CHECK-ASM: nop
+ # CHECK-OBJ: addi zero, zero, 0x0
diff --git a/llvm/test/MC/RISCV/xqcicm-invalid.s b/llvm/test/MC/RISCV/xqcicm-invalid.s
new file mode 100644
index 0000000..8b37ed4
--- /dev/null
+++ b/llvm/test/MC/RISCV/xqcicm-invalid.s
@@ -0,0 +1,152 @@
+# Xqcicm - Qualcomm uC Conditional Move Extension
+# RUN: not llvm-mc -triple riscv32 -mattr=+experimental-xqcicm < %s 2>&1 \
+# RUN: | FileCheck -check-prefixes=CHECK,CHECK-IMM %s
+# RUN: not llvm-mc -triple riscv32 -mattr=-experimental-xqcicm < %s 2>&1 \
+# RUN: | FileCheck -check-prefixes=CHECK,CHECK-EXT %s
+
+# CHECK: :[[@LINE+1]]:12: error: invalid operand for instruction
+qc.c.mveqz 9, x10
+
+# CHECK: :[[@LINE+1]]:1: error: too few operands for instruction
+qc.c.mveqz x9
+
+# CHECK-EXT: :[[@LINE+1]]:1: error: instruction requires the following: 'Xqcicm' (Qualcomm uC Conditional Move Extension)
+qc.c.mveqz x9, x10
+
+
+# CHECK: :[[@LINE+1]]:9: error: invalid operand for instruction
+qc.mveq 9, x10, x11, x12
+
+# CHECK: :[[@LINE+1]]:1: error: too few operands for instruction
+qc.mveq x9
+
+# CHECK-EXT: :[[@LINE+1]]:1: error: instruction requires the following: 'Xqcicm' (Qualcomm uC Conditional Move Extension)
+qc.mveq x9, x10, x11, x12
+
+
+# CHECK: :[[@LINE+1]]:9: error: invalid operand for instruction
+qc.mvge 9, x10, x11, x12
+
+# CHECK: :[[@LINE+1]]:1: error: too few operands for instruction
+qc.mvge x9
+
+# CHECK-EXT: :[[@LINE+1]]:1: error: instruction requires the following: 'Xqcicm' (Qualcomm uC Conditional Move Extension)
+qc.mvge x9, x10, x11, x12
+
+
+# CHECK: :[[@LINE+1]]:10: error: invalid operand for instruction
+qc.mvgeu 9, x10, x11, x12
+
+# CHECK: :[[@LINE+1]]:1: error: too few operands for instruction
+qc.mvgeu x9
+
+# CHECK-EXT: :[[@LINE+1]]:1: error: instruction requires the following: 'Xqcicm' (Qualcomm uC Conditional Move Extension)
+qc.mvgeu x9, x10, x11, x12
+
+
+# CHECK: :[[@LINE+1]]:9: error: invalid operand for instruction
+qc.mvlt 9, x10, x11, x12
+
+# CHECK: :[[@LINE+1]]:1: error: too few operands for instruction
+qc.mvlt x9
+
+# CHECK-EXT: :[[@LINE+1]]:1: error: instruction requires the following: 'Xqcicm' (Qualcomm uC Conditional Move Extension)
+qc.mvlt x9, x10, x11, x12
+
+
+# CHECK: :[[@LINE+1]]:10: error: invalid operand for instruction
+qc.mvltu 9, x10, x11, x12
+
+# CHECK: :[[@LINE+1]]:1: error: too few operands for instruction
+qc.mvltu x9
+
+# CHECK-EXT: :[[@LINE+1]]:1: error: instruction requires the following: 'Xqcicm' (Qualcomm uC Conditional Move Extension)
+qc.mvltu x9, x10, x11, x12
+
+
+# CHECK: :[[@LINE+1]]:9: error: invalid operand for instruction
+qc.mvne 9, x10, x11, x12
+
+# CHECK: :[[@LINE+1]]:1: error: too few operands for instruction
+qc.mvne x9
+
+# CHECK-EXT: :[[@LINE+1]]:1: error: instruction requires the following: 'Xqcicm' (Qualcomm uC Conditional Move Extension)
+qc.mvne x9, x10, x11, x12
+
+
+# CHECK: :[[@LINE+1]]:10: error: invalid operand for instruction
+qc.mveqi 9, x10, 5, x12
+
+# CHECK: :[[@LINE+1]]:1: error: too few operands for instruction
+qc.mveqi x9
+
+# CHECK-IMM: :[[@LINE+1]]:19: error: immediate must be an integer in the range [-16, 15]
+qc.mveqi x9, x10, 17, x12
+
+# CHECK-EXT: :[[@LINE+1]]:1: error: instruction requires the following: 'Xqcicm' (Qualcomm uC Conditional Move Extension)
+qc.mveqi x9, x10, 5, x12
+
+
+# CHECK: :[[@LINE+1]]:10: error: invalid operand for instruction
+qc.mvgei 9, x10, 5, x12
+
+# CHECK: :[[@LINE+1]]:1: error: too few operands for instruction
+qc.mvgei x9
+
+# CHECK-IMM: :[[@LINE+1]]:19: error: immediate must be an integer in the range [-16, 15]
+qc.mvgei x9, x10, 17, x12
+
+# CHECK-EXT: :[[@LINE+1]]:1: error: instruction requires the following: 'Xqcicm' (Qualcomm uC Conditional Move Extension)
+qc.mvgei x9, x10, 5, x12
+
+
+# CHECK: :[[@LINE+1]]:10: error: invalid operand for instruction
+qc.mvlti 9, x10, 5, x12
+
+# CHECK: :[[@LINE+1]]:1: error: too few operands for instruction
+qc.mvlti x9
+
+# CHECK-IMM: :[[@LINE+1]]:19: error: immediate must be an integer in the range [-16, 15]
+qc.mvlti x9, x10, 17, x12
+
+# CHECK-EXT: :[[@LINE+1]]:1: error: instruction requires the following: 'Xqcicm' (Qualcomm uC Conditional Move Extension)
+qc.mvlti x9, x10, 5, x12
+
+
+# CHECK: :[[@LINE+1]]:10: error: invalid operand for instruction
+qc.mvnei 9, x10, 5, x12
+
+# CHECK: :[[@LINE+1]]:1: error: too few operands for instruction
+qc.mvnei x9
+
+# CHECK-IMM: :[[@LINE+1]]:19: error: immediate must be an integer in the range [-16, 15]
+qc.mvnei x9, x10, 17, x12
+
+# CHECK-EXT: :[[@LINE+1]]:1: error: instruction requires the following: 'Xqcicm' (Qualcomm uC Conditional Move Extension)
+qc.mvnei x9, x10, 5, x12
+
+
+# CHECK: :[[@LINE+1]]:11: error: invalid operand for instruction
+qc.mvltui 9, x10, 5, x12
+
+# CHECK: :[[@LINE+1]]:1: error: too few operands for instruction
+qc.mvltui x9
+
+# CHECK-IMM: :[[@LINE+1]]:20: error: immediate must be an integer in the range [0, 31]
+qc.mvltui x9, x10, 37, x12
+
+# CHECK-EXT: :[[@LINE+1]]:1: error: instruction requires the following: 'Xqcicm' (Qualcomm uC Conditional Move Extension)
+qc.mvltui x9, x10, 5, x12
+
+
+# CHECK: :[[@LINE+1]]:11: error: invalid operand for instruction
+qc.mvgeui 9, x10, 5, x12
+
+# CHECK: :[[@LINE+1]]:1: error: too few operands for instruction
+qc.mvgeui x9
+
+# CHECK-IMM: :[[@LINE+1]]:20: error: immediate must be an integer in the range [0, 31]
+qc.mvgeui x9, x10, 37, x12
+
+# CHECK-EXT: :[[@LINE+1]]:1: error: instruction requires the following: 'Xqcicm' (Qualcomm uC Conditional Move Extension)
+qc.mvgeui x9, x10, 5, x12
diff --git a/llvm/test/MC/RISCV/xqcicm-valid.s b/llvm/test/MC/RISCV/xqcicm-valid.s
new file mode 100644
index 0000000..7d0050b
--- /dev/null
+++ b/llvm/test/MC/RISCV/xqcicm-valid.s
@@ -0,0 +1,123 @@
+# Xqcicm - Qualcomm uC Conditional Move Extension
+# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-xqcicm -riscv-no-aliases -show-encoding \
+# RUN: | FileCheck -check-prefixes=CHECK-ENC,CHECK-INST %s
+# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+experimental-xqcicm < %s \
+# RUN: | llvm-objdump --mattr=+experimental-xqcicm -M no-aliases --no-print-imm-hex -d - \
+# RUN: | FileCheck -check-prefix=CHECK-INST %s
+# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-xqcicm -show-encoding \
+# RUN: | FileCheck -check-prefixes=CHECK-ENC,CHECK-INST %s
+# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+experimental-xqcicm < %s \
+# RUN: | llvm-objdump --mattr=+experimental-xqcicm --no-print-imm-hex -d - \
+# RUN: | FileCheck -check-prefix=CHECK-INST %s
+
+# CHECK-INST: qc.c.mveqz s1, a0
+# CHECK-ENC: encoding: [0x06,0xad]
+qc.c.mveqz x9, x10
+
+
+# CHECK-INST: qc.mveq s1, a0, a1, a2
+# CHECK-ENC: encoding: [0xdb,0x04,0xb5,0x60]
+qc.mveq x9, x10, x11, x12
+
+
+# CHECK-INST: qc.mvge s1, a0, a1, a2
+# CHECK-ENC: encoding: [0xdb,0x54,0xb5,0x60]
+qc.mvge x9, x10, x11, x12
+
+
+# CHECK-INST: qc.mvgeu s1, a0, a1, a2
+# CHECK-ENC: encoding: [0xdb,0x74,0xb5,0x60]
+qc.mvgeu x9, x10, x11, x12
+
+
+# CHECK-INST: qc.mvlt s1, a0, a1, a2
+# CHECK-ENC: encoding: [0xdb,0x44,0xb5,0x60]
+qc.mvlt x9, x10, x11, x12
+
+
+# CHECK-INST: qc.mvltu s1, a0, a1, a2
+# CHECK-ENC: encoding: [0xdb,0x64,0xb5,0x60]
+qc.mvltu x9, x10, x11, x12
+
+
+# CHECK-INST: qc.mvne s1, a0, a1, a2
+# CHECK-ENC: encoding: [0xdb,0x14,0xb5,0x60]
+qc.mvne x9, x10, x11, x12
+
+
+# CHECK-INST: qc.mveqi s1, a0, 5, a2
+# CHECK-ENC: encoding: [0xdb,0x04,0x55,0x64]
+qc.mveqi x9, x10, 5, x12
+
+# CHECK-INST: qc.mveqi s1, a0, -16, a2
+# CHECK-ENC: encoding: [0xdb,0x04,0x05,0x65]
+qc.mveqi x9, x10, -16, x12
+
+# CHECK-INST: qc.mveqi s1, a0, 15, a2
+# CHECK-ENC: encoding: [0xdb,0x04,0xf5,0x64]
+qc.mveqi x9, x10, 15, x12
+
+
+# CHECK-INST: qc.mvgei s1, a0, 5, a2
+# CHECK-ENC: encoding: [0xdb,0x54,0x55,0x64]
+qc.mvgei x9, x10, 5, x12
+
+# CHECK-INST: qc.mvgei s1, a0, -16, a2
+# CHECK-ENC: encoding: [0xdb,0x54,0x05,0x65]
+qc.mvgei x9, x10, -16, x12
+
+# CHECK-INST: qc.mvgei s1, a0, 15, a2
+# CHECK-ENC: encoding: [0xdb,0x54,0xf5,0x64]
+qc.mvgei x9, x10, 15, x12
+
+
+# CHECK-INST: qc.mvlti s1, a0, 5, a2
+# CHECK-ENC: encoding: [0xdb,0x44,0x55,0x64]
+qc.mvlti x9, x10, 5, x12
+
+# CHECK-INST: qc.mvlti s1, a0, -16, a2
+# CHECK-ENC: encoding: [0xdb,0x44,0x05,0x65]
+qc.mvlti x9, x10, -16, x12
+
+# CHECK-INST: qc.mvlti s1, a0, 15, a2
+# CHECK-ENC: encoding: [0xdb,0x44,0xf5,0x64]
+qc.mvlti x9, x10, 15, x12
+
+
+# CHECK-INST: qc.mvnei s1, a0, 5, a2
+# CHECK-ENC: encoding: [0xdb,0x14,0x55,0x64]
+qc.mvnei x9, x10, 5, x12
+
+# CHECK-INST: qc.mvnei s1, a0, -16, a2
+# CHECK-ENC: encoding: [0xdb,0x14,0x05,0x65]
+qc.mvnei x9, x10, -16, x12
+
+# CHECK-INST: qc.mvnei s1, a0, 15, a2
+# CHECK-ENC: encoding: [0xdb,0x14,0xf5,0x64]
+qc.mvnei x9, x10, 15, x12
+
+
+# CHECK-INST: qc.mvltui s1, a0, 5, a2
+# CHECK-ENC: encoding: [0xdb,0x64,0x55,0x64]
+qc.mvltui x9, x10, 5, x12
+
+# CHECK-INST: qc.mvltui s1, a0, 0, a2
+# CHECK-ENC: encoding: [0xdb,0x64,0x05,0x64]
+qc.mvltui x9, x10, 0, x12
+
+# CHECK-INST: qc.mvltui s1, a0, 31, a2
+# CHECK-ENC: encoding: [0xdb,0x64,0xf5,0x65]
+qc.mvltui x9, x10, 31, x12
+
+
+# CHECK-INST: qc.mvgeui s1, a0, 5, a2
+# CHECK-ENC: encoding: [0xdb,0x74,0x55,0x64]
+qc.mvgeui x9, x10, 5, x12
+
+# CHECK-INST: qc.mvgeui s1, a0, 0, a2
+# CHECK-ENC: encoding: [0xdb,0x74,0x05,0x64]
+qc.mvgeui x9, x10, 0, x12
+
+# CHECK-INST: qc.mvgeui s1, a0, 31, a2
+# CHECK-ENC: encoding: [0xdb,0x74,0xf5,0x65]
+qc.mvgeui x9, x10, 31, x12
diff --git a/llvm/test/ThinLTO/X86/memprof-recursive.ll b/llvm/test/ThinLTO/X86/memprof-recursive.ll
new file mode 100644
index 0000000..2b1d708
--- /dev/null
+++ b/llvm/test/ThinLTO/X86/memprof-recursive.ll
@@ -0,0 +1,141 @@
+;; Test recursion handling during cloning.
+;;
+;; See llvm/test/Transforms/MemProfContextDisambiguation/recursive.ll for
+;; information on how the test was created.
+
+; RUN: opt -thinlto-bc %s >%t.o
+
+;; By default we should enable cloning of contexts involved in recursive
+;; cycles, but not through the cycle itself. That is, until full support for
+;; recursion is added, the cloned recursive call from C back to B (line 12)
+;; will not be updated to call a clone.
+; RUN: llvm-lto2 run %t.o -enable-memprof-context-disambiguation \
+; RUN: -supports-hot-cold-new \
+; RUN: -r=%t.o,_Z1Dv,plx \
+; RUN: -r=%t.o,_Z1Ci,plx \
+; RUN: -r=%t.o,_Z1Bi,plx \
+; RUN: -r=%t.o,main,plx \
+; RUN: -r=%t.o,_Znam, \
+; RUN: -memprof-verify-ccg -memprof-verify-nodes \
+; RUN: -pass-remarks=memprof-context-disambiguation \
+; RUN: -o %t.out 2>&1 | FileCheck %s \
+; RUN:  --implicit-check-not "memprof_recursive.cc:12:10: call in clone _Z1Ci.memprof.1 assigned" \
+; RUN: --check-prefix=ALLOW-RECUR-CALLSITES --check-prefix=ALLOW-RECUR-CONTEXTS
+
+;; Skipping recursive callsites should result in no cloning.
+; RUN: llvm-lto2 run %t.o -enable-memprof-context-disambiguation \
+; RUN: -supports-hot-cold-new \
+; RUN: -r=%t.o,_Z1Dv,plx \
+; RUN: -r=%t.o,_Z1Ci,plx \
+; RUN: -r=%t.o,_Z1Bi,plx \
+; RUN: -r=%t.o,main,plx \
+; RUN: -r=%t.o,_Znam, \
+; RUN: -memprof-verify-ccg -memprof-verify-nodes \
+; RUN: -pass-remarks=memprof-context-disambiguation \
+; RUN: -memprof-allow-recursive-callsites=false \
+; RUN: -o %t.out 2>&1 | FileCheck %s --allow-empty \
+; RUN:  --implicit-check-not "memprof_recursive.cc:12:10: call in clone _Z1Ci.memprof.1 assigned" \
+; RUN: --implicit-check-not="created clone" \
+; RUN: --implicit-check-not="marked with memprof allocation attribute cold"
+
+;; Skipping recursive contexts should prevent a spurious call to the cloned
+;; version of B from the context starting at memprof_recursive.cc:19:13, which
+;; is actually recursive (until that support is added).
+; RUN: llvm-lto2 run %t.o -enable-memprof-context-disambiguation \
+; RUN: -supports-hot-cold-new \
+; RUN: -r=%t.o,_Z1Dv,plx \
+; RUN: -r=%t.o,_Z1Ci,plx \
+; RUN: -r=%t.o,_Z1Bi,plx \
+; RUN: -r=%t.o,main,plx \
+; RUN: -r=%t.o,_Znam, \
+; RUN: -memprof-verify-ccg -memprof-verify-nodes \
+; RUN: -pass-remarks=memprof-context-disambiguation \
+; RUN: -memprof-allow-recursive-contexts=false \
+; RUN: -o %t.out 2>&1 | FileCheck %s \
+; RUN:  --implicit-check-not "memprof_recursive.cc:12:10: call in clone _Z1Ci.memprof.1 assigned" \
+; RUN: --check-prefix=ALLOW-RECUR-CALLSITES --check-prefix=SKIP-RECUR-CONTEXTS
+
+; ALLOW-RECUR-CALLSITES: memprof_recursive.cc:4:0: created clone _Z1Dv.memprof.1
+; ALLOW-RECUR-CALLSITES: memprof_recursive.cc:5:10: call in clone _Z1Dv marked with memprof allocation attribute notcold
+; ALLOW-RECUR-CALLSITES: memprof_recursive.cc:5:10: call in clone _Z1Dv.memprof.1 marked with memprof allocation attribute cold
+; ALLOW-RECUR-CALLSITES: memprof_recursive.cc:8:0: created clone _Z1Ci.memprof.1
+; ALLOW-RECUR-CALLSITES: memprof_recursive.cc:10:12: call in clone _Z1Ci.memprof.1 assigned to call function clone _Z1Dv.memprof.1
+; ALLOW-RECUR-CALLSITES: memprof_recursive.cc:14:0: created clone _Z1Bi.memprof.1
+; ALLOW-RECUR-CALLSITES: memprof_recursive.cc:15:10: call in clone _Z1Bi.memprof.1 assigned to call function clone _Z1Ci.memprof.1
+;; We should only call the cold clone for the recursive context if we enabled
+;; recursive contexts via -memprof-allow-recursive-contexts=true (default).
+; ALLOW-RECUR-CONTEXTS: memprof_recursive.cc:19:13: call in clone main assigned to call function clone _Z1Bi.memprof.1
+; SKIP-RECUR-CONTEXTS-NOT: memprof_recursive.cc:19:13: call in clone main assigned to call function clone _Z1Bi.memprof.1
+; ALLOW-RECUR-CALLSITES: memprof_recursive.cc:20:13: call in clone main assigned to call function clone _Z1Bi.memprof.1
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define ptr @_Z1Dv() !dbg !3 {
+entry:
+ %call = tail call ptr @_Znam(i64 10), !dbg !6, !memprof !7, !callsite !14
+ ret ptr null
+}
+
+define ptr @_Z1Ci(i32 %n) !dbg !15 {
+entry:
+ %call = tail call ptr @_Z1Dv(), !dbg !16, !callsite !17
+ br label %return
+
+if.end: ; No predecessors!
+ %call1 = tail call ptr @_Z1Bi(i32 0), !dbg !18, !callsite !19
+ br label %return
+
+return: ; preds = %if.end, %entry
+ ret ptr null
+}
+
+define ptr @_Z1Bi(i32 %n) !dbg !20 {
+entry:
+ %call = tail call ptr @_Z1Ci(i32 0), !dbg !21, !callsite !22
+ ret ptr null
+}
+
+define i32 @main() {
+entry:
+ %call = tail call ptr @_Z1Bi(i32 0), !dbg !23, !callsite !25
+ %call1 = tail call ptr @_Z1Bi(i32 0), !dbg !26, !callsite !27
+ %call2 = tail call ptr @_Z1Bi(i32 0), !dbg !28, !callsite !29
+ ret i32 0
+}
+
+declare ptr @_Znam(i64)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !1, producer: "clang version 20.0.0git (https://github.com/llvm/llvm-project.git 7aec6dc477f8148ed066d10dfc7a012a51b6599c)", isOptimized: true, runtimeVersion: 0, emissionKind: LineTablesOnly, splitDebugInlining: false, debugInfoForProfiling: true, nameTableKind: None)
+!1 = !DIFile(filename: "memprof_recursive.cc", directory: ".", checksumkind: CSK_MD5, checksum: "2f15f63b187a0e0d40e7fdd18b10576a")
+!2 = !{i32 2, !"Debug Info Version", i32 3}
+!3 = distinct !DISubprogram(name: "D", linkageName: "_Z1Dv", scope: !1, file: !1, line: 4, type: !4, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!4 = !DISubroutineType(types: !5)
+!5 = !{}
+!6 = !DILocation(line: 5, column: 10, scope: !3)
+!7 = !{!8, !10, !12}
+!8 = !{!9, !"cold"}
+!9 = !{i64 6541423618768552252, i64 -200552803509692312, i64 -2954124005641725917, i64 6307901912192269588}
+!10 = !{!11, !"notcold"}
+!11 = !{i64 6541423618768552252, i64 -200552803509692312, i64 -2954124005641725917, i64 -7155190423157709404, i64 -2954124005641725917, i64 8632435727821051414}
+!12 = !{!13, !"cold"}
+!13 = !{i64 6541423618768552252, i64 -200552803509692312, i64 -2954124005641725917, i64 -7155190423157709404, i64 -2954124005641725917, i64 -3421689549917153178}
+!14 = !{i64 6541423618768552252}
+!15 = distinct !DISubprogram(name: "C", linkageName: "_Z1Ci", scope: !1, file: !1, line: 8, type: !4, scopeLine: 8, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!16 = !DILocation(line: 10, column: 12, scope: !15)
+!17 = !{i64 -200552803509692312}
+!18 = !DILocation(line: 12, column: 10, scope: !15)
+!19 = !{i64 -7155190423157709404}
+!20 = distinct !DISubprogram(name: "B", linkageName: "_Z1Bi", scope: !1, file: !1, line: 14, type: !4, scopeLine: 14, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!21 = !DILocation(line: 15, column: 10, scope: !20)
+!22 = !{i64 -2954124005641725917}
+!23 = !DILocation(line: 18, column: 13, scope: !24)
+!24 = distinct !DISubprogram(name: "main", scope: !1, file: !1, line: 17, type: !4, scopeLine: 17, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!25 = !{i64 8632435727821051414}
+!26 = !DILocation(line: 19, column: 13, scope: !24)
+!27 = !{i64 -3421689549917153178}
+!28 = !DILocation(line: 20, column: 13, scope: !24)
+!29 = !{i64 6307901912192269588}
diff --git a/llvm/test/Transforms/InstCombine/and-xor-or.ll b/llvm/test/Transforms/InstCombine/and-xor-or.ll
index 5a0890e..5a58995 100644
--- a/llvm/test/Transforms/InstCombine/and-xor-or.ll
+++ b/llvm/test/Transforms/InstCombine/and-xor-or.ll
@@ -388,10 +388,9 @@ define i8 @xor_shl(i8 %x, i8 %y, i8 %zarg, i8 %shamt) {
; CHECK-LABEL: define {{[^@]+}}@xor_shl
; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[ZARG:%.*]], i8 [[SHAMT:%.*]]) {
; CHECK-NEXT: [[Z:%.*]] = sdiv i8 42, [[ZARG]]
-; CHECK-NEXT: [[SX:%.*]] = shl i8 [[X]], [[SHAMT]]
-; CHECK-NEXT: [[SY:%.*]] = shl i8 [[Y]], [[SHAMT]]
-; CHECK-NEXT: [[A:%.*]] = xor i8 [[Z]], [[SX]]
-; CHECK-NEXT: [[R:%.*]] = xor i8 [[A]], [[SY]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y]]
+; CHECK-NEXT: [[TMP2:%.*]] = shl i8 [[TMP1]], [[SHAMT]]
+; CHECK-NEXT: [[R:%.*]] = xor i8 [[TMP2]], [[Z]]
; CHECK-NEXT: ret i8 [[R]]
;
%z = sdiv i8 42, %zarg ; thwart complexity-based canonicalization
@@ -406,10 +405,9 @@ define i8 @and_lshr(i8 %x, i8 %y, i8 %zarg, i8 %shamt) {
; CHECK-LABEL: define {{[^@]+}}@and_lshr
; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[ZARG:%.*]], i8 [[SHAMT:%.*]]) {
; CHECK-NEXT: [[Z:%.*]] = sdiv i8 42, [[ZARG]]
-; CHECK-NEXT: [[SX:%.*]] = lshr i8 [[X]], [[SHAMT]]
-; CHECK-NEXT: [[SY:%.*]] = lshr i8 [[Y]], [[SHAMT]]
-; CHECK-NEXT: [[A:%.*]] = and i8 [[Z]], [[SX]]
-; CHECK-NEXT: [[R:%.*]] = and i8 [[SY]], [[A]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X]], [[Y]]
+; CHECK-NEXT: [[TMP2:%.*]] = lshr i8 [[TMP1]], [[SHAMT]]
+; CHECK-NEXT: [[R:%.*]] = and i8 [[TMP2]], [[Z]]
; CHECK-NEXT: ret i8 [[R]]
;
%z = sdiv i8 42, %zarg ; thwart complexity-based canonicalization
@@ -435,6 +433,51 @@ define i8 @or_lshr(i8 %x, i8 %y, i8 %z, i8 %shamt) {
ret i8 %r
}
+define i8 @or_lshr_commuted1(i8 %x, i8 %y, i8 %z, i8 %shamt) {
+; CHECK-LABEL: define {{[^@]+}}@or_lshr_commuted1
+; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[Z:%.*]], i8 [[SHAMT:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[X]], [[Y]]
+; CHECK-NEXT: [[TMP2:%.*]] = lshr i8 [[TMP1]], [[SHAMT]]
+; CHECK-NEXT: [[R:%.*]] = or i8 [[TMP2]], [[Z]]
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %sx = lshr i8 %x, %shamt
+ %sy = lshr i8 %y, %shamt
+ %a = or i8 %z, %sx
+ %r = or i8 %sy, %a
+ ret i8 %r
+}
+
+define i8 @or_lshr_commuted2(i8 %x, i8 %y, i8 %z, i8 %shamt) {
+; CHECK-LABEL: define {{[^@]+}}@or_lshr_commuted2
+; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[Z:%.*]], i8 [[SHAMT:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[X]], [[Y]]
+; CHECK-NEXT: [[TMP2:%.*]] = lshr i8 [[TMP1]], [[SHAMT]]
+; CHECK-NEXT: [[R:%.*]] = or i8 [[TMP2]], [[Z]]
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %sx = lshr i8 %x, %shamt
+ %sy = lshr i8 %y, %shamt
+ %a = or i8 %z, %sx
+ %r = or i8 %a, %sy
+ ret i8 %r
+}
+
+define i8 @or_lshr_commuted3(i8 %x, i8 %y, i8 %z, i8 %shamt) {
+; CHECK-LABEL: define {{[^@]+}}@or_lshr_commuted3
+; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[Z:%.*]], i8 [[SHAMT:%.*]]) {
+; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[X]], [[Y]]
+; CHECK-NEXT: [[TMP2:%.*]] = lshr i8 [[TMP1]], [[SHAMT]]
+; CHECK-NEXT: [[R:%.*]] = or i8 [[TMP2]], [[Z]]
+; CHECK-NEXT: ret i8 [[R]]
+;
+ %sx = lshr i8 %x, %shamt
+ %sy = lshr i8 %y, %shamt
+ %a = or i8 %sx, %z
+ %r = or i8 %a, %sy
+ ret i8 %r
+}
+
define i8 @xor_lshr(i8 %x, i8 %y, i8 %z, i8 %shamt) {
; CHECK-LABEL: define {{[^@]+}}@xor_lshr
; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[Z:%.*]], i8 [[SHAMT:%.*]]) {
diff --git a/llvm/test/Transforms/InstCombine/compare-signs.ll b/llvm/test/Transforms/InstCombine/compare-signs.ll
index 9703b47..59ec9ad 100644
--- a/llvm/test/Transforms/InstCombine/compare-signs.ll
+++ b/llvm/test/Transforms/InstCombine/compare-signs.ll
@@ -152,6 +152,19 @@ define i1 @test4a(i32 %a) {
ret i1 %c
}
+define i1 @test4a_commuted(i32 %a) {
+; CHECK-LABEL: @test4a_commuted(
+; CHECK-NEXT: [[C:%.*]] = icmp slt i32 [[SIGNUM:%.*]], 1
+; CHECK-NEXT: ret i1 [[C]]
+;
+ %l = ashr i32 %a, 31
+ %na = sub i32 0, %a
+ %r = lshr i32 %na, 31
+ %signum = or i32 %r, %l
+ %c = icmp slt i32 %signum, 1
+ ret i1 %c
+}
+
define <2 x i1> @test4a_vec(<2 x i32> %a) {
; CHECK-LABEL: @test4a_vec(
; CHECK-NEXT: [[C:%.*]] = icmp slt <2 x i32> [[A:%.*]], splat (i32 1)
diff --git a/llvm/test/Transforms/InstCombine/icmp-add.ll b/llvm/test/Transforms/InstCombine/icmp-add.ll
index 579247a..a8cdf80 100644
--- a/llvm/test/Transforms/InstCombine/icmp-add.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-add.ll
@@ -79,6 +79,19 @@ bb:
ret i1 %i4
}
+define i1 @cvt_icmp_0_zext_plus_zext_eq_i2(i1 %a, i1 %b) {
+; CHECK-LABEL: @cvt_icmp_0_zext_plus_zext_eq_i2(
+; CHECK-NEXT: [[TMP1:%.*]] = or i1 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = xor i1 [[TMP1]], true
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %a.ext = zext i1 %a to i2
+ %b.ext = zext i1 %b to i2
+ %add = add i2 %a.ext, %b.ext
+ %cmp = icmp eq i2 %add, 0
+ ret i1 %cmp
+}
+
define i1 @cvt_icmp_1_zext_plus_zext_eq(i1 %arg, i1 %arg1) {
; CHECK-LABEL: @cvt_icmp_1_zext_plus_zext_eq(
; CHECK-NEXT: bb:
diff --git a/llvm/test/Transforms/InstCombine/onehot_merge.ll b/llvm/test/Transforms/InstCombine/onehot_merge.ll
index 2e57597..3b7314d 100644
--- a/llvm/test/Transforms/InstCombine/onehot_merge.ll
+++ b/llvm/test/Transforms/InstCombine/onehot_merge.ll
@@ -1143,3 +1143,19 @@ define i1 @foo1_and_signbit_lshr_without_shifting_signbit_not_pwr2_logical(i32 %
%or = select i1 %t2, i1 true, i1 %t4
ret i1 %or
}
+
+define i1 @two_types_of_bittest(i8 %x, i8 %c) {
+; CHECK-LABEL: @two_types_of_bittest(
+; CHECK-NEXT: [[T0:%.*]] = shl nuw i8 1, [[C:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[T0]], -128
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
+; CHECK-NEXT: [[RET:%.*]] = icmp eq i8 [[TMP2]], [[TMP1]]
+; CHECK-NEXT: ret i1 [[RET]]
+;
+ %t0 = shl i8 1, %c
+ %icmp1 = icmp slt i8 %x, 0
+ %and = and i8 %x, %t0
+ %icmp2 = icmp ne i8 %and, 0
+ %ret = and i1 %icmp1, %icmp2
+ ret i1 %ret
+}
diff --git a/llvm/test/Transforms/InstCombine/phi.ll b/llvm/test/Transforms/InstCombine/phi.ll
index 3384929..4756b4f 100644
--- a/llvm/test/Transforms/InstCombine/phi.ll
+++ b/llvm/test/Transforms/InstCombine/phi.ll
@@ -2828,13 +2828,13 @@ define i1 @test_zext_icmp_eq_0(i1 %a, i1 %b, i32 %c) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[A:%.*]], label [[IF:%.*]], label [[ELSE:%.*]]
; CHECK: if:
-; CHECK-NEXT: [[B_EXT:%.*]] = zext i1 [[B:%.*]] to i32
+; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[B:%.*]], true
; CHECK-NEXT: br label [[JOIN:%.*]]
; CHECK: else:
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[C:%.*]], 0
; CHECK-NEXT: br label [[JOIN]]
; CHECK: join:
-; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ [[B_EXT]], [[IF]] ], [ [[C:%.*]], [[ELSE]] ]
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI]], 0
+; CHECK-NEXT: [[CMP:%.*]] = phi i1 [ [[TMP0]], [[IF]] ], [ [[TMP1]], [[ELSE]] ]
; CHECK-NEXT: ret i1 [[CMP]]
;
entry:
@@ -2916,10 +2916,9 @@ define i1 @test_zext_icmp_eq_0_loop(i1 %c, i1 %b) {
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
-; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ 1, [[ENTRY:%.*]] ], [ [[EXT:%.*]], [[LOOP]] ]
-; CHECK-NEXT: [[X:%.*]] = icmp eq i32 [[PHI]], 0
+; CHECK-NEXT: [[X:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TMP0:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[Y:%.*]] = and i1 [[X]], [[B:%.*]]
-; CHECK-NEXT: [[EXT]] = zext i1 [[Y]] to i32
+; CHECK-NEXT: [[TMP0]] = xor i1 [[Y]], true
; CHECK-NEXT: br i1 [[C:%.*]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret i1 [[X]]
diff --git a/llvm/test/Transforms/InstCombine/select-divrem.ll b/llvm/test/Transforms/InstCombine/select-divrem.ll
index a674f9c..7dff78e 100644
--- a/llvm/test/Transforms/InstCombine/select-divrem.ll
+++ b/llvm/test/Transforms/InstCombine/select-divrem.ll
@@ -322,6 +322,21 @@ define i8 @rem_euclid_non_const_pow2(i8 %0, i8 %1) {
ret i8 %sel
}
+define i8 @rem_euclid_non_const_pow2_commuted(i8 %0, i8 %1) {
+; CHECK-LABEL: @rem_euclid_non_const_pow2_commuted(
+; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw i8 -1, [[TMP0:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = xor i8 [[NOTMASK]], -1
+; CHECK-NEXT: [[SEL:%.*]] = and i8 [[TMP1:%.*]], [[TMP3]]
+; CHECK-NEXT: ret i8 [[SEL]]
+;
+ %pow2 = shl i8 1, %0
+ %rem = srem i8 %1, %pow2
+ %cond = icmp slt i8 %rem, 0
+ %add = add i8 %pow2, %rem
+ %sel = select i1 %cond, i8 %add, i8 %rem
+ ret i8 %sel
+}
+
define i32 @rem_euclid_pow2_true_arm_folded(i32 %n) {
; CHECK-LABEL: @rem_euclid_pow2_true_arm_folded(
; CHECK-NEXT: [[RES:%.*]] = and i32 [[N:%.*]], 1
diff --git a/llvm/test/Transforms/InstCombine/select.ll b/llvm/test/Transforms/InstCombine/select.ll
index 9de3c24..0f15fa6 100644
--- a/llvm/test/Transforms/InstCombine/select.ll
+++ b/llvm/test/Transforms/InstCombine/select.ll
@@ -3937,11 +3937,8 @@ entry:
define i32 @src_or_eq_0_and_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @src_or_eq_0_and_xor(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[OR]], 0
-; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[Y]], [[X]]
-; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 0, i32 [[XOR]]
-; CHECK-NEXT: ret i32 [[COND]]
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: ret i32 [[XOR]]
;
entry:
%or = or i32 %y, %x
@@ -3956,11 +3953,8 @@ entry:
define i32 @src_or_eq_0_xor_and(i32 %x, i32 %y) {
; CHECK-LABEL: @src_or_eq_0_xor_and(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[OR]], 0
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[Y]], [[X]]
-; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 0, i32 [[AND]]
-; CHECK-NEXT: ret i32 [[COND]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: ret i32 [[AND]]
;
entry:
%or = or i32 %y, %x
@@ -4438,11 +4432,8 @@ define i32 @src_no_trans_select_and_eq0_xor_and(i32 %x, i32 %y) {
define i32 @src_no_trans_select_or_eq0_or_and(i32 %x, i32 %y) {
; CHECK-LABEL: @src_no_trans_select_or_eq0_or_and(
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[OR0:%.*]] = icmp eq i32 [[OR]], 0
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], [[Y]]
-; CHECK-NEXT: [[COND:%.*]] = select i1 [[OR0]], i32 0, i32 [[AND]]
-; CHECK-NEXT: ret i32 [[COND]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: ret i32 [[AND]]
;
%or = or i32 %x, %y
%or0 = icmp eq i32 %or, 0
@@ -4837,3 +4828,16 @@ define i32 @replace_and_cond_multiuse2(i1 %cond1, i1 %cond2) {
%mux = select i1 %cond1, i32 %sel, i32 1
ret i32 %mux
}
+
+define i32 @src_simplify_2x_at_once_and(i32 %x, i32 %y) {
+; CHECK-LABEL: @src_simplify_2x_at_once_and(
+; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: ret i32 [[XOR]]
+;
+ %and = and i32 %x, %y
+ %and0 = icmp eq i32 %and, -1
+ %sub = sub i32 %x, %y
+ %xor = xor i32 %x, %y
+ %cond = select i1 %and0, i32 %sub, i32 %xor
+ ret i32 %cond
+}
diff --git a/llvm/test/Transforms/InstCombine/xor-and-or.ll b/llvm/test/Transforms/InstCombine/xor-and-or.ll
index 47275ce..c380e27 100644
--- a/llvm/test/Transforms/InstCombine/xor-and-or.ll
+++ b/llvm/test/Transforms/InstCombine/xor-and-or.ll
@@ -25,6 +25,18 @@ define i1 @xor_logic_and_logic_or2(i1 %c, i1 %x, i1 %y) {
ret i1 %r
}
+define i1 @xor_logic_and_logic_or2_commuted(i1 %c, i1 %x, i1 %y) {
+; CHECK-LABEL: @xor_logic_and_logic_or2_commuted(
+; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[X:%.*]], true
+; CHECK-NEXT: [[R:%.*]] = select i1 [[C:%.*]], i1 [[TMP1]], i1 [[Y:%.*]]
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %o = select i1 %y, i1 true, i1 %c
+ %a = select i1 %c, i1 %x, i1 false
+ %r = xor i1 %o, %a
+ ret i1 %r
+}
+
define i1 @xor_logic_and_logic_or3(i1 %c, i1 %x, i1 %y) {
; CHECK-LABEL: @xor_logic_and_logic_or3(
; CHECK-NEXT: [[TMP1:%.*]] = freeze i1 [[C:%.*]]
diff --git a/llvm/test/Transforms/InstSimplify/const-fold-nvvm-f2i-d2i.ll b/llvm/test/Transforms/InstSimplify/const-fold-nvvm-f2i-d2i.ll
new file mode 100644
index 0000000..543c731
--- /dev/null
+++ b/llvm/test/Transforms/InstSimplify/const-fold-nvvm-f2i-d2i.ll
@@ -0,0 +1,1129 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=instsimplify -march=nvptx64 -S | FileCheck %s
+
+; f2i/f2ui and d2i/d2ui - double/float to i32 tests
+
+;###############################################################
+;# Tests with Positive 1.5 #
+;###############################################################
+
+;+-------------------------------------------------------------+
+;| f2i |
+;+-------------------------------------------------------------+
+define i32 @test_pos_1_5_f2i_rm() {
+; CHECK-LABEL: define i32 @test_pos_1_5_f2i_rm() {
+; CHECK-NEXT: ret i32 1
+;
+ %res = call i32 @llvm.nvvm.f2i.rm(float 1.5)
+ ret i32 %res
+}
+
+define i32 @test_pos_1_5_f2i_rn() {
+; CHECK-LABEL: define i32 @test_pos_1_5_f2i_rn() {
+; CHECK-NEXT: ret i32 2
+;
+ %res = call i32 @llvm.nvvm.f2i.rn(float 1.5)
+ ret i32 %res
+}
+
+
+define i32 @test_pos_1_5_f2i_rp() {
+; CHECK-LABEL: define i32 @test_pos_1_5_f2i_rp() {
+; CHECK-NEXT: ret i32 2
+;
+ %res = call i32 @llvm.nvvm.f2i.rp(float 1.5)
+ ret i32 %res
+}
+
+define i32 @test_pos_1_5_f2i_rz() {
+; CHECK-LABEL: define i32 @test_pos_1_5_f2i_rz() {
+; CHECK-NEXT: ret i32 1
+;
+ %res = call i32 @llvm.nvvm.f2i.rz(float 1.5)
+ ret i32 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2i_ftz |
+;+-------------------------------------------------------------+
+define i32 @test_pos_1_5_f2i_rm_ftz() {
+; CHECK-LABEL: define i32 @test_pos_1_5_f2i_rm_ftz() {
+; CHECK-NEXT: ret i32 1
+;
+ %res = call i32 @llvm.nvvm.f2i.rm.ftz(float 1.5)
+ ret i32 %res
+}
+
+define i32 @test_pos_1_5_f2i_rn_ftz() {
+; CHECK-LABEL: define i32 @test_pos_1_5_f2i_rn_ftz() {
+; CHECK-NEXT: ret i32 2
+;
+ %res = call i32 @llvm.nvvm.f2i.rn.ftz(float 1.5)
+ ret i32 %res
+}
+
+define i32 @test_pos_1_5_f2i_rp_ftz() {
+; CHECK-LABEL: define i32 @test_pos_1_5_f2i_rp_ftz() {
+; CHECK-NEXT: ret i32 2
+;
+ %res = call i32 @llvm.nvvm.f2i.rp.ftz(float 1.5)
+ ret i32 %res
+}
+
+define i32 @test_pos_1_5_f2i_rz_ftz() {
+; CHECK-LABEL: define i32 @test_pos_1_5_f2i_rz_ftz() {
+; CHECK-NEXT: ret i32 1
+;
+ %res = call i32 @llvm.nvvm.f2i.rz.ftz(float 1.5)
+ ret i32 %res
+}
+;+-------------------------------------------------------------+
+;| d2i |
+;+-------------------------------------------------------------+
+define i32 @test_pos_1_5_d2i_rm() {
+; CHECK-LABEL: define i32 @test_pos_1_5_d2i_rm() {
+; CHECK-NEXT: ret i32 1
+;
+ %res = call i32 @llvm.nvvm.d2i.rm(double 1.5)
+ ret i32 %res
+}
+
+define i32 @test_pos_1_5_d2i_rn() {
+; CHECK-LABEL: define i32 @test_pos_1_5_d2i_rn() {
+; CHECK-NEXT: ret i32 2
+;
+ %res = call i32 @llvm.nvvm.d2i.rn(double 1.5)
+ ret i32 %res
+}
+
+
+define i32 @test_pos_1_5_d2i_rp() {
+; CHECK-LABEL: define i32 @test_pos_1_5_d2i_rp() {
+; CHECK-NEXT: ret i32 2
+;
+ %res = call i32 @llvm.nvvm.d2i.rp(double 1.5)
+ ret i32 %res
+}
+
+define i32 @test_pos_1_5_d2i_rz() {
+; CHECK-LABEL: define i32 @test_pos_1_5_d2i_rz() {
+; CHECK-NEXT: ret i32 1
+;
+ %res = call i32 @llvm.nvvm.d2i.rz(double 1.5)
+ ret i32 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ui |
+;+-------------------------------------------------------------+
+define i32 @test_pos_1_5_f2ui_rm() {
+; CHECK-LABEL: define i32 @test_pos_1_5_f2ui_rm() {
+; CHECK-NEXT: ret i32 1
+;
+ %res = call i32 @llvm.nvvm.f2ui.rm(float 1.5)
+ ret i32 %res
+}
+
+define i32 @test_pos_1_5_f2ui_rn() {
+; CHECK-LABEL: define i32 @test_pos_1_5_f2ui_rn() {
+; CHECK-NEXT: ret i32 2
+;
+ %res = call i32 @llvm.nvvm.f2ui.rn(float 1.5)
+ ret i32 %res
+}
+
+
+define i32 @test_pos_1_5_f2ui_rp() {
+; CHECK-LABEL: define i32 @test_pos_1_5_f2ui_rp() {
+; CHECK-NEXT: ret i32 2
+;
+ %res = call i32 @llvm.nvvm.f2ui.rp(float 1.5)
+ ret i32 %res
+}
+
+define i32 @test_pos_1_5_f2ui_rz() {
+; CHECK-LABEL: define i32 @test_pos_1_5_f2ui_rz() {
+; CHECK-NEXT: ret i32 1
+;
+ %res = call i32 @llvm.nvvm.f2ui.rz(float 1.5)
+ ret i32 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ui_ftz |
+;+-------------------------------------------------------------+
+define i32 @test_pos_1_5_f2ui_rm_ftz() {
+; CHECK-LABEL: define i32 @test_pos_1_5_f2ui_rm_ftz() {
+; CHECK-NEXT: ret i32 1
+;
+ %res = call i32 @llvm.nvvm.f2ui.rm.ftz(float 1.5)
+ ret i32 %res
+}
+
+define i32 @test_pos_1_5_f2ui_rn_ftz() {
+; CHECK-LABEL: define i32 @test_pos_1_5_f2ui_rn_ftz() {
+; CHECK-NEXT: ret i32 2
+;
+ %res = call i32 @llvm.nvvm.f2ui.rn.ftz(float 1.5)
+ ret i32 %res
+}
+
+define i32 @test_pos_1_5_f2ui_rp_ftz() {
+; CHECK-LABEL: define i32 @test_pos_1_5_f2ui_rp_ftz() {
+; CHECK-NEXT: ret i32 2
+;
+ %res = call i32 @llvm.nvvm.f2ui.rp.ftz(float 1.5)
+ ret i32 %res
+}
+
+define i32 @test_pos_1_5_f2ui_rz_ftz() {
+; CHECK-LABEL: define i32 @test_pos_1_5_f2ui_rz_ftz() {
+; CHECK-NEXT: ret i32 1
+;
+ %res = call i32 @llvm.nvvm.f2ui.rz.ftz(float 1.5)
+ ret i32 %res
+}
+;+-------------------------------------------------------------+
+;| d2ui |
+;+-------------------------------------------------------------+
+define i32 @test_pos_1_5_d2ui_rm() {
+; CHECK-LABEL: define i32 @test_pos_1_5_d2ui_rm() {
+; CHECK-NEXT: ret i32 1
+;
+ %res = call i32 @llvm.nvvm.d2ui.rm(double 1.5)
+ ret i32 %res
+}
+
+define i32 @test_pos_1_5_d2ui_rn() {
+; CHECK-LABEL: define i32 @test_pos_1_5_d2ui_rn() {
+; CHECK-NEXT: ret i32 2
+;
+ %res = call i32 @llvm.nvvm.d2ui.rn(double 1.5)
+ ret i32 %res
+}
+
+
+define i32 @test_pos_1_5_d2ui_rp() {
+; CHECK-LABEL: define i32 @test_pos_1_5_d2ui_rp() {
+; CHECK-NEXT: ret i32 2
+;
+ %res = call i32 @llvm.nvvm.d2ui.rp(double 1.5)
+ ret i32 %res
+}
+
+define i32 @test_pos_1_5_d2ui_rz() {
+; CHECK-LABEL: define i32 @test_pos_1_5_d2ui_rz() {
+; CHECK-NEXT: ret i32 1
+;
+ %res = call i32 @llvm.nvvm.d2ui.rz(double 1.5)
+ ret i32 %res
+}
+
+;###############################################################
+;# Tests with Negative 1.5 #
+;###############################################################
+
+;+-------------------------------------------------------------+
+;| f2i |
+;+-------------------------------------------------------------+
+define i32 @test_neg_1_5_f2i_rm() {
+; CHECK-LABEL: define i32 @test_neg_1_5_f2i_rm() {
+; CHECK-NEXT: ret i32 -2
+;
+ %res = call i32 @llvm.nvvm.f2i.rm(float -1.5)
+ ret i32 %res
+}
+
+define i32 @test_neg_1_5_f2i_rn() {
+; CHECK-LABEL: define i32 @test_neg_1_5_f2i_rn() {
+; CHECK-NEXT: ret i32 -2
+;
+ %res = call i32 @llvm.nvvm.f2i.rn(float -1.5)
+ ret i32 %res
+}
+
+
+define i32 @test_neg_1_5_f2i_rp() {
+; CHECK-LABEL: define i32 @test_neg_1_5_f2i_rp() {
+; CHECK-NEXT: ret i32 -1
+;
+ %res = call i32 @llvm.nvvm.f2i.rp(float -1.5)
+ ret i32 %res
+}
+
+define i32 @test_neg_1_5_f2i_rz() {
+; CHECK-LABEL: define i32 @test_neg_1_5_f2i_rz() {
+; CHECK-NEXT: ret i32 -1
+;
+ %res = call i32 @llvm.nvvm.f2i.rz(float -1.5)
+ ret i32 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2i_ftz |
+;+-------------------------------------------------------------+
+define i32 @test_neg_1_5_f2i_rm_ftz() {
+; CHECK-LABEL: define i32 @test_neg_1_5_f2i_rm_ftz() {
+; CHECK-NEXT: ret i32 -2
+;
+ %res = call i32 @llvm.nvvm.f2i.rm.ftz(float -1.5)
+ ret i32 %res
+}
+
+define i32 @test_neg_1_5_f2i_rn_ftz() {
+; CHECK-LABEL: define i32 @test_neg_1_5_f2i_rn_ftz() {
+; CHECK-NEXT: ret i32 -2
+;
+ %res = call i32 @llvm.nvvm.f2i.rn.ftz(float -1.5)
+ ret i32 %res
+}
+
+define i32 @test_neg_1_5_f2i_rp_ftz() {
+; CHECK-LABEL: define i32 @test_neg_1_5_f2i_rp_ftz() {
+; CHECK-NEXT: ret i32 -1
+;
+ %res = call i32 @llvm.nvvm.f2i.rp.ftz(float -1.5)
+ ret i32 %res
+}
+
+define i32 @test_neg_1_5_f2i_rz_ftz() {
+; CHECK-LABEL: define i32 @test_neg_1_5_f2i_rz_ftz() {
+; CHECK-NEXT: ret i32 -1
+;
+ %res = call i32 @llvm.nvvm.f2i.rz.ftz(float -1.5)
+ ret i32 %res
+}
+;+-------------------------------------------------------------+
+;| d2i |
+;+-------------------------------------------------------------+
+define i32 @test_neg_1_5_d2i_rm() {
+; CHECK-LABEL: define i32 @test_neg_1_5_d2i_rm() {
+; CHECK-NEXT: ret i32 -2
+;
+ %res = call i32 @llvm.nvvm.d2i.rm(double -1.5)
+ ret i32 %res
+}
+
+define i32 @test_neg_1_5_d2i_rn() {
+; CHECK-LABEL: define i32 @test_neg_1_5_d2i_rn() {
+; CHECK-NEXT: ret i32 -2
+;
+ %res = call i32 @llvm.nvvm.d2i.rn(double -1.5)
+ ret i32 %res
+}
+
+
+define i32 @test_neg_1_5_d2i_rp() {
+; CHECK-LABEL: define i32 @test_neg_1_5_d2i_rp() {
+; CHECK-NEXT: ret i32 -1
+;
+ %res = call i32 @llvm.nvvm.d2i.rp(double -1.5)
+ ret i32 %res
+}
+
+define i32 @test_neg_1_5_d2i_rz() {
+; CHECK-LABEL: define i32 @test_neg_1_5_d2i_rz() {
+; CHECK-NEXT: ret i32 -1
+;
+ %res = call i32 @llvm.nvvm.d2i.rz(double -1.5)
+ ret i32 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ui |
+;+-------------------------------------------------------------+
+define i32 @test_neg_1_5_f2ui_rm() {
+; CHECK-LABEL: define i32 @test_neg_1_5_f2ui_rm() {
+; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.f2ui.rm(float -1.500000e+00)
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %res = call i32 @llvm.nvvm.f2ui.rm(float -1.5)
+ ret i32 %res
+}
+
+define i32 @test_neg_1_5_f2ui_rn() {
+; CHECK-LABEL: define i32 @test_neg_1_5_f2ui_rn() {
+; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.f2ui.rn(float -1.500000e+00)
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %res = call i32 @llvm.nvvm.f2ui.rn(float -1.5)
+ ret i32 %res
+}
+
+
+define i32 @test_neg_1_5_f2ui_rp() {
+; CHECK-LABEL: define i32 @test_neg_1_5_f2ui_rp() {
+; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.f2ui.rp(float -1.500000e+00)
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %res = call i32 @llvm.nvvm.f2ui.rp(float -1.5)
+ ret i32 %res
+}
+
+define i32 @test_neg_1_5_f2ui_rz() {
+; CHECK-LABEL: define i32 @test_neg_1_5_f2ui_rz() {
+; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.f2ui.rz(float -1.500000e+00)
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %res = call i32 @llvm.nvvm.f2ui.rz(float -1.5)
+ ret i32 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ui_ftz |
+;+-------------------------------------------------------------+
+define i32 @test_neg_1_5_f2ui_rm_ftz() {
+; CHECK-LABEL: define i32 @test_neg_1_5_f2ui_rm_ftz() {
+; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.f2ui.rm.ftz(float -1.500000e+00)
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %res = call i32 @llvm.nvvm.f2ui.rm.ftz(float -1.5)
+ ret i32 %res
+}
+
+define i32 @test_neg_1_5_f2ui_rn_ftz() {
+; CHECK-LABEL: define i32 @test_neg_1_5_f2ui_rn_ftz() {
+; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.f2ui.rn.ftz(float -1.500000e+00)
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %res = call i32 @llvm.nvvm.f2ui.rn.ftz(float -1.5)
+ ret i32 %res
+}
+
+define i32 @test_neg_1_5_f2ui_rp_ftz() {
+; CHECK-LABEL: define i32 @test_neg_1_5_f2ui_rp_ftz() {
+; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.f2ui.rp.ftz(float -1.500000e+00)
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %res = call i32 @llvm.nvvm.f2ui.rp.ftz(float -1.5)
+ ret i32 %res
+}
+
+define i32 @test_neg_1_5_f2ui_rz_ftz() {
+; CHECK-LABEL: define i32 @test_neg_1_5_f2ui_rz_ftz() {
+; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.f2ui.rz.ftz(float -1.500000e+00)
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %res = call i32 @llvm.nvvm.f2ui.rz.ftz(float -1.5)
+ ret i32 %res
+}
+;+-------------------------------------------------------------+
+;| d2ui |
+;+-------------------------------------------------------------+
+define i32 @test_neg_1_5_d2ui_rm() {
+; CHECK-LABEL: define i32 @test_neg_1_5_d2ui_rm() {
+; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.d2ui.rm(double -1.500000e+00)
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %res = call i32 @llvm.nvvm.d2ui.rm(double -1.5)
+ ret i32 %res
+}
+
+define i32 @test_neg_1_5_d2ui_rn() {
+; CHECK-LABEL: define i32 @test_neg_1_5_d2ui_rn() {
+; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.d2ui.rn(double -1.500000e+00)
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %res = call i32 @llvm.nvvm.d2ui.rn(double -1.5)
+ ret i32 %res
+}
+
+
+define i32 @test_neg_1_5_d2ui_rp() {
+; CHECK-LABEL: define i32 @test_neg_1_5_d2ui_rp() {
+; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.d2ui.rp(double -1.500000e+00)
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %res = call i32 @llvm.nvvm.d2ui.rp(double -1.5)
+ ret i32 %res
+}
+
+define i32 @test_neg_1_5_d2ui_rz() {
+; CHECK-LABEL: define i32 @test_neg_1_5_d2ui_rz() {
+; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.d2ui.rz(double -1.500000e+00)
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %res = call i32 @llvm.nvvm.d2ui.rz(double -1.5)
+ ret i32 %res
+}
+
+;###############################################################
+;# Tests with NaN #
+;###############################################################
+
+;+-------------------------------------------------------------+
+;| f2i |
+;+-------------------------------------------------------------+
+define i32 @test_nan_f2i_rm() {
+; CHECK-LABEL: define i32 @test_nan_f2i_rm() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rm(float 0x7FFFFF0000000000)
+ ret i32 %res
+}
+
+define i32 @test_nan_f2i_rn() {
+; CHECK-LABEL: define i32 @test_nan_f2i_rn() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rn(float 0x7FFFFF0000000000)
+ ret i32 %res
+}
+
+
+define i32 @test_nan_f2i_rp() {
+; CHECK-LABEL: define i32 @test_nan_f2i_rp() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rp(float 0x7FFFFF0000000000)
+ ret i32 %res
+}
+
+define i32 @test_nan_f2i_rz() {
+; CHECK-LABEL: define i32 @test_nan_f2i_rz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rz(float 0x7FFFFF0000000000)
+ ret i32 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2i_ftz |
+;+-------------------------------------------------------------+
+define i32 @test_nan_f2i_rm_ftz() {
+; CHECK-LABEL: define i32 @test_nan_f2i_rm_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rm.ftz(float 0x7FFFFF0000000000)
+ ret i32 %res
+}
+
+define i32 @test_nan_f2i_rn_ftz() {
+; CHECK-LABEL: define i32 @test_nan_f2i_rn_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rn.ftz(float 0x7FFFFF0000000000)
+ ret i32 %res
+}
+
+define i32 @test_nan_f2i_rp_ftz() {
+; CHECK-LABEL: define i32 @test_nan_f2i_rp_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rp.ftz(float 0x7FFFFF0000000000)
+ ret i32 %res
+}
+
+define i32 @test_nan_f2i_rz_ftz() {
+; CHECK-LABEL: define i32 @test_nan_f2i_rz_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rz.ftz(float 0x7FFFFF0000000000)
+ ret i32 %res
+}
+;+-------------------------------------------------------------+
+;| d2i |
+;+-------------------------------------------------------------+
+define i32 @test_nan_d2i_rm() {
+; CHECK-LABEL: define i32 @test_nan_d2i_rm() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.d2i.rm(double 0xFFF8000000000000)
+ ret i32 %res
+}
+
+define i32 @test_nan_d2i_rn() {
+; CHECK-LABEL: define i32 @test_nan_d2i_rn() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.d2i.rn(double 0xFFF8000000000000)
+ ret i32 %res
+}
+
+
+define i32 @test_nan_d2i_rp() {
+; CHECK-LABEL: define i32 @test_nan_d2i_rp() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.d2i.rp(double 0xFFF8000000000000)
+ ret i32 %res
+}
+
+define i32 @test_nan_d2i_rz() {
+; CHECK-LABEL: define i32 @test_nan_d2i_rz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.d2i.rz(double 0xFFF8000000000000)
+ ret i32 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ui |
+;+-------------------------------------------------------------+
+define i32 @test_nan_f2ui_rm() {
+; CHECK-LABEL: define i32 @test_nan_f2ui_rm() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rm(float 0x7FFFFF0000000000)
+ ret i32 %res
+}
+
+define i32 @test_nan_f2ui_rn() {
+; CHECK-LABEL: define i32 @test_nan_f2ui_rn() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rn(float 0x7FFFFF0000000000)
+ ret i32 %res
+}
+
+
+define i32 @test_nan_f2ui_rp() {
+; CHECK-LABEL: define i32 @test_nan_f2ui_rp() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rp(float 0x7FFFFF0000000000)
+ ret i32 %res
+}
+
+define i32 @test_nan_f2ui_rz() {
+; CHECK-LABEL: define i32 @test_nan_f2ui_rz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rz(float 0x7FFFFF0000000000)
+ ret i32 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ui_ftz |
+;+-------------------------------------------------------------+
+define i32 @test_nan_f2ui_rm_ftz() {
+; CHECK-LABEL: define i32 @test_nan_f2ui_rm_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rm.ftz(float 0x7FFFFF0000000000)
+ ret i32 %res
+}
+
+define i32 @test_nan_f2ui_rn_ftz() {
+; CHECK-LABEL: define i32 @test_nan_f2ui_rn_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rn.ftz(float 0x7FFFFF0000000000)
+ ret i32 %res
+}
+
+define i32 @test_nan_f2ui_rp_ftz() {
+; CHECK-LABEL: define i32 @test_nan_f2ui_rp_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rp.ftz(float 0x7FFFFF0000000000)
+ ret i32 %res
+}
+
+define i32 @test_nan_f2ui_rz_ftz() {
+; CHECK-LABEL: define i32 @test_nan_f2ui_rz_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rz.ftz(float 0x7FFFFF0000000000)
+ ret i32 %res
+}
+;+-------------------------------------------------------------+
+;| d2ui |
+;+-------------------------------------------------------------+
+define i32 @test_nan_d2ui_rm() {
+; CHECK-LABEL: define i32 @test_nan_d2ui_rm() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.d2ui.rm(double 0xFFF8000000000000)
+ ret i32 %res
+}
+
+define i32 @test_nan_d2ui_rn() {
+; CHECK-LABEL: define i32 @test_nan_d2ui_rn() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.d2ui.rn(double 0xFFF8000000000000)
+ ret i32 %res
+}
+
+
+define i32 @test_nan_d2ui_rp() {
+; CHECK-LABEL: define i32 @test_nan_d2ui_rp() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.d2ui.rp(double 0xFFF8000000000000)
+ ret i32 %res
+}
+
+define i32 @test_nan_d2ui_rz() {
+; CHECK-LABEL: define i32 @test_nan_d2ui_rz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.d2ui.rz(double 0xFFF8000000000000)
+ ret i32 %res
+}
+
+;###############################################################
+;# Tests with Positive Subnormal #
+;###############################################################
+
+;+-------------------------------------------------------------+
+;| f2i |
+;+-------------------------------------------------------------+
+define i32 @test_pos_subnormal_f2i_rm() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_f2i_rm() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rm(float 0x380FFFFFC0000000)
+ ret i32 %res
+}
+
+define i32 @test_pos_subnormal_f2i_rn() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_f2i_rn() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rn(float 0x380FFFFFC0000000)
+ ret i32 %res
+}
+
+
+define i32 @test_pos_subnormal_f2i_rp() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_f2i_rp() {
+; CHECK-NEXT: ret i32 1
+;
+ %res = call i32 @llvm.nvvm.f2i.rp(float 0x380FFFFFC0000000)
+ ret i32 %res
+}
+
+define i32 @test_pos_subnormal_f2i_rz() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_f2i_rz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rz(float 0x380FFFFFC0000000)
+ ret i32 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2i_ftz |
+;+-------------------------------------------------------------+
+define i32 @test_pos_subnormal_f2i_rm_ftz() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_f2i_rm_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rm.ftz(float 0x380FFFFFC0000000)
+ ret i32 %res
+}
+
+define i32 @test_pos_subnormal_f2i_rn_ftz() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_f2i_rn_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rn.ftz(float 0x380FFFFFC0000000)
+ ret i32 %res
+}
+
+define i32 @test_pos_subnormal_f2i_rp_ftz() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_f2i_rp_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rp.ftz(float 0x380FFFFFC0000000)
+ ret i32 %res
+}
+
+define i32 @test_pos_subnormal_f2i_rz_ftz() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_f2i_rz_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rz.ftz(float 0x380FFFFFC0000000)
+ ret i32 %res
+}
+;+-------------------------------------------------------------+
+;| d2i |
+;+-------------------------------------------------------------+
+define i32 @test_pos_subnormal_d2i_rm() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_d2i_rm() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.d2i.rm(double 0x000fffffffffffff)
+ ret i32 %res
+}
+
+define i32 @test_pos_subnormal_d2i_rn() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_d2i_rn() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.d2i.rn(double 0x000fffffffffffff)
+ ret i32 %res
+}
+
+
+define i32 @test_pos_subnormal_d2i_rp() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_d2i_rp() {
+; CHECK-NEXT: ret i32 1
+;
+ %res = call i32 @llvm.nvvm.d2i.rp(double 0x000fffffffffffff)
+ ret i32 %res
+}
+
+define i32 @test_pos_subnormal_d2i_rz() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_d2i_rz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.d2i.rz(double 0x000fffffffffffff)
+ ret i32 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ui |
+;+-------------------------------------------------------------+
+define i32 @test_pos_subnormal_f2ui_rm() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_f2ui_rm() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rm(float 0x380FFFFFC0000000)
+ ret i32 %res
+}
+
+define i32 @test_pos_subnormal_f2ui_rn() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_f2ui_rn() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rn(float 0x380FFFFFC0000000)
+ ret i32 %res
+}
+
+
+define i32 @test_pos_subnormal_f2ui_rp() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_f2ui_rp() {
+; CHECK-NEXT: ret i32 1
+;
+ %res = call i32 @llvm.nvvm.f2ui.rp(float 0x380FFFFFC0000000)
+ ret i32 %res
+}
+
+define i32 @test_pos_subnormal_f2ui_rz() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_f2ui_rz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rz(float 0x380FFFFFC0000000)
+ ret i32 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ui_ftz |
+;+-------------------------------------------------------------+
+define i32 @test_pos_subnormal_f2ui_rm_ftz() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_f2ui_rm_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rm.ftz(float 0x380FFFFFC0000000)
+ ret i32 %res
+}
+
+define i32 @test_pos_subnormal_f2ui_rn_ftz() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_f2ui_rn_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rn.ftz(float 0x380FFFFFC0000000)
+ ret i32 %res
+}
+
+define i32 @test_pos_subnormal_f2ui_rp_ftz() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_f2ui_rp_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rp.ftz(float 0x380FFFFFC0000000)
+ ret i32 %res
+}
+
+define i32 @test_pos_subnormal_f2ui_rz_ftz() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_f2ui_rz_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rz.ftz(float 0x380FFFFFC0000000)
+ ret i32 %res
+}
+;+-------------------------------------------------------------+
+;| d2ui |
+;+-------------------------------------------------------------+
+define i32 @test_pos_subnormal_d2ui_rm() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_d2ui_rm() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.d2ui.rm(double 0x000fffffffffffff)
+ ret i32 %res
+}
+
+define i32 @test_pos_subnormal_d2ui_rn() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_d2ui_rn() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.d2ui.rn(double 0x000fffffffffffff)
+ ret i32 %res
+}
+
+
+define i32 @test_pos_subnormal_d2ui_rp() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_d2ui_rp() {
+; CHECK-NEXT: ret i32 1
+;
+ %res = call i32 @llvm.nvvm.d2ui.rp(double 0x000fffffffffffff)
+ ret i32 %res
+}
+
+define i32 @test_pos_subnormal_d2ui_rz() {
+; CHECK-LABEL: define i32 @test_pos_subnormal_d2ui_rz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.d2ui.rz(double 0x000fffffffffffff)
+ ret i32 %res
+}
+
+;###############################################################
+;# Tests with Negative Subnormal #
+;###############################################################
+
+;+-------------------------------------------------------------+
+;| f2i |
+;+-------------------------------------------------------------+
+define i32 @test_neg_subnormal_f2i_rm() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_f2i_rm() {
+; CHECK-NEXT: ret i32 -1
+;
+ %res = call i32 @llvm.nvvm.f2i.rm(float 0xB80FFFFFC0000000)
+ ret i32 %res
+}
+
+define i32 @test_neg_subnormal_f2i_rn() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_f2i_rn() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rn(float 0xB80FFFFFC0000000)
+ ret i32 %res
+}
+
+
+define i32 @test_neg_subnormal_f2i_rp() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_f2i_rp() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rp(float 0xB80FFFFFC0000000)
+ ret i32 %res
+}
+
+define i32 @test_neg_subnormal_f2i_rz() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_f2i_rz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rz(float 0xB80FFFFFC0000000)
+ ret i32 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2i_ftz |
+;+-------------------------------------------------------------+
+define i32 @test_neg_subnormal_f2i_rm_ftz() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_f2i_rm_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rm.ftz(float 0xB80FFFFFC0000000)
+ ret i32 %res
+}
+
+define i32 @test_neg_subnormal_f2i_rn_ftz() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_f2i_rn_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rn.ftz(float 0xB80FFFFFC0000000)
+ ret i32 %res
+}
+
+define i32 @test_neg_subnormal_f2i_rp_ftz() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_f2i_rp_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rp.ftz(float 0xB80FFFFFC0000000)
+ ret i32 %res
+}
+
+define i32 @test_neg_subnormal_f2i_rz_ftz() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_f2i_rz_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2i.rz.ftz(float 0xB80FFFFFC0000000)
+ ret i32 %res
+}
+;+-------------------------------------------------------------+
+;| d2i |
+;+-------------------------------------------------------------+
+define i32 @test_neg_subnormal_d2i_rm() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_d2i_rm() {
+; CHECK-NEXT: ret i32 -1
+;
+ %res = call i32 @llvm.nvvm.d2i.rm(double 0x800fffffffffffff)
+ ret i32 %res
+}
+
+define i32 @test_neg_subnormal_d2i_rn() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_d2i_rn() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.d2i.rn(double 0x800fffffffffffff)
+ ret i32 %res
+}
+
+
+define i32 @test_neg_subnormal_d2i_rp() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_d2i_rp() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.d2i.rp(double 0x800fffffffffffff)
+ ret i32 %res
+}
+
+define i32 @test_neg_subnormal_d2i_rz() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_d2i_rz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.d2i.rz(double 0x800fffffffffffff)
+ ret i32 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ui |
+;+-------------------------------------------------------------+
+define i32 @test_neg_subnormal_f2ui_rm() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_f2ui_rm() {
+; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.f2ui.rm(float 0xB80FFFFFC0000000)
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %res = call i32 @llvm.nvvm.f2ui.rm(float 0xB80FFFFFC0000000)
+ ret i32 %res
+}
+
+define i32 @test_neg_subnormal_f2ui_rn() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_f2ui_rn() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rn(float 0xB80FFFFFC0000000)
+ ret i32 %res
+}
+
+
+define i32 @test_neg_subnormal_f2ui_rp() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_f2ui_rp() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rp(float 0xB80FFFFFC0000000)
+ ret i32 %res
+}
+
+define i32 @test_neg_subnormal_f2ui_rz() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_f2ui_rz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rz(float 0xB80FFFFFC0000000)
+ ret i32 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ui_ftz |
+;+-------------------------------------------------------------+
+define i32 @test_neg_subnormal_f2ui_rm_ftz() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_f2ui_rm_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rm.ftz(float 0xB80FFFFFC0000000)
+ ret i32 %res
+}
+
+define i32 @test_neg_subnormal_f2ui_rn_ftz() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_f2ui_rn_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rn.ftz(float 0xB80FFFFFC0000000)
+ ret i32 %res
+}
+
+define i32 @test_neg_subnormal_f2ui_rp_ftz() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_f2ui_rp_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rp.ftz(float 0xB80FFFFFC0000000)
+ ret i32 %res
+}
+
+define i32 @test_neg_subnormal_f2ui_rz_ftz() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_f2ui_rz_ftz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.f2ui.rz.ftz(float 0xB80FFFFFC0000000)
+ ret i32 %res
+}
+;+-------------------------------------------------------------+
+;| d2ui |
+;+-------------------------------------------------------------+
+define i32 @test_neg_subnormal_d2ui_rm() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_d2ui_rm() {
+; CHECK-NEXT: [[RES:%.*]] = call i32 @llvm.nvvm.d2ui.rm(double 0x800FFFFFFFFFFFFF)
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %res = call i32 @llvm.nvvm.d2ui.rm(double 0x800fffffffffffff)
+ ret i32 %res
+}
+
+define i32 @test_neg_subnormal_d2ui_rn() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_d2ui_rn() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.d2ui.rn(double 0x800fffffffffffff)
+ ret i32 %res
+}
+
+
+define i32 @test_neg_subnormal_d2ui_rp() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_d2ui_rp() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.d2ui.rp(double 0x800fffffffffffff)
+ ret i32 %res
+}
+
+define i32 @test_neg_subnormal_d2ui_rz() {
+; CHECK-LABEL: define i32 @test_neg_subnormal_d2ui_rz() {
+; CHECK-NEXT: ret i32 0
+;
+ %res = call i32 @llvm.nvvm.d2ui.rz(double 0x800fffffffffffff)
+ ret i32 %res
+}
+
+declare i32 @llvm.nvvm.f2i.rm(float)
+declare i32 @llvm.nvvm.f2i.rn(float)
+declare i32 @llvm.nvvm.f2i.rp(float)
+declare i32 @llvm.nvvm.f2i.rz(float)
+
+declare i32 @llvm.nvvm.f2i.rm.ftz(float)
+declare i32 @llvm.nvvm.f2i.rn.ftz(float)
+declare i32 @llvm.nvvm.f2i.rp.ftz(float)
+declare i32 @llvm.nvvm.f2i.rz.ftz(float)
+
+declare i32 @llvm.nvvm.d2i.rm(double)
+declare i32 @llvm.nvvm.d2i.rn(double)
+declare i32 @llvm.nvvm.d2i.rp(double)
+declare i32 @llvm.nvvm.d2i.rz(double)
+
+
+declare i32 @llvm.nvvm.f2ui.rm(float)
+declare i32 @llvm.nvvm.f2ui.rn(float)
+declare i32 @llvm.nvvm.f2ui.rp(float)
+declare i32 @llvm.nvvm.f2ui.rz(float)
+
+declare i32 @llvm.nvvm.f2ui.rm.ftz(float)
+declare i32 @llvm.nvvm.f2ui.rn.ftz(float)
+declare i32 @llvm.nvvm.f2ui.rp.ftz(float)
+declare i32 @llvm.nvvm.f2ui.rz.ftz(float)
+
+declare i32 @llvm.nvvm.d2ui.rm(double)
+declare i32 @llvm.nvvm.d2ui.rn(double)
+declare i32 @llvm.nvvm.d2ui.rp(double)
+declare i32 @llvm.nvvm.d2ui.rz(double)
diff --git a/llvm/test/Transforms/InstSimplify/const-fold-nvvm-f2ll-d2ll.ll b/llvm/test/Transforms/InstSimplify/const-fold-nvvm-f2ll-d2ll.ll
new file mode 100644
index 0000000..be38177
--- /dev/null
+++ b/llvm/test/Transforms/InstSimplify/const-fold-nvvm-f2ll-d2ll.ll
@@ -0,0 +1,1129 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=instsimplify -march=nvptx64 -S | FileCheck %s
+
+; f2ll/f2ull and d2ll/d2ull - double/float to i64 tests
+
+;###############################################################
+;# Tests with Positive 1.5 #
+;###############################################################
+
+;+-------------------------------------------------------------+
+;| f2ll |
+;+-------------------------------------------------------------+
+define i64 @test_pos_1_5_f2ll_rm() {
+; CHECK-LABEL: define i64 @test_pos_1_5_f2ll_rm() {
+; CHECK-NEXT: ret i64 1
+;
+ %res = call i64 @llvm.nvvm.f2ll.rm(float 1.5)
+ ret i64 %res
+}
+
+define i64 @test_pos_1_5_f2ll_rn() {
+; CHECK-LABEL: define i64 @test_pos_1_5_f2ll_rn() {
+; CHECK-NEXT: ret i64 2
+;
+ %res = call i64 @llvm.nvvm.f2ll.rn(float 1.5)
+ ret i64 %res
+}
+
+
+define i64 @test_pos_1_5_f2ll_rp() {
+; CHECK-LABEL: define i64 @test_pos_1_5_f2ll_rp() {
+; CHECK-NEXT: ret i64 2
+;
+ %res = call i64 @llvm.nvvm.f2ll.rp(float 1.5)
+ ret i64 %res
+}
+
+define i64 @test_pos_1_5_f2ll_rz() {
+; CHECK-LABEL: define i64 @test_pos_1_5_f2ll_rz() {
+; CHECK-NEXT: ret i64 1
+;
+ %res = call i64 @llvm.nvvm.f2ll.rz(float 1.5)
+ ret i64 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ll_ftz |
+;+-------------------------------------------------------------+
+define i64 @test_pos_1_5_f2ll_rm_ftz() {
+; CHECK-LABEL: define i64 @test_pos_1_5_f2ll_rm_ftz() {
+; CHECK-NEXT: ret i64 1
+;
+ %res = call i64 @llvm.nvvm.f2ll.rm.ftz(float 1.5)
+ ret i64 %res
+}
+
+define i64 @test_pos_1_5_f2ll_rn_ftz() {
+; CHECK-LABEL: define i64 @test_pos_1_5_f2ll_rn_ftz() {
+; CHECK-NEXT: ret i64 2
+;
+ %res = call i64 @llvm.nvvm.f2ll.rn.ftz(float 1.5)
+ ret i64 %res
+}
+
+define i64 @test_pos_1_5_f2ll_rp_ftz() {
+; CHECK-LABEL: define i64 @test_pos_1_5_f2ll_rp_ftz() {
+; CHECK-NEXT: ret i64 2
+;
+ %res = call i64 @llvm.nvvm.f2ll.rp.ftz(float 1.5)
+ ret i64 %res
+}
+
+define i64 @test_pos_1_5_f2ll_rz_ftz() {
+; CHECK-LABEL: define i64 @test_pos_1_5_f2ll_rz_ftz() {
+; CHECK-NEXT: ret i64 1
+;
+ %res = call i64 @llvm.nvvm.f2ll.rz.ftz(float 1.5)
+ ret i64 %res
+}
+;+-------------------------------------------------------------+
+;| d2ll |
+;+-------------------------------------------------------------+
+define i64 @test_pos_1_5_d2ll_rm() {
+; CHECK-LABEL: define i64 @test_pos_1_5_d2ll_rm() {
+; CHECK-NEXT: ret i64 1
+;
+ %res = call i64 @llvm.nvvm.d2ll.rm(double 1.5)
+ ret i64 %res
+}
+
+define i64 @test_pos_1_5_d2ll_rn() {
+; CHECK-LABEL: define i64 @test_pos_1_5_d2ll_rn() {
+; CHECK-NEXT: ret i64 2
+;
+ %res = call i64 @llvm.nvvm.d2ll.rn(double 1.5)
+ ret i64 %res
+}
+
+
+define i64 @test_pos_1_5_d2ll_rp() {
+; CHECK-LABEL: define i64 @test_pos_1_5_d2ll_rp() {
+; CHECK-NEXT: ret i64 2
+;
+ %res = call i64 @llvm.nvvm.d2ll.rp(double 1.5)
+ ret i64 %res
+}
+
+define i64 @test_pos_1_5_d2ll_rz() {
+; CHECK-LABEL: define i64 @test_pos_1_5_d2ll_rz() {
+; CHECK-NEXT: ret i64 1
+;
+ %res = call i64 @llvm.nvvm.d2ll.rz(double 1.5)
+ ret i64 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ull |
+;+-------------------------------------------------------------+
+define i64 @test_pos_1_5_f2ull_rm() {
+; CHECK-LABEL: define i64 @test_pos_1_5_f2ull_rm() {
+; CHECK-NEXT: ret i64 1
+;
+ %res = call i64 @llvm.nvvm.f2ull.rm(float 1.5)
+ ret i64 %res
+}
+
+define i64 @test_pos_1_5_f2ull_rn() {
+; CHECK-LABEL: define i64 @test_pos_1_5_f2ull_rn() {
+; CHECK-NEXT: ret i64 2
+;
+ %res = call i64 @llvm.nvvm.f2ull.rn(float 1.5)
+ ret i64 %res
+}
+
+
+define i64 @test_pos_1_5_f2ull_rp() {
+; CHECK-LABEL: define i64 @test_pos_1_5_f2ull_rp() {
+; CHECK-NEXT: ret i64 2
+;
+ %res = call i64 @llvm.nvvm.f2ull.rp(float 1.5)
+ ret i64 %res
+}
+
+define i64 @test_pos_1_5_f2ull_rz() {
+; CHECK-LABEL: define i64 @test_pos_1_5_f2ull_rz() {
+; CHECK-NEXT: ret i64 1
+;
+ %res = call i64 @llvm.nvvm.f2ull.rz(float 1.5)
+ ret i64 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ull_ftz |
+;+-------------------------------------------------------------+
+define i64 @test_pos_1_5_f2ull_rm_ftz() {
+; CHECK-LABEL: define i64 @test_pos_1_5_f2ull_rm_ftz() {
+; CHECK-NEXT: ret i64 1
+;
+ %res = call i64 @llvm.nvvm.f2ull.rm.ftz(float 1.5)
+ ret i64 %res
+}
+
+define i64 @test_pos_1_5_f2ull_rn_ftz() {
+; CHECK-LABEL: define i64 @test_pos_1_5_f2ull_rn_ftz() {
+; CHECK-NEXT: ret i64 2
+;
+ %res = call i64 @llvm.nvvm.f2ull.rn.ftz(float 1.5)
+ ret i64 %res
+}
+
+define i64 @test_pos_1_5_f2ull_rp_ftz() {
+; CHECK-LABEL: define i64 @test_pos_1_5_f2ull_rp_ftz() {
+; CHECK-NEXT: ret i64 2
+;
+ %res = call i64 @llvm.nvvm.f2ull.rp.ftz(float 1.5)
+ ret i64 %res
+}
+
+define i64 @test_pos_1_5_f2ull_rz_ftz() {
+; CHECK-LABEL: define i64 @test_pos_1_5_f2ull_rz_ftz() {
+; CHECK-NEXT: ret i64 1
+;
+ %res = call i64 @llvm.nvvm.f2ull.rz.ftz(float 1.5)
+ ret i64 %res
+}
+;+-------------------------------------------------------------+
+;| d2ull |
+;+-------------------------------------------------------------+
+define i64 @test_pos_1_5_d2ull_rm() {
+; CHECK-LABEL: define i64 @test_pos_1_5_d2ull_rm() {
+; CHECK-NEXT: ret i64 1
+;
+ %res = call i64 @llvm.nvvm.d2ull.rm(double 1.5)
+ ret i64 %res
+}
+
+define i64 @test_pos_1_5_d2ull_rn() {
+; CHECK-LABEL: define i64 @test_pos_1_5_d2ull_rn() {
+; CHECK-NEXT: ret i64 2
+;
+ %res = call i64 @llvm.nvvm.d2ull.rn(double 1.5)
+ ret i64 %res
+}
+
+
+define i64 @test_pos_1_5_d2ull_rp() {
+; CHECK-LABEL: define i64 @test_pos_1_5_d2ull_rp() {
+; CHECK-NEXT: ret i64 2
+;
+ %res = call i64 @llvm.nvvm.d2ull.rp(double 1.5)
+ ret i64 %res
+}
+
+define i64 @test_pos_1_5_d2ull_rz() {
+; CHECK-LABEL: define i64 @test_pos_1_5_d2ull_rz() {
+; CHECK-NEXT: ret i64 1
+;
+ %res = call i64 @llvm.nvvm.d2ull.rz(double 1.5)
+ ret i64 %res
+}
+
+;###############################################################
+;# Tests with Negative 1.5 #
+;###############################################################
+
+;+-------------------------------------------------------------+
+;| f2ll |
+;+-------------------------------------------------------------+
+define i64 @test_neg_1_5_f2ll_rm() {
+; CHECK-LABEL: define i64 @test_neg_1_5_f2ll_rm() {
+; CHECK-NEXT: ret i64 -2
+;
+ %res = call i64 @llvm.nvvm.f2ll.rm(float -1.5)
+ ret i64 %res
+}
+
+define i64 @test_neg_1_5_f2ll_rn() {
+; CHECK-LABEL: define i64 @test_neg_1_5_f2ll_rn() {
+; CHECK-NEXT: ret i64 -2
+;
+ %res = call i64 @llvm.nvvm.f2ll.rn(float -1.5)
+ ret i64 %res
+}
+
+
+define i64 @test_neg_1_5_f2ll_rp() {
+; CHECK-LABEL: define i64 @test_neg_1_5_f2ll_rp() {
+; CHECK-NEXT: ret i64 -1
+;
+ %res = call i64 @llvm.nvvm.f2ll.rp(float -1.5)
+ ret i64 %res
+}
+
+define i64 @test_neg_1_5_f2ll_rz() {
+; CHECK-LABEL: define i64 @test_neg_1_5_f2ll_rz() {
+; CHECK-NEXT: ret i64 -1
+;
+ %res = call i64 @llvm.nvvm.f2ll.rz(float -1.5)
+ ret i64 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ll_ftz |
+;+-------------------------------------------------------------+
+define i64 @test_neg_1_5_f2ll_rm_ftz() {
+; CHECK-LABEL: define i64 @test_neg_1_5_f2ll_rm_ftz() {
+; CHECK-NEXT: ret i64 -2
+;
+ %res = call i64 @llvm.nvvm.f2ll.rm.ftz(float -1.5)
+ ret i64 %res
+}
+
+define i64 @test_neg_1_5_f2ll_rn_ftz() {
+; CHECK-LABEL: define i64 @test_neg_1_5_f2ll_rn_ftz() {
+; CHECK-NEXT: ret i64 -2
+;
+ %res = call i64 @llvm.nvvm.f2ll.rn.ftz(float -1.5)
+ ret i64 %res
+}
+
+define i64 @test_neg_1_5_f2ll_rp_ftz() {
+; CHECK-LABEL: define i64 @test_neg_1_5_f2ll_rp_ftz() {
+; CHECK-NEXT: ret i64 -1
+;
+ %res = call i64 @llvm.nvvm.f2ll.rp.ftz(float -1.5)
+ ret i64 %res
+}
+
+define i64 @test_neg_1_5_f2ll_rz_ftz() {
+; CHECK-LABEL: define i64 @test_neg_1_5_f2ll_rz_ftz() {
+; CHECK-NEXT: ret i64 -1
+;
+ %res = call i64 @llvm.nvvm.f2ll.rz.ftz(float -1.5)
+ ret i64 %res
+}
+;+-------------------------------------------------------------+
+;| d2ll |
+;+-------------------------------------------------------------+
+define i64 @test_neg_1_5_d2ll_rm() {
+; CHECK-LABEL: define i64 @test_neg_1_5_d2ll_rm() {
+; CHECK-NEXT: ret i64 -2
+;
+ %res = call i64 @llvm.nvvm.d2ll.rm(double -1.5)
+ ret i64 %res
+}
+
+define i64 @test_neg_1_5_d2ll_rn() {
+; CHECK-LABEL: define i64 @test_neg_1_5_d2ll_rn() {
+; CHECK-NEXT: ret i64 -2
+;
+ %res = call i64 @llvm.nvvm.d2ll.rn(double -1.5)
+ ret i64 %res
+}
+
+
+define i64 @test_neg_1_5_d2ll_rp() {
+; CHECK-LABEL: define i64 @test_neg_1_5_d2ll_rp() {
+; CHECK-NEXT: ret i64 -1
+;
+ %res = call i64 @llvm.nvvm.d2ll.rp(double -1.5)
+ ret i64 %res
+}
+
+define i64 @test_neg_1_5_d2ll_rz() {
+; CHECK-LABEL: define i64 @test_neg_1_5_d2ll_rz() {
+; CHECK-NEXT: ret i64 -1
+;
+ %res = call i64 @llvm.nvvm.d2ll.rz(double -1.5)
+ ret i64 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ull |
+;+-------------------------------------------------------------+
+define i64 @test_neg_1_5_f2ull_rm() {
+; CHECK-LABEL: define i64 @test_neg_1_5_f2ull_rm() {
+; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.f2ull.rm(float -1.500000e+00)
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %res = call i64 @llvm.nvvm.f2ull.rm(float -1.5)
+ ret i64 %res
+}
+
+define i64 @test_neg_1_5_f2ull_rn() {
+; CHECK-LABEL: define i64 @test_neg_1_5_f2ull_rn() {
+; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.f2ull.rn(float -1.500000e+00)
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %res = call i64 @llvm.nvvm.f2ull.rn(float -1.5)
+ ret i64 %res
+}
+
+
+define i64 @test_neg_1_5_f2ull_rp() {
+; CHECK-LABEL: define i64 @test_neg_1_5_f2ull_rp() {
+; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.f2ull.rp(float -1.500000e+00)
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %res = call i64 @llvm.nvvm.f2ull.rp(float -1.5)
+ ret i64 %res
+}
+
+define i64 @test_neg_1_5_f2ull_rz() {
+; CHECK-LABEL: define i64 @test_neg_1_5_f2ull_rz() {
+; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.f2ull.rz(float -1.500000e+00)
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %res = call i64 @llvm.nvvm.f2ull.rz(float -1.5)
+ ret i64 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ull_ftz |
+;+-------------------------------------------------------------+
+define i64 @test_neg_1_5_f2ull_rm_ftz() {
+; CHECK-LABEL: define i64 @test_neg_1_5_f2ull_rm_ftz() {
+; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.f2ull.rm.ftz(float -1.500000e+00)
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %res = call i64 @llvm.nvvm.f2ull.rm.ftz(float -1.5)
+ ret i64 %res
+}
+
+define i64 @test_neg_1_5_f2ull_rn_ftz() {
+; CHECK-LABEL: define i64 @test_neg_1_5_f2ull_rn_ftz() {
+; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.f2ull.rn.ftz(float -1.500000e+00)
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %res = call i64 @llvm.nvvm.f2ull.rn.ftz(float -1.5)
+ ret i64 %res
+}
+
+define i64 @test_neg_1_5_f2ull_rp_ftz() {
+; CHECK-LABEL: define i64 @test_neg_1_5_f2ull_rp_ftz() {
+; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.f2ull.rp.ftz(float -1.500000e+00)
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %res = call i64 @llvm.nvvm.f2ull.rp.ftz(float -1.5)
+ ret i64 %res
+}
+
+define i64 @test_neg_1_5_f2ull_rz_ftz() {
+; CHECK-LABEL: define i64 @test_neg_1_5_f2ull_rz_ftz() {
+; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.f2ull.rz.ftz(float -1.500000e+00)
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %res = call i64 @llvm.nvvm.f2ull.rz.ftz(float -1.5)
+ ret i64 %res
+}
+;+-------------------------------------------------------------+
+;| d2ull |
+;+-------------------------------------------------------------+
+define i64 @test_neg_1_5_d2ull_rm() {
+; CHECK-LABEL: define i64 @test_neg_1_5_d2ull_rm() {
+; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.d2ull.rm(double -1.500000e+00)
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %res = call i64 @llvm.nvvm.d2ull.rm(double -1.5)
+ ret i64 %res
+}
+
+define i64 @test_neg_1_5_d2ull_rn() {
+; CHECK-LABEL: define i64 @test_neg_1_5_d2ull_rn() {
+; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.d2ull.rn(double -1.500000e+00)
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %res = call i64 @llvm.nvvm.d2ull.rn(double -1.5)
+ ret i64 %res
+}
+
+
+define i64 @test_neg_1_5_d2ull_rp() {
+; CHECK-LABEL: define i64 @test_neg_1_5_d2ull_rp() {
+; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.d2ull.rp(double -1.500000e+00)
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %res = call i64 @llvm.nvvm.d2ull.rp(double -1.5)
+ ret i64 %res
+}
+
+define i64 @test_neg_1_5_d2ull_rz() {
+; CHECK-LABEL: define i64 @test_neg_1_5_d2ull_rz() {
+; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.d2ull.rz(double -1.500000e+00)
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %res = call i64 @llvm.nvvm.d2ull.rz(double -1.5)
+ ret i64 %res
+}
+
+;###############################################################
+;# Tests with NaN #
+;###############################################################
+
+;+-------------------------------------------------------------+
+;| f2ll |
+;+-------------------------------------------------------------+
+define i64 @test_nan_f2ll_rm() {
+; CHECK-LABEL: define i64 @test_nan_f2ll_rm() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rm(float 0x7FFFFF0000000000)
+ ret i64 %res
+}
+
+define i64 @test_nan_f2ll_rn() {
+; CHECK-LABEL: define i64 @test_nan_f2ll_rn() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rn(float 0x7FFFFF0000000000)
+ ret i64 %res
+}
+
+
+define i64 @test_nan_f2ll_rp() {
+; CHECK-LABEL: define i64 @test_nan_f2ll_rp() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rp(float 0x7FFFFF0000000000)
+ ret i64 %res
+}
+
+define i64 @test_nan_f2ll_rz() {
+; CHECK-LABEL: define i64 @test_nan_f2ll_rz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rz(float 0x7FFFFF0000000000)
+ ret i64 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ll_ftz |
+;+-------------------------------------------------------------+
+define i64 @test_nan_f2ll_rm_ftz() {
+; CHECK-LABEL: define i64 @test_nan_f2ll_rm_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rm.ftz(float 0x7FFFFF0000000000)
+ ret i64 %res
+}
+
+define i64 @test_nan_f2ll_rn_ftz() {
+; CHECK-LABEL: define i64 @test_nan_f2ll_rn_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rn.ftz(float 0x7FFFFF0000000000)
+ ret i64 %res
+}
+
+define i64 @test_nan_f2ll_rp_ftz() {
+; CHECK-LABEL: define i64 @test_nan_f2ll_rp_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rp.ftz(float 0x7FFFFF0000000000)
+ ret i64 %res
+}
+
+define i64 @test_nan_f2ll_rz_ftz() {
+; CHECK-LABEL: define i64 @test_nan_f2ll_rz_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rz.ftz(float 0x7FFFFF0000000000)
+ ret i64 %res
+}
+;+-------------------------------------------------------------+
+;| d2ll |
+;+-------------------------------------------------------------+
+define i64 @test_nan_d2ll_rm() {
+; CHECK-LABEL: define i64 @test_nan_d2ll_rm() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.d2ll.rm(double 0xFFF8000000000000)
+ ret i64 %res
+}
+
+define i64 @test_nan_d2ll_rn() {
+; CHECK-LABEL: define i64 @test_nan_d2ll_rn() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.d2ll.rn(double 0xFFF8000000000000)
+ ret i64 %res
+}
+
+
+define i64 @test_nan_d2ll_rp() {
+; CHECK-LABEL: define i64 @test_nan_d2ll_rp() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.d2ll.rp(double 0xFFF8000000000000)
+ ret i64 %res
+}
+
+define i64 @test_nan_d2ll_rz() {
+; CHECK-LABEL: define i64 @test_nan_d2ll_rz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.d2ll.rz(double 0xFFF8000000000000)
+ ret i64 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ull |
+;+-------------------------------------------------------------+
+define i64 @test_nan_f2ull_rm() {
+; CHECK-LABEL: define i64 @test_nan_f2ull_rm() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rm(float 0x7FFFFF0000000000)
+ ret i64 %res
+}
+
+define i64 @test_nan_f2ull_rn() {
+; CHECK-LABEL: define i64 @test_nan_f2ull_rn() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rn(float 0x7FFFFF0000000000)
+ ret i64 %res
+}
+
+
+define i64 @test_nan_f2ull_rp() {
+; CHECK-LABEL: define i64 @test_nan_f2ull_rp() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rp(float 0x7FFFFF0000000000)
+ ret i64 %res
+}
+
+define i64 @test_nan_f2ull_rz() {
+; CHECK-LABEL: define i64 @test_nan_f2ull_rz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rz(float 0x7FFFFF0000000000)
+ ret i64 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ull_ftz |
+;+-------------------------------------------------------------+
+define i64 @test_nan_f2ull_rm_ftz() {
+; CHECK-LABEL: define i64 @test_nan_f2ull_rm_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rm.ftz(float 0x7FFFFF0000000000)
+ ret i64 %res
+}
+
+define i64 @test_nan_f2ull_rn_ftz() {
+; CHECK-LABEL: define i64 @test_nan_f2ull_rn_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rn.ftz(float 0x7FFFFF0000000000)
+ ret i64 %res
+}
+
+define i64 @test_nan_f2ull_rp_ftz() {
+; CHECK-LABEL: define i64 @test_nan_f2ull_rp_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rp.ftz(float 0x7FFFFF0000000000)
+ ret i64 %res
+}
+
+define i64 @test_nan_f2ull_rz_ftz() {
+; CHECK-LABEL: define i64 @test_nan_f2ull_rz_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rz.ftz(float 0x7FFFFF0000000000)
+ ret i64 %res
+}
+;+-------------------------------------------------------------+
+;| d2ull |
+;+-------------------------------------------------------------+
+define i64 @test_nan_d2ull_rm() {
+; CHECK-LABEL: define i64 @test_nan_d2ull_rm() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.d2ull.rm(double 0xFFF8000000000000)
+ ret i64 %res
+}
+
+define i64 @test_nan_d2ull_rn() {
+; CHECK-LABEL: define i64 @test_nan_d2ull_rn() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.d2ull.rn(double 0xFFF8000000000000)
+ ret i64 %res
+}
+
+
+define i64 @test_nan_d2ull_rp() {
+; CHECK-LABEL: define i64 @test_nan_d2ull_rp() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.d2ull.rp(double 0xFFF8000000000000)
+ ret i64 %res
+}
+
+define i64 @test_nan_d2ull_rz() {
+; CHECK-LABEL: define i64 @test_nan_d2ull_rz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.d2ull.rz(double 0xFFF8000000000000)
+ ret i64 %res
+}
+
+;###############################################################
+;# Tests with Positive Subnormal #
+;###############################################################
+
+;+-------------------------------------------------------------+
+;| f2ll |
+;+-------------------------------------------------------------+
+define i64 @test_pos_subnormal_f2ll_rm() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_f2ll_rm() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rm(float 0x380FFFFFC0000000)
+ ret i64 %res
+}
+
+define i64 @test_pos_subnormal_f2ll_rn() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_f2ll_rn() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rn(float 0x380FFFFFC0000000)
+ ret i64 %res
+}
+
+
+define i64 @test_pos_subnormal_f2ll_rp() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_f2ll_rp() {
+; CHECK-NEXT: ret i64 1
+;
+ %res = call i64 @llvm.nvvm.f2ll.rp(float 0x380FFFFFC0000000)
+ ret i64 %res
+}
+
+define i64 @test_pos_subnormal_f2ll_rz() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_f2ll_rz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rz(float 0x380FFFFFC0000000)
+ ret i64 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ll_ftz |
+;+-------------------------------------------------------------+
+define i64 @test_pos_subnormal_f2ll_rm_ftz() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_f2ll_rm_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rm.ftz(float 0x380FFFFFC0000000)
+ ret i64 %res
+}
+
+define i64 @test_pos_subnormal_f2ll_rn_ftz() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_f2ll_rn_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rn.ftz(float 0x380FFFFFC0000000)
+ ret i64 %res
+}
+
+define i64 @test_pos_subnormal_f2ll_rp_ftz() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_f2ll_rp_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rp.ftz(float 0x380FFFFFC0000000)
+ ret i64 %res
+}
+
+define i64 @test_pos_subnormal_f2ll_rz_ftz() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_f2ll_rz_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rz.ftz(float 0x380FFFFFC0000000)
+ ret i64 %res
+}
+;+-------------------------------------------------------------+
+;| d2ll |
+;+-------------------------------------------------------------+
+define i64 @test_pos_subnormal_d2ll_rm() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_d2ll_rm() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.d2ll.rm(double 0x000fffffffffffff)
+ ret i64 %res
+}
+
+define i64 @test_pos_subnormal_d2ll_rn() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_d2ll_rn() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.d2ll.rn(double 0x000fffffffffffff)
+ ret i64 %res
+}
+
+
+define i64 @test_pos_subnormal_d2ll_rp() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_d2ll_rp() {
+; CHECK-NEXT: ret i64 1
+;
+ %res = call i64 @llvm.nvvm.d2ll.rp(double 0x000fffffffffffff)
+ ret i64 %res
+}
+
+define i64 @test_pos_subnormal_d2ll_rz() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_d2ll_rz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.d2ll.rz(double 0x000fffffffffffff)
+ ret i64 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ull |
+;+-------------------------------------------------------------+
+define i64 @test_pos_subnormal_f2ull_rm() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_f2ull_rm() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rm(float 0x380FFFFFC0000000)
+ ret i64 %res
+}
+
+define i64 @test_pos_subnormal_f2ull_rn() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_f2ull_rn() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rn(float 0x380FFFFFC0000000)
+ ret i64 %res
+}
+
+
+define i64 @test_pos_subnormal_f2ull_rp() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_f2ull_rp() {
+; CHECK-NEXT: ret i64 1
+;
+ %res = call i64 @llvm.nvvm.f2ull.rp(float 0x380FFFFFC0000000)
+ ret i64 %res
+}
+
+define i64 @test_pos_subnormal_f2ull_rz() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_f2ull_rz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rz(float 0x380FFFFFC0000000)
+ ret i64 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ull_ftz |
+;+-------------------------------------------------------------+
+define i64 @test_pos_subnormal_f2ull_rm_ftz() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_f2ull_rm_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rm.ftz(float 0x380FFFFFC0000000)
+ ret i64 %res
+}
+
+define i64 @test_pos_subnormal_f2ull_rn_ftz() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_f2ull_rn_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rn.ftz(float 0x380FFFFFC0000000)
+ ret i64 %res
+}
+
+define i64 @test_pos_subnormal_f2ull_rp_ftz() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_f2ull_rp_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rp.ftz(float 0x380FFFFFC0000000)
+ ret i64 %res
+}
+
+define i64 @test_pos_subnormal_f2ull_rz_ftz() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_f2ull_rz_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rz.ftz(float 0x380FFFFFC0000000)
+ ret i64 %res
+}
+;+-------------------------------------------------------------+
+;| d2ull |
+;+-------------------------------------------------------------+
+define i64 @test_pos_subnormal_d2ull_rm() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_d2ull_rm() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.d2ull.rm(double 0x000fffffffffffff)
+ ret i64 %res
+}
+
+define i64 @test_pos_subnormal_d2ull_rn() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_d2ull_rn() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.d2ull.rn(double 0x000fffffffffffff)
+ ret i64 %res
+}
+
+
+define i64 @test_pos_subnormal_d2ull_rp() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_d2ull_rp() {
+; CHECK-NEXT: ret i64 1
+;
+ %res = call i64 @llvm.nvvm.d2ull.rp(double 0x000fffffffffffff)
+ ret i64 %res
+}
+
+define i64 @test_pos_subnormal_d2ull_rz() {
+; CHECK-LABEL: define i64 @test_pos_subnormal_d2ull_rz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.d2ull.rz(double 0x000fffffffffffff)
+ ret i64 %res
+}
+
+;###############################################################
+;# Tests with Negative Subnormal #
+;###############################################################
+
+;+-------------------------------------------------------------+
+;| f2ll |
+;+-------------------------------------------------------------+
+define i64 @test_neg_subnormal_f2ll_rm() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_f2ll_rm() {
+; CHECK-NEXT: ret i64 -1
+;
+ %res = call i64 @llvm.nvvm.f2ll.rm(float 0xB80FFFFFC0000000)
+ ret i64 %res
+}
+
+define i64 @test_neg_subnormal_f2ll_rn() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_f2ll_rn() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rn(float 0xB80FFFFFC0000000)
+ ret i64 %res
+}
+
+
+define i64 @test_neg_subnormal_f2ll_rp() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_f2ll_rp() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rp(float 0xB80FFFFFC0000000)
+ ret i64 %res
+}
+
+define i64 @test_neg_subnormal_f2ll_rz() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_f2ll_rz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rz(float 0xB80FFFFFC0000000)
+ ret i64 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ll_ftz |
+;+-------------------------------------------------------------+
+define i64 @test_neg_subnormal_f2ll_rm_ftz() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_f2ll_rm_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rm.ftz(float 0xB80FFFFFC0000000)
+ ret i64 %res
+}
+
+define i64 @test_neg_subnormal_f2ll_rn_ftz() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_f2ll_rn_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rn.ftz(float 0xB80FFFFFC0000000)
+ ret i64 %res
+}
+
+define i64 @test_neg_subnormal_f2ll_rp_ftz() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_f2ll_rp_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rp.ftz(float 0xB80FFFFFC0000000)
+ ret i64 %res
+}
+
+define i64 @test_neg_subnormal_f2ll_rz_ftz() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_f2ll_rz_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ll.rz.ftz(float 0xB80FFFFFC0000000)
+ ret i64 %res
+}
+;+-------------------------------------------------------------+
+;| d2ll |
+;+-------------------------------------------------------------+
+define i64 @test_neg_subnormal_d2ll_rm() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_d2ll_rm() {
+; CHECK-NEXT: ret i64 -1
+;
+ %res = call i64 @llvm.nvvm.d2ll.rm(double 0x800fffffffffffff)
+ ret i64 %res
+}
+
+define i64 @test_neg_subnormal_d2ll_rn() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_d2ll_rn() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.d2ll.rn(double 0x800fffffffffffff)
+ ret i64 %res
+}
+
+
+define i64 @test_neg_subnormal_d2ll_rp() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_d2ll_rp() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.d2ll.rp(double 0x800fffffffffffff)
+ ret i64 %res
+}
+
+define i64 @test_neg_subnormal_d2ll_rz() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_d2ll_rz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.d2ll.rz(double 0x800fffffffffffff)
+ ret i64 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ull |
+;+-------------------------------------------------------------+
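+; Note that, unlike the other rounding modes, the rm case below is not
+; constant folded (the call is left intact in the CHECK lines), presumably
+; because rounding the negative input toward negative infinity would produce
+; a value that is not representable as an unsigned integer.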
+define i64 @test_neg_subnormal_f2ull_rm() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_f2ull_rm() {
+; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.f2ull.rm(float 0xB80FFFFFC0000000)
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %res = call i64 @llvm.nvvm.f2ull.rm(float 0xB80FFFFFC0000000)
+ ret i64 %res
+}
+
+define i64 @test_neg_subnormal_f2ull_rn() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_f2ull_rn() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rn(float 0xB80FFFFFC0000000)
+ ret i64 %res
+}
+
+
+define i64 @test_neg_subnormal_f2ull_rp() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_f2ull_rp() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rp(float 0xB80FFFFFC0000000)
+ ret i64 %res
+}
+
+define i64 @test_neg_subnormal_f2ull_rz() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_f2ull_rz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rz(float 0xB80FFFFFC0000000)
+ ret i64 %res
+}
+
+;+-------------------------------------------------------------+
+;| f2ull_ftz |
+;+-------------------------------------------------------------+
+define i64 @test_neg_subnormal_f2ull_rm_ftz() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_f2ull_rm_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rm.ftz(float 0xB80FFFFFC0000000)
+ ret i64 %res
+}
+
+define i64 @test_neg_subnormal_f2ull_rn_ftz() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_f2ull_rn_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rn.ftz(float 0xB80FFFFFC0000000)
+ ret i64 %res
+}
+
+define i64 @test_neg_subnormal_f2ull_rp_ftz() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_f2ull_rp_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rp.ftz(float 0xB80FFFFFC0000000)
+ ret i64 %res
+}
+
+define i64 @test_neg_subnormal_f2ull_rz_ftz() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_f2ull_rz_ftz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.f2ull.rz.ftz(float 0xB80FFFFFC0000000)
+ ret i64 %res
+}
+;+-------------------------------------------------------------+
+;| d2ull |
+;+-------------------------------------------------------------+
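+; As with f2ull.rm above, the rm case below is left unfolded for the
+; negative subnormal input (see the CHECK lines).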
+define i64 @test_neg_subnormal_d2ull_rm() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_d2ull_rm() {
+; CHECK-NEXT: [[RES:%.*]] = call i64 @llvm.nvvm.d2ull.rm(double 0x800FFFFFFFFFFFFF)
+; CHECK-NEXT: ret i64 [[RES]]
+;
+ %res = call i64 @llvm.nvvm.d2ull.rm(double 0x800fffffffffffff)
+ ret i64 %res
+}
+
+define i64 @test_neg_subnormal_d2ull_rn() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_d2ull_rn() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.d2ull.rn(double 0x800fffffffffffff)
+ ret i64 %res
+}
+
+
+define i64 @test_neg_subnormal_d2ull_rp() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_d2ull_rp() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.d2ull.rp(double 0x800fffffffffffff)
+ ret i64 %res
+}
+
+define i64 @test_neg_subnormal_d2ull_rz() {
+; CHECK-LABEL: define i64 @test_neg_subnormal_d2ull_rz() {
+; CHECK-NEXT: ret i64 0
+;
+ %res = call i64 @llvm.nvvm.d2ull.rz(double 0x800fffffffffffff)
+ ret i64 %res
+}
+
+declare i64 @llvm.nvvm.f2ll.rm(float)
+declare i64 @llvm.nvvm.f2ll.rn(float)
+declare i64 @llvm.nvvm.f2ll.rp(float)
+declare i64 @llvm.nvvm.f2ll.rz(float)
+
+declare i64 @llvm.nvvm.f2ll.rm.ftz(float)
+declare i64 @llvm.nvvm.f2ll.rn.ftz(float)
+declare i64 @llvm.nvvm.f2ll.rp.ftz(float)
+declare i64 @llvm.nvvm.f2ll.rz.ftz(float)
+
+declare i64 @llvm.nvvm.d2ll.rm(double)
+declare i64 @llvm.nvvm.d2ll.rn(double)
+declare i64 @llvm.nvvm.d2ll.rp(double)
+declare i64 @llvm.nvvm.d2ll.rz(double)
+
+
+declare i64 @llvm.nvvm.f2ull.rm(float)
+declare i64 @llvm.nvvm.f2ull.rn(float)
+declare i64 @llvm.nvvm.f2ull.rp(float)
+declare i64 @llvm.nvvm.f2ull.rz(float)
+
+declare i64 @llvm.nvvm.f2ull.rm.ftz(float)
+declare i64 @llvm.nvvm.f2ull.rn.ftz(float)
+declare i64 @llvm.nvvm.f2ull.rp.ftz(float)
+declare i64 @llvm.nvvm.f2ull.rz.ftz(float)
+
+declare i64 @llvm.nvvm.d2ull.rm(double)
+declare i64 @llvm.nvvm.d2ull.rn(double)
+declare i64 @llvm.nvvm.d2ull.rp(double)
+declare i64 @llvm.nvvm.d2ull.rz(double)
diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/massive_indirection.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/massive_indirection.ll
new file mode 100644
index 0000000..fe8a7e5
--- /dev/null
+++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/massive_indirection.ll
@@ -0,0 +1,180 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt %s -mtriple=x86_64-unknown-linux-gnu -passes=load-store-vectorizer -mcpu=skx -S -o - | FileCheck %s
+
+; This test verifies that the vectorizer can handle an extended sequence of
+; getelementptr instructions and generate longer vectors. With special
+; handling, some elements can still be vectorized even when finding their
+; common underlying object requires looking more than 6 levels up from the
+; original pointer.
+
+; The test below is a simplified version of an actual performance-oriented
+; workload; the offsets in the getelementptr instructions are kept similar or
+; identical for simplicity.
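+;
+; In the first test below, the scalar and short-vector stores cover 16
+; contiguous bytes starting at %a6 (half at +0, <4 x half> at +2, <2 x half>
+; at +10, and half at +14), so the vectorizer is expected to merge them into
+; the single <8 x half> store shown in the CHECK lines.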
+
+define void @v1_v2_v4_v1_to_v8_levels_6_7_8_8(i32 %arg0, ptr align 16 %arg1) {
+; CHECK-LABEL: define void @v1_v2_v4_v1_to_v8_levels_6_7_8_8(
+; CHECK-SAME: i32 [[ARG0:%.*]], ptr align 16 [[ARG1:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[LEVEL1:%.*]] = getelementptr i8, ptr [[ARG1]], i32 917504
+; CHECK-NEXT: [[LEVEL2:%.*]] = getelementptr i8, ptr [[LEVEL1]], i32 [[ARG0]]
+; CHECK-NEXT: [[LEVEL3:%.*]] = getelementptr i8, ptr [[LEVEL2]], i32 32768
+; CHECK-NEXT: [[LEVEL4:%.*]] = getelementptr i8, ptr [[LEVEL3]], i32 [[ARG0]]
+; CHECK-NEXT: [[LEVEL5:%.*]] = getelementptr i8, ptr [[LEVEL4]], i32 [[ARG0]]
+; CHECK-NEXT: [[A6:%.*]] = getelementptr i8, ptr [[LEVEL5]], i32 [[ARG0]]
+; CHECK-NEXT: store <8 x half> zeroinitializer, ptr [[A6]], align 16
+; CHECK-NEXT: ret void
+;
+
+ %level1 = getelementptr i8, ptr %arg1, i32 917504
+ %level2 = getelementptr i8, ptr %level1, i32 %arg0
+ %level3 = getelementptr i8, ptr %level2, i32 32768
+ %level4 = getelementptr i8, ptr %level3, i32 %arg0
+ %level5 = getelementptr i8, ptr %level4, i32 %arg0
+
+ %a6 = getelementptr i8, ptr %level5, i32 %arg0
+ %b7 = getelementptr i8, ptr %a6, i32 2
+ %c8 = getelementptr i8, ptr %b7, i32 8
+ %d8 = getelementptr i8, ptr %b7, i32 12
+
+ store half 0xH0000, ptr %a6, align 16
+ store <4 x half> zeroinitializer, ptr %b7, align 2
+ store <2 x half> zeroinitializer, ptr %c8, align 2
+ store half 0xH0000, ptr %d8, align 2
+ ret void
+}
+
+define void @v1x8_levels_6_7_8_9_10_11_12_13(i32 %arg0, ptr align 16 %arg1) {
+; CHECK-LABEL: define void @v1x8_levels_6_7_8_9_10_11_12_13(
+; CHECK-SAME: i32 [[ARG0:%.*]], ptr align 16 [[ARG1:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[LEVEL1:%.*]] = getelementptr i8, ptr [[ARG1]], i32 917504
+; CHECK-NEXT: [[LEVEL2:%.*]] = getelementptr i8, ptr [[LEVEL1]], i32 [[ARG0]]
+; CHECK-NEXT: [[LEVEL3:%.*]] = getelementptr i8, ptr [[LEVEL2]], i32 32768
+; CHECK-NEXT: [[LEVEL4:%.*]] = getelementptr i8, ptr [[LEVEL3]], i32 [[ARG0]]
+; CHECK-NEXT: [[LEVEL5:%.*]] = getelementptr i8, ptr [[LEVEL4]], i32 [[ARG0]]
+; CHECK-NEXT: [[A6:%.*]] = getelementptr i8, ptr [[LEVEL5]], i32 [[ARG0]]
+; CHECK-NEXT: store <8 x half> zeroinitializer, ptr [[A6]], align 16
+; CHECK-NEXT: ret void
+;
+
+ %level1 = getelementptr i8, ptr %arg1, i32 917504
+ %level2 = getelementptr i8, ptr %level1, i32 %arg0
+ %level3 = getelementptr i8, ptr %level2, i32 32768
+ %level4 = getelementptr i8, ptr %level3, i32 %arg0
+ %level5 = getelementptr i8, ptr %level4, i32 %arg0
+
+ %a6 = getelementptr i8, ptr %level5, i32 %arg0
+ %b7 = getelementptr i8, ptr %a6, i32 2
+ %c8 = getelementptr i8, ptr %b7, i32 2
+ %d9 = getelementptr i8, ptr %c8, i32 2
+ %e10 = getelementptr i8, ptr %d9, i32 2
+ %f11 = getelementptr i8, ptr %e10, i32 2
+ %g12 = getelementptr i8, ptr %f11, i32 2
+ %h13 = getelementptr i8, ptr %g12, i32 2
+
+ store half 0xH0000, ptr %a6, align 16
+ store half 0xH0000, ptr %b7, align 2
+ store half 0xH0000, ptr %c8, align 2
+ store half 0xH0000, ptr %d9, align 2
+ store half 0xH0000, ptr %e10, align 8
+ store half 0xH0000, ptr %f11, align 2
+ store half 0xH0000, ptr %g12, align 2
+ store half 0xH0000, ptr %h13, align 2
+ ret void
+}
+
+define void @v1_4_4_4_2_1_to_v8_8_levels_6_7(i32 %arg0, ptr addrspace(3) align 16 %arg1_ptr, i32 %arg2, i32 %arg3, i32 %arg4, i32 %arg5, half %arg6_half, half %arg7_half, <2 x half> %arg8_2xhalf) {
+; CHECK-LABEL: define void @v1_4_4_4_2_1_to_v8_8_levels_6_7(
+; CHECK-SAME: i32 [[ARG0:%.*]], ptr addrspace(3) align 16 [[ARG1_PTR:%.*]], i32 [[ARG2:%.*]], i32 [[ARG3:%.*]], i32 [[ARG4:%.*]], i32 [[ARG5:%.*]], half [[ARG6_HALF:%.*]], half [[ARG7_HALF:%.*]], <2 x half> [[ARG8_2XHALF:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[ARG1_PTR]], i32 458752
+; CHECK-NEXT: br [[DOTPREHEADER11_PREHEADER:label %.*]]
+; CHECK: [[_PREHEADER11_PREHEADER:.*:]]
+; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i32 [[ARG0]], 6
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP1]], i32 [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP3]], i32 [[ARG2]]
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP4]], i32 [[ARG3]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[ARG0]], 2
+; CHECK-NEXT: br i1 [[CMP]], [[DOTLR_PH:label %.*]], [[DOTEXIT_POINT:label %.*]]
+; CHECK: [[_LR_PH:.*:]]
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[TMP5]], i32 [[ARG4]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr addrspace(3) [[GEP]], i32 [[ARG5]]
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x half> poison, half [[ARG6_HALF]], i32 0
+; CHECK-NEXT: [[TMP8:%.*]] = insertelement <8 x half> [[TMP7]], half 0xH0000, i32 1
+; CHECK-NEXT: [[TMP9:%.*]] = insertelement <8 x half> [[TMP8]], half 0xH0000, i32 2
+; CHECK-NEXT: [[TMP10:%.*]] = insertelement <8 x half> [[TMP9]], half 0xH0000, i32 3
+; CHECK-NEXT: [[TMP11:%.*]] = insertelement <8 x half> [[TMP10]], half 0xH0000, i32 4
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <2 x half> [[ARG8_2XHALF]], i32 0
+; CHECK-NEXT: [[TMP13:%.*]] = insertelement <8 x half> [[TMP11]], half [[TMP12]], i32 5
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x half> [[ARG8_2XHALF]], i32 1
+; CHECK-NEXT: [[TMP15:%.*]] = insertelement <8 x half> [[TMP13]], half [[TMP14]], i32 6
+; CHECK-NEXT: [[TMP16:%.*]] = insertelement <8 x half> [[TMP15]], half [[ARG7_HALF]], i32 7
+; CHECK-NEXT: store <8 x half> [[TMP16]], ptr addrspace(3) [[TMP6]], align 2
+; CHECK-NEXT: br [[DOTEXIT_POINT]]
+; CHECK: [[_EXIT_POINT:.*:]]
+; CHECK-NEXT: ret void
+;
+ %base1 = getelementptr inbounds i8, ptr addrspace(3) %arg1_ptr, i32 458752
+ br label %.preheader11.preheader
+
+.preheader11.preheader:
+ %base2 = shl nuw nsw i32 %arg0, 6
+ %base3 = getelementptr inbounds i8, ptr addrspace(3) %base1, i32 %base2
+
+ %base4 = getelementptr inbounds i8, ptr addrspace(3) %base3, i32 %arg2
+ %base5 = getelementptr inbounds i8, ptr addrspace(3) %base4, i32 %arg3
+
+ %cmp = icmp sgt i32 %arg0, 2
+ br i1 %cmp, label %.lr.ph, label %.exit_point
+
+.lr.ph:
+ %gep = getelementptr inbounds i8, ptr addrspace(3) %base5, i32 %arg4
+
+ %dst = getelementptr inbounds i8, ptr addrspace(3) %gep, i32 %arg5
+ %dst_off2 = getelementptr inbounds i8, ptr addrspace(3) %dst, i32 2
+ %dst_off10 = getelementptr inbounds i8, ptr addrspace(3) %dst, i32 10
+ %dst_off14 = getelementptr inbounds i8, ptr addrspace(3) %dst, i32 14
+
+ store half %arg6_half, ptr addrspace(3) %dst, align 2
+ store <4 x half> zeroinitializer, ptr addrspace(3) %dst_off2, align 2
+ store <2 x half> %arg8_2xhalf, ptr addrspace(3) %dst_off10, align 2
+ store half %arg7_half, ptr addrspace(3) %dst_off14, align 2
+ br label %.exit_point
+
+.exit_point:
+ ret void
+}
+
+; A regression test for merging equivalence classes. It is reduced and adapted
+; for the LSV from llvm/test/CodeGen/NVPTX/variadics-backend.ll, which failed
+; post-commit checks under MemorySanitizer on the initial attempt to implement
+; the merging of equivalence classes.
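+; In this test, the two scalar double loads at offsets 0 and 8 from
+; %argp.cur16.aligned end up merged into the single <2 x double> load shown
+; in the CHECK lines, exercising the equivalence-class merging described
+; above.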
+define void @variadics1(ptr %vlist) {
+; CHECK-LABEL: define void @variadics1(
+; CHECK-SAME: ptr [[VLIST:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ARGP_CUR7_ALIGNED2:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[VLIST]], i64 0)
+; CHECK-NEXT: [[ARGP_NEXT8:%.*]] = getelementptr i8, ptr [[ARGP_CUR7_ALIGNED2]], i64 8
+; CHECK-NEXT: [[X0:%.*]] = getelementptr i8, ptr [[ARGP_NEXT8]], i32 7
+; CHECK-NEXT: [[ARGP_CUR11_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[X0]], i64 0)
+; CHECK-NEXT: [[ARGP_NEXT12:%.*]] = getelementptr i8, ptr [[ARGP_CUR11_ALIGNED]], i64 8
+; CHECK-NEXT: [[X2:%.*]] = getelementptr i8, ptr [[ARGP_NEXT12]], i32 7
+; CHECK-NEXT: [[ARGP_CUR16_ALIGNED:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[X2]], i64 0)
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr [[ARGP_CUR16_ALIGNED]], align 4294967296
+; CHECK-NEXT: [[X31:%.*]] = extractelement <2 x double> [[TMP1]], i32 0
+; CHECK-NEXT: [[X42:%.*]] = extractelement <2 x double> [[TMP1]], i32 1
+; CHECK-NEXT: [[X5:%.*]] = fadd double [[X42]], [[X31]]
+; CHECK-NEXT: store double [[X5]], ptr null, align 8
+; CHECK-NEXT: ret void
+;
+ %argp.cur7.aligned2 = call ptr @llvm.ptrmask.p0.i64(ptr %vlist, i64 0)
+ %argp.next8 = getelementptr i8, ptr %argp.cur7.aligned2, i64 8
+ %x0 = getelementptr i8, ptr %argp.next8, i32 7
+ %argp.cur11.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %x0, i64 0)
+ %argp.next12 = getelementptr i8, ptr %argp.cur11.aligned, i64 8
+ %x2 = getelementptr i8, ptr %argp.next12, i32 7
+ %argp.cur16.aligned = call ptr @llvm.ptrmask.p0.i64(ptr %x2, i64 0)
+ %x3 = load double, ptr %argp.cur16.aligned, align 8
+ %argp.cur16.aligned_off8 = getelementptr i8, ptr %argp.cur16.aligned, i32 8
+ %x4 = load double, ptr %argp.cur16.aligned_off8, align 8
+ %x5 = fadd double %x4, %x3
+ store double %x5, ptr null, align 8
+ ret void
+}
+
+declare ptr @llvm.ptrmask.p0.i64(ptr, i64)
diff --git a/llvm/test/Transforms/LoopStrengthReduce/NVPTX/trunc.ll b/llvm/test/Transforms/LoopStrengthReduce/NVPTX/trunc.ll
index 8761122..e6b5991 100644
--- a/llvm/test/Transforms/LoopStrengthReduce/NVPTX/trunc.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/NVPTX/trunc.ll
@@ -13,7 +13,7 @@ target triple = "nvptx64-nvidia-cuda"
; That would be worthless, because "i" is simulated by two 32-bit registers and
; truncating it to 32-bit is as simple as directly using the register that
; contains the low bits.
-define void @trunc_is_free(i64 %begin, i64 %stride, i64 %end) {
+define ptx_kernel void @trunc_is_free(i64 %begin, i64 %stride, i64 %end) {
; CHECK-LABEL: @trunc_is_free(
entry:
%cmp.4 = icmp eq i64 %begin, %end
@@ -41,5 +41,3 @@ for.body: ; preds = %for.body.preheader,
declare void @_Z3usei(i32)
-!nvvm.annotations = !{!0}
-!0 = !{ptr @trunc_is_free, !"kernel", i32 1}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/blend-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/blend-costs.ll
index ddf6c10..254cdf2 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/blend-costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/blend-costs.ll
@@ -209,6 +209,7 @@ define void @test_blend_feeding_replicated_store_2(ptr noalias %src, ptr %dst, i
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i1> poison, i1 [[C_0]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i1> [[BROADCAST_SPLATINSERT]], <16 x i1> poison, <16 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP5:%.*]] = xor <16 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE30:.*]] ]
@@ -218,7 +219,6 @@ define void @test_blend_feeding_replicated_store_2(ptr noalias %src, ptr %dst, i
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq <16 x i8> [[WIDE_LOAD]], zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = xor <16 x i1> [[TMP3]], splat (i1 true)
-; CHECK-NEXT: [[TMP5:%.*]] = xor <16 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
; CHECK-NEXT: [[TMP6:%.*]] = select <16 x i1> [[TMP4]], <16 x i1> [[TMP5]], <16 x i1> zeroinitializer
; CHECK-NEXT: [[TMP7:%.*]] = or <16 x i1> [[TMP6]], [[TMP3]]
; CHECK-NEXT: [[PREDPHI:%.*]] = select <16 x i1> [[TMP6]], <16 x i8> zeroinitializer, <16 x i8> splat (i8 1)
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll
index 08a6001..8c5d84e 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/force-target-instruction-cost.ll
@@ -151,9 +151,9 @@ exit:
ret void
}
-define void @test_exit_branch_cost(ptr %dst, i64 %x, i32 %y, ptr %dst.1, i1 %c.4, ptr %src, ptr %dst.3, i1 %c.3, ptr %dst.2) {
+define void @test_exit_branch_cost(ptr %dst, ptr noalias %x.ptr, ptr noalias %y.ptr, ptr %dst.1, i1 %c.4, ptr %src, ptr %dst.3, i1 %c.3, ptr %dst.2) {
; CHECK-LABEL: define void @test_exit_branch_cost(
-; CHECK-SAME: ptr [[DST:%.*]], i64 [[X:%.*]], i32 [[Y:%.*]], ptr [[DST_1:%.*]], i1 [[C_4:%.*]], ptr [[SRC:%.*]], ptr [[DST_3:%.*]], i1 [[C_3:%.*]], ptr [[DST_2:%.*]]) {
+; CHECK-SAME: ptr [[DST:%.*]], ptr noalias [[X_PTR:%.*]], ptr noalias [[Y_PTR:%.*]], ptr [[DST_1:%.*]], i1 [[C_4:%.*]], ptr [[SRC:%.*]], ptr [[DST_3:%.*]], i1 [[C_3:%.*]], ptr [[DST_2:%.*]]) {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
; CHECK: [[VECTOR_MEMCHECK]]:
@@ -172,11 +172,11 @@ define void @test_exit_branch_cost(ptr %dst, i64 %x, i32 %y, ptr %dst.1, i1 %c.4
; CHECK-NEXT: [[BOUND08:%.*]] = icmp ult ptr [[DST_1]], [[SCEVGEP3]]
; CHECK-NEXT: [[BOUND19:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT10:%.*]] = and i1 [[BOUND08]], [[BOUND19]]
-; CHECK-NEXT: [[CONFLICT_RDX11:%.*]] = or i1 [[CONFLICT_RDX]], [[FOUND_CONFLICT10]]
+; CHECK-NEXT: [[CONFLICT_RDX21:%.*]] = or i1 [[CONFLICT_RDX]], [[FOUND_CONFLICT10]]
; CHECK-NEXT: [[BOUND012:%.*]] = icmp ult ptr [[DST_1]], [[SCEVGEP4]]
; CHECK-NEXT: [[BOUND113:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT14:%.*]] = and i1 [[BOUND012]], [[BOUND113]]
-; CHECK-NEXT: [[CONFLICT_RDX15:%.*]] = or i1 [[CONFLICT_RDX11]], [[FOUND_CONFLICT14]]
+; CHECK-NEXT: [[CONFLICT_RDX15:%.*]] = or i1 [[CONFLICT_RDX21]], [[FOUND_CONFLICT14]]
; CHECK-NEXT: [[BOUND016:%.*]] = icmp ult ptr [[DST_3]], [[SCEVGEP2]]
; CHECK-NEXT: [[BOUND117:%.*]] = icmp ult ptr [[DST_2]], [[SCEVGEP1]]
; CHECK-NEXT: [[FOUND_CONFLICT18:%.*]] = and i1 [[BOUND016]], [[BOUND117]]
@@ -184,161 +184,101 @@ define void @test_exit_branch_cost(ptr %dst, i64 %x, i32 %y, ptr %dst.1, i1 %c.4
; CHECK-NEXT: [[BOUND020:%.*]] = icmp ult ptr [[DST_3]], [[SCEVGEP3]]
; CHECK-NEXT: [[BOUND121:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP1]]
; CHECK-NEXT: [[FOUND_CONFLICT22:%.*]] = and i1 [[BOUND020]], [[BOUND121]]
-; CHECK-NEXT: [[CONFLICT_RDX23:%.*]] = or i1 [[CONFLICT_RDX19]], [[FOUND_CONFLICT22]]
+; CHECK-NEXT: [[CONFLICT_RDX41:%.*]] = or i1 [[CONFLICT_RDX19]], [[FOUND_CONFLICT22]]
; CHECK-NEXT: [[BOUND024:%.*]] = icmp ult ptr [[DST_3]], [[SCEVGEP4]]
; CHECK-NEXT: [[BOUND125:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP1]]
; CHECK-NEXT: [[FOUND_CONFLICT26:%.*]] = and i1 [[BOUND024]], [[BOUND125]]
-; CHECK-NEXT: [[CONFLICT_RDX27:%.*]] = or i1 [[CONFLICT_RDX23]], [[FOUND_CONFLICT26]]
+; CHECK-NEXT: [[CONFLICT_RDX27:%.*]] = or i1 [[CONFLICT_RDX41]], [[FOUND_CONFLICT26]]
; CHECK-NEXT: [[BOUND028:%.*]] = icmp ult ptr [[DST_2]], [[SCEVGEP3]]
; CHECK-NEXT: [[BOUND129:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP2]]
; CHECK-NEXT: [[FOUND_CONFLICT30:%.*]] = and i1 [[BOUND028]], [[BOUND129]]
-; CHECK-NEXT: [[CONFLICT_RDX31:%.*]] = or i1 [[CONFLICT_RDX27]], [[FOUND_CONFLICT30]]
+; CHECK-NEXT: [[CONFLICT_RDX65:%.*]] = or i1 [[CONFLICT_RDX27]], [[FOUND_CONFLICT30]]
; CHECK-NEXT: [[BOUND032:%.*]] = icmp ult ptr [[DST_2]], [[SCEVGEP4]]
; CHECK-NEXT: [[BOUND133:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP2]]
-; CHECK-NEXT: [[FOUND_CONFLICT34:%.*]] = and i1 [[BOUND032]], [[BOUND133]]
-; CHECK-NEXT: [[CONFLICT_RDX35:%.*]] = or i1 [[CONFLICT_RDX31]], [[FOUND_CONFLICT34]]
+; CHECK-NEXT: [[FOUND_CONFLICT68:%.*]] = and i1 [[BOUND032]], [[BOUND133]]
+; CHECK-NEXT: [[CONFLICT_RDX35:%.*]] = or i1 [[CONFLICT_RDX65]], [[FOUND_CONFLICT68]]
; CHECK-NEXT: [[BOUND036:%.*]] = icmp ult ptr [[DST]], [[SCEVGEP4]]
; CHECK-NEXT: [[BOUND137:%.*]] = icmp ult ptr [[SRC]], [[SCEVGEP3]]
; CHECK-NEXT: [[FOUND_CONFLICT38:%.*]] = and i1 [[BOUND036]], [[BOUND137]]
; CHECK-NEXT: [[CONFLICT_RDX39:%.*]] = or i1 [[CONFLICT_RDX35]], [[FOUND_CONFLICT38]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX39]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i64> poison, i64 [[X]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i64> [[BROADCAST_SPLATINSERT]], <2 x i64> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP47:%.*]] = icmp eq <2 x i64> [[BROADCAST_SPLAT]], zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT40:%.*]] = insertelement <2 x i1> poison, i1 [[C_3]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT41:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT40]], <2 x i1> poison, <2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[C_4]], <2 x i1> [[BROADCAST_SPLAT41]], <2 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP11:%.*]] = xor <2 x i1> [[TMP2]], splat (i1 true)
; CHECK-NEXT: [[BROADCAST_SPLATINSERT56:%.*]] = insertelement <2 x i1> poison, i1 [[C_4]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT57:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT56]], <2 x i1> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP33:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT57]], splat (i1 true)
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE74:.*]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = xor <2 x i1> [[TMP47]], splat (i1 true)
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE55:.*]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[X_PTR]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[TMP4]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP6]], align 8
+; CHECK-NEXT: [[TMP47:%.*]] = icmp eq <2 x i64> [[WIDE_LOAD]], zeroinitializer
; CHECK-NEXT: [[TMP5:%.*]] = xor <2 x i1> [[TMP47]], splat (i1 true)
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i1> [[TMP4]], i32 0
-; CHECK-NEXT: br i1 [[TMP6]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0
+; CHECK-NEXT: br i1 [[TMP8]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
; CHECK: [[PRED_STORE_IF]]:
; CHECK-NEXT: store i64 0, ptr [[DST_1]], align 8, !alias.scope [[META7:![0-9]+]], !noalias [[META10:![0-9]+]]
; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE]]
; CHECK: [[PRED_STORE_CONTINUE]]:
-; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x i1> [[TMP4]], i32 1
-; CHECK-NEXT: br i1 [[TMP7]], label %[[PRED_STORE_IF42:.*]], label %[[PRED_STORE_CONTINUE43:.*]]
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1
+; CHECK-NEXT: br i1 [[TMP9]], label %[[PRED_STORE_IF42:.*]], label %[[PRED_STORE_CONTINUE43:.*]]
; CHECK: [[PRED_STORE_IF42]]:
; CHECK-NEXT: store i64 0, ptr [[DST_1]], align 8, !alias.scope [[META7]], !noalias [[META10]]
; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE43]]
; CHECK: [[PRED_STORE_CONTINUE43]]:
-; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0
-; CHECK-NEXT: br i1 [[TMP8]], label %[[PRED_STORE_IF44:.*]], label %[[PRED_STORE_CONTINUE45:.*]]
+; CHECK-NEXT: [[TMP13:%.*]] = select <2 x i1> [[TMP5]], <2 x i1> [[TMP11]], <2 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP13]], i32 0
+; CHECK-NEXT: br i1 [[TMP16]], label %[[PRED_STORE_IF44:.*]], label %[[PRED_STORE_CONTINUE45:.*]]
; CHECK: [[PRED_STORE_IF44]]:
-; CHECK-NEXT: store i64 0, ptr [[DST_1]], align 8, !alias.scope [[META7]], !noalias [[META10]]
+; CHECK-NEXT: store i64 0, ptr [[DST_3]], align 8, !alias.scope [[META15:![0-9]+]], !noalias [[META16:![0-9]+]]
; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE45]]
; CHECK: [[PRED_STORE_CONTINUE45]]:
-; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1
-; CHECK-NEXT: br i1 [[TMP9]], label %[[PRED_STORE_IF46:.*]], label %[[PRED_STORE_CONTINUE47:.*]]
+; CHECK-NEXT: [[TMP17:%.*]] = extractelement <2 x i1> [[TMP13]], i32 1
+; CHECK-NEXT: br i1 [[TMP17]], label %[[PRED_STORE_IF46:.*]], label %[[PRED_STORE_CONTINUE47:.*]]
; CHECK: [[PRED_STORE_IF46]]:
-; CHECK-NEXT: store i64 0, ptr [[DST_1]], align 8, !alias.scope [[META7]], !noalias [[META10]]
+; CHECK-NEXT: store i64 0, ptr [[DST_3]], align 8, !alias.scope [[META15]], !noalias [[META16]]
; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE47]]
; CHECK: [[PRED_STORE_CONTINUE47]]:
-; CHECK-NEXT: [[TMP10:%.*]] = xor <2 x i1> [[TMP2]], splat (i1 true)
-; CHECK-NEXT: [[TMP11:%.*]] = xor <2 x i1> [[TMP2]], splat (i1 true)
-; CHECK-NEXT: [[TMP12:%.*]] = select <2 x i1> [[TMP4]], <2 x i1> [[TMP10]], <2 x i1> zeroinitializer
-; CHECK-NEXT: [[TMP13:%.*]] = select <2 x i1> [[TMP5]], <2 x i1> [[TMP11]], <2 x i1> zeroinitializer
-; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP12]], i32 0
-; CHECK-NEXT: br i1 [[TMP14]], label %[[PRED_STORE_IF48:.*]], label %[[PRED_STORE_CONTINUE49:.*]]
-; CHECK: [[PRED_STORE_IF48]]:
-; CHECK-NEXT: store i64 0, ptr [[DST_3]], align 8, !alias.scope [[META15:![0-9]+]], !noalias [[META16:![0-9]+]]
-; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE49]]
-; CHECK: [[PRED_STORE_CONTINUE49]]:
-; CHECK-NEXT: [[TMP15:%.*]] = extractelement <2 x i1> [[TMP12]], i32 1
-; CHECK-NEXT: br i1 [[TMP15]], label %[[PRED_STORE_IF50:.*]], label %[[PRED_STORE_CONTINUE51:.*]]
-; CHECK: [[PRED_STORE_IF50]]:
-; CHECK-NEXT: store i64 0, ptr [[DST_3]], align 8, !alias.scope [[META15]], !noalias [[META16]]
-; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE51]]
-; CHECK: [[PRED_STORE_CONTINUE51]]:
-; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP13]], i32 0
-; CHECK-NEXT: br i1 [[TMP16]], label %[[PRED_STORE_IF52:.*]], label %[[PRED_STORE_CONTINUE53:.*]]
-; CHECK: [[PRED_STORE_IF52]]:
-; CHECK-NEXT: store i64 0, ptr [[DST_3]], align 8, !alias.scope [[META15]], !noalias [[META16]]
-; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE53]]
-; CHECK: [[PRED_STORE_CONTINUE53]]:
-; CHECK-NEXT: [[TMP17:%.*]] = extractelement <2 x i1> [[TMP13]], i32 1
-; CHECK-NEXT: br i1 [[TMP17]], label %[[PRED_STORE_IF54:.*]], label %[[PRED_STORE_CONTINUE55:.*]]
-; CHECK: [[PRED_STORE_IF54]]:
-; CHECK-NEXT: store i64 0, ptr [[DST_3]], align 8, !alias.scope [[META15]], !noalias [[META16]]
-; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE55]]
-; CHECK: [[PRED_STORE_CONTINUE55]]:
-; CHECK-NEXT: [[TMP18:%.*]] = select <2 x i1> [[TMP4]], <2 x i1> [[BROADCAST_SPLAT41]], <2 x i1> zeroinitializer
; CHECK-NEXT: [[TMP19:%.*]] = select <2 x i1> [[TMP5]], <2 x i1> [[BROADCAST_SPLAT41]], <2 x i1> zeroinitializer
-; CHECK-NEXT: [[TMP20:%.*]] = select <2 x i1> [[TMP18]], <2 x i1> [[BROADCAST_SPLAT57]], <2 x i1> zeroinitializer
; CHECK-NEXT: [[TMP21:%.*]] = select <2 x i1> [[TMP19]], <2 x i1> [[BROADCAST_SPLAT57]], <2 x i1> zeroinitializer
-; CHECK-NEXT: [[TMP22:%.*]] = or <2 x i1> [[TMP47]], [[TMP20]]
; CHECK-NEXT: [[TMP23:%.*]] = or <2 x i1> [[TMP47]], [[TMP21]]
-; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP20]], <2 x i64> zeroinitializer, <2 x i64> splat (i64 1)
; CHECK-NEXT: [[PREDPHI58:%.*]] = select <2 x i1> [[TMP21]], <2 x i64> zeroinitializer, <2 x i64> splat (i64 1)
-; CHECK-NEXT: [[TMP24:%.*]] = extractelement <2 x i1> [[TMP22]], i32 0
-; CHECK-NEXT: br i1 [[TMP24]], label %[[PRED_STORE_IF59:.*]], label %[[PRED_STORE_CONTINUE60:.*]]
-; CHECK: [[PRED_STORE_IF59]]:
-; CHECK-NEXT: [[TMP25:%.*]] = extractelement <2 x i64> [[PREDPHI]], i32 0
-; CHECK-NEXT: store i64 [[TMP25]], ptr [[DST_2]], align 8, !alias.scope [[META17:![0-9]+]], !noalias [[META18:![0-9]+]]
-; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE60]]
-; CHECK: [[PRED_STORE_CONTINUE60]]:
-; CHECK-NEXT: [[TMP26:%.*]] = extractelement <2 x i1> [[TMP22]], i32 1
-; CHECK-NEXT: br i1 [[TMP26]], label %[[PRED_STORE_IF61:.*]], label %[[PRED_STORE_CONTINUE62:.*]]
-; CHECK: [[PRED_STORE_IF61]]:
-; CHECK-NEXT: [[TMP27:%.*]] = extractelement <2 x i64> [[PREDPHI]], i32 1
-; CHECK-NEXT: store i64 [[TMP27]], ptr [[DST_2]], align 8, !alias.scope [[META17]], !noalias [[META18]]
-; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE62]]
-; CHECK: [[PRED_STORE_CONTINUE62]]:
; CHECK-NEXT: [[TMP28:%.*]] = extractelement <2 x i1> [[TMP23]], i32 0
-; CHECK-NEXT: br i1 [[TMP28]], label %[[PRED_STORE_IF63:.*]], label %[[PRED_STORE_CONTINUE64:.*]]
-; CHECK: [[PRED_STORE_IF63]]:
+; CHECK-NEXT: br i1 [[TMP28]], label %[[PRED_STORE_IF48:.*]], label %[[PRED_STORE_CONTINUE49:.*]]
+; CHECK: [[PRED_STORE_IF48]]:
; CHECK-NEXT: [[TMP29:%.*]] = extractelement <2 x i64> [[PREDPHI58]], i32 0
-; CHECK-NEXT: store i64 [[TMP29]], ptr [[DST_2]], align 8, !alias.scope [[META17]], !noalias [[META18]]
-; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE64]]
-; CHECK: [[PRED_STORE_CONTINUE64]]:
+; CHECK-NEXT: store i64 [[TMP29]], ptr [[DST_2]], align 8, !alias.scope [[META17:![0-9]+]], !noalias [[META18:![0-9]+]]
+; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE49]]
+; CHECK: [[PRED_STORE_CONTINUE49]]:
; CHECK-NEXT: [[TMP30:%.*]] = extractelement <2 x i1> [[TMP23]], i32 1
-; CHECK-NEXT: br i1 [[TMP30]], label %[[PRED_STORE_IF65:.*]], label %[[PRED_STORE_CONTINUE66:.*]]
-; CHECK: [[PRED_STORE_IF65]]:
+; CHECK-NEXT: br i1 [[TMP30]], label %[[PRED_STORE_IF50:.*]], label %[[PRED_STORE_CONTINUE51:.*]]
+; CHECK: [[PRED_STORE_IF50]]:
; CHECK-NEXT: [[TMP31:%.*]] = extractelement <2 x i64> [[PREDPHI58]], i32 1
; CHECK-NEXT: store i64 [[TMP31]], ptr [[DST_2]], align 8, !alias.scope [[META17]], !noalias [[META18]]
-; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE66]]
-; CHECK: [[PRED_STORE_CONTINUE66]]:
-; CHECK-NEXT: [[TMP32:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT57]], splat (i1 true)
-; CHECK-NEXT: [[TMP33:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT57]], splat (i1 true)
-; CHECK-NEXT: [[TMP34:%.*]] = select <2 x i1> [[TMP18]], <2 x i1> [[TMP32]], <2 x i1> zeroinitializer
+; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE51]]
+; CHECK: [[PRED_STORE_CONTINUE51]]:
; CHECK-NEXT: [[TMP35:%.*]] = select <2 x i1> [[TMP19]], <2 x i1> [[TMP33]], <2 x i1> zeroinitializer
-; CHECK-NEXT: [[TMP36:%.*]] = or <2 x i1> [[TMP22]], [[TMP34]]
; CHECK-NEXT: [[TMP37:%.*]] = or <2 x i1> [[TMP23]], [[TMP35]]
-; CHECK-NEXT: [[TMP38:%.*]] = extractelement <2 x i1> [[TMP36]], i32 0
-; CHECK-NEXT: br i1 [[TMP38]], label %[[PRED_STORE_IF67:.*]], label %[[PRED_STORE_CONTINUE68:.*]]
-; CHECK: [[PRED_STORE_IF67]]:
-; CHECK-NEXT: [[TMP45:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META19:![0-9]+]]
-; CHECK-NEXT: store i64 [[TMP45]], ptr [[DST]], align 8, !alias.scope [[META20:![0-9]+]], !noalias [[META19]]
-; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE68]]
-; CHECK: [[PRED_STORE_CONTINUE68]]:
-; CHECK-NEXT: [[TMP40:%.*]] = extractelement <2 x i1> [[TMP36]], i32 1
-; CHECK-NEXT: br i1 [[TMP40]], label %[[PRED_STORE_IF69:.*]], label %[[PRED_STORE_CONTINUE70:.*]]
-; CHECK: [[PRED_STORE_IF69]]:
-; CHECK-NEXT: [[TMP39:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META19]]
-; CHECK-NEXT: store i64 [[TMP39]], ptr [[DST]], align 8, !alias.scope [[META20]], !noalias [[META19]]
-; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE70]]
-; CHECK: [[PRED_STORE_CONTINUE70]]:
; CHECK-NEXT: [[TMP42:%.*]] = extractelement <2 x i1> [[TMP37]], i32 0
-; CHECK-NEXT: br i1 [[TMP42]], label %[[PRED_STORE_IF71:.*]], label %[[PRED_STORE_CONTINUE72:.*]]
-; CHECK: [[PRED_STORE_IF71]]:
-; CHECK-NEXT: [[TMP41:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META19]]
-; CHECK-NEXT: store i64 [[TMP41]], ptr [[DST]], align 8, !alias.scope [[META20]], !noalias [[META19]]
-; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE72]]
-; CHECK: [[PRED_STORE_CONTINUE72]]:
+; CHECK-NEXT: br i1 [[TMP42]], label %[[PRED_STORE_IF52:.*]], label %[[PRED_STORE_CONTINUE53:.*]]
+; CHECK: [[PRED_STORE_IF52]]:
+; CHECK-NEXT: [[TMP24:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META19:![0-9]+]]
+; CHECK-NEXT: store i64 [[TMP24]], ptr [[DST]], align 8, !alias.scope [[META20:![0-9]+]], !noalias [[META19]]
+; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE53]]
+; CHECK: [[PRED_STORE_CONTINUE53]]:
; CHECK-NEXT: [[TMP44:%.*]] = extractelement <2 x i1> [[TMP37]], i32 1
-; CHECK-NEXT: br i1 [[TMP44]], label %[[PRED_STORE_IF73:.*]], label %[[PRED_STORE_CONTINUE74]]
-; CHECK: [[PRED_STORE_IF73]]:
-; CHECK-NEXT: [[TMP43:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META19]]
-; CHECK-NEXT: store i64 [[TMP43]], ptr [[DST]], align 8, !alias.scope [[META20]], !noalias [[META19]]
-; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE74]]
-; CHECK: [[PRED_STORE_CONTINUE74]]:
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: br i1 [[TMP44]], label %[[PRED_STORE_IF54:.*]], label %[[PRED_STORE_CONTINUE55]]
+; CHECK: [[PRED_STORE_IF54]]:
+; CHECK-NEXT: [[TMP25:%.*]] = load i64, ptr [[SRC]], align 8, !alias.scope [[META19]]
+; CHECK-NEXT: store i64 [[TMP25]], ptr [[DST]], align 8, !alias.scope [[META20]], !noalias [[META19]]
+; CHECK-NEXT: br label %[[PRED_STORE_CONTINUE55]]
+; CHECK: [[PRED_STORE_CONTINUE55]]:
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP46:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64
; CHECK-NEXT: br i1 [[TMP46]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
@@ -348,6 +288,10 @@ define void @test_exit_branch_cost(ptr %dst, i64 %x, i32 %y, ptr %dst.1, i1 %c.4
; CHECK-NEXT: br label %[[LOOP_HEADER:.*]]
; CHECK: [[LOOP_HEADER]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP_LATCH:.*]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT: [[X_GEP:%.*]] = getelementptr i64, ptr [[X_PTR]], i64 [[IV]]
+; CHECK-NEXT: [[X:%.*]] = load i64, ptr [[X_GEP]], align 8
+; CHECK-NEXT: [[Y_GEP:%.*]] = getelementptr i32, ptr [[Y_PTR]], i64 [[IV]]
+; CHECK-NEXT: [[Y:%.*]] = load i32, ptr [[Y_GEP]], align 4
; CHECK-NEXT: [[C1:%.*]] = icmp eq i64 [[X]], 0
; CHECK-NEXT: br i1 [[C1]], label %[[THEN_4:.*]], label %[[THEN_1:.*]]
; CHECK: [[THEN_1]]:
@@ -386,6 +330,10 @@ entry:
loop.header:
%iv = phi i64 [ %iv.next, %loop.latch ], [ 0, %entry ]
+ %x.gep = getelementptr i64, ptr %x.ptr, i64 %iv
+ %x = load i64, ptr %x.gep
+ %y.gep = getelementptr i32, ptr %y.ptr, i64 %iv
+ %y = load i32, ptr %y.gep
%c1 = icmp eq i64 %x, 0
br i1 %c1, label %then.4, label %then.1
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/blend-any-of-reduction-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/blend-any-of-reduction-cost.ll
index f9c1ab4..3d00c22 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/blend-any-of-reduction-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/blend-any-of-reduction-cost.ll
@@ -75,17 +75,17 @@ define i32 @any_of_reduction_used_in_blend_with_mutliple_phis(ptr %src, i64 %N,
; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i1> poison, i1 [[C_0]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i1> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = xor <vscale x 2 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i1> poison, i1 [[C_1]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i1> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = xor <vscale x 2 x i1> [[BROADCAST_SPLAT2]], splat (i1 true)
+; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i1> [[TMP7]], <vscale x 2 x i1> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <vscale x 2 x ptr> poison, ptr [[SRC]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <vscale x 2 x ptr> [[BROADCAST_SPLATINSERT3]], <vscale x 2 x ptr> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 2 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PREDPHI:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP6:%.*]] = xor <vscale x 2 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
-; CHECK-NEXT: [[TMP7:%.*]] = xor <vscale x 2 x i1> [[BROADCAST_SPLAT2]], splat (i1 true)
-; CHECK-NEXT: [[TMP8:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i1> [[TMP7]], <vscale x 2 x i1> zeroinitializer
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x ptr> @llvm.masked.gather.nxv2p0.nxv2p0(<vscale x 2 x ptr> [[BROADCAST_SPLAT4]], i32 8, <vscale x 2 x i1> [[TMP8]], <vscale x 2 x ptr> poison)
; CHECK-NEXT: [[TMP9:%.*]] = icmp eq <vscale x 2 x ptr> [[WIDE_MASKED_GATHER]], zeroinitializer
; CHECK-NEXT: [[TMP10:%.*]] = or <vscale x 2 x i1> [[VEC_PHI]], [[TMP9]]
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll b/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll
index a7765f4..038e726 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/divrem.ll
@@ -432,6 +432,7 @@ define void @predicated_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <vscale x 2 x i64> [[BROADCAST_SPLAT]], zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x i64> splat (i64 1)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -439,7 +440,6 @@ define void @predicated_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[TMP7]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP8]], i32 0
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP9]], align 8
-; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x i64> splat (i64 1)
; CHECK-NEXT: [[TMP11:%.*]] = udiv <vscale x 2 x i64> [[WIDE_LOAD]], [[TMP10]]
; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[WIDE_LOAD]]
; CHECK-NEXT: store <vscale x 2 x i64> [[PREDPHI]], ptr [[TMP9]], align 8
@@ -477,6 +477,7 @@ define void @predicated_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[V:%.*]], i64 0
; FIXED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
; FIXED-NEXT: [[TMP0:%.*]] = icmp ne <4 x i64> [[BROADCAST_SPLAT]], zeroinitializer
+; FIXED-NEXT: [[TMP5:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[BROADCAST_SPLAT]], <4 x i64> splat (i64 1)
; FIXED-NEXT: br label [[VECTOR_BODY:%.*]]
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -486,10 +487,8 @@ define void @predicated_udiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4
; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8
; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP4]], align 8
-; FIXED-NEXT: [[TMP5:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[BROADCAST_SPLAT]], <4 x i64> splat (i64 1)
-; FIXED-NEXT: [[TMP6:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[BROADCAST_SPLAT]], <4 x i64> splat (i64 1)
; FIXED-NEXT: [[TMP7:%.*]] = udiv <4 x i64> [[WIDE_LOAD]], [[TMP5]]
-; FIXED-NEXT: [[TMP8:%.*]] = udiv <4 x i64> [[WIDE_LOAD1]], [[TMP6]]
+; FIXED-NEXT: [[TMP8:%.*]] = udiv <4 x i64> [[WIDE_LOAD1]], [[TMP5]]
; FIXED-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP7]], <4 x i64> [[WIDE_LOAD]]
; FIXED-NEXT: [[PREDPHI2:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP8]], <4 x i64> [[WIDE_LOAD1]]
; FIXED-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP3]], align 8
@@ -560,6 +559,7 @@ define void @predicated_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne <vscale x 2 x i64> [[BROADCAST_SPLAT]], zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x i64> splat (i64 1)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -567,7 +567,6 @@ define void @predicated_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[TMP7]]
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[TMP8]], i32 0
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP9]], align 8
-; CHECK-NEXT: [[TMP10:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[BROADCAST_SPLAT]], <vscale x 2 x i64> splat (i64 1)
; CHECK-NEXT: [[TMP11:%.*]] = sdiv <vscale x 2 x i64> [[WIDE_LOAD]], [[TMP10]]
; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[WIDE_LOAD]]
; CHECK-NEXT: store <vscale x 2 x i64> [[PREDPHI]], ptr [[TMP9]], align 8
@@ -605,6 +604,7 @@ define void @predicated_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[V:%.*]], i64 0
; FIXED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
; FIXED-NEXT: [[TMP0:%.*]] = icmp ne <4 x i64> [[BROADCAST_SPLAT]], zeroinitializer
+; FIXED-NEXT: [[TMP5:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[BROADCAST_SPLAT]], <4 x i64> splat (i64 1)
; FIXED-NEXT: br label [[VECTOR_BODY:%.*]]
; FIXED: vector.body:
; FIXED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
@@ -614,10 +614,8 @@ define void @predicated_sdiv(ptr noalias nocapture %a, i64 %v, i64 %n) {
; FIXED-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4
; FIXED-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP3]], align 8
; FIXED-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP4]], align 8
-; FIXED-NEXT: [[TMP5:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[BROADCAST_SPLAT]], <4 x i64> splat (i64 1)
-; FIXED-NEXT: [[TMP6:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[BROADCAST_SPLAT]], <4 x i64> splat (i64 1)
; FIXED-NEXT: [[TMP7:%.*]] = sdiv <4 x i64> [[WIDE_LOAD]], [[TMP5]]
-; FIXED-NEXT: [[TMP8:%.*]] = sdiv <4 x i64> [[WIDE_LOAD1]], [[TMP6]]
+; FIXED-NEXT: [[TMP8:%.*]] = sdiv <4 x i64> [[WIDE_LOAD1]], [[TMP5]]
; FIXED-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP7]], <4 x i64> [[WIDE_LOAD]]
; FIXED-NEXT: [[PREDPHI2:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[TMP8]], <4 x i64> [[WIDE_LOAD1]]
; FIXED-NEXT: store <4 x i64> [[PREDPHI]], ptr [[TMP3]], align 8
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reverse-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reverse-load-store.ll
index a330b696..f323231 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reverse-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reverse-load-store.ll
@@ -37,16 +37,16 @@ define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %pt
; IF-EVL-NEXT: [[TMP18:%.*]] = zext i32 [[TMP5]] to i64
; IF-EVL-NEXT: [[TMP9:%.*]] = mul i64 0, [[TMP18]]
; IF-EVL-NEXT: [[TMP10:%.*]] = sub i64 1, [[TMP18]]
-; IF-EVL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i64 [[TMP9]]
-; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[TMP11]], i64 [[TMP10]]
+; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP8]], i64 [[TMP9]]
+; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[TMP16]], i64 [[TMP10]]
; IF-EVL-NEXT: [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP12]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]])
; IF-EVL-NEXT: [[VP_REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_OP_LOAD]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]])
; IF-EVL-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[PTR2:%.*]], i64 [[TMP7]]
; IF-EVL-NEXT: [[TMP19:%.*]] = zext i32 [[TMP5]] to i64
; IF-EVL-NEXT: [[TMP14:%.*]] = mul i64 0, [[TMP19]]
; IF-EVL-NEXT: [[TMP15:%.*]] = sub i64 1, [[TMP19]]
-; IF-EVL-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i64 [[TMP14]]
-; IF-EVL-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[TMP16]], i64 [[TMP15]]
+; IF-EVL-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[TMP13]], i64 [[TMP14]]
+; IF-EVL-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[TMP22]], i64 [[TMP15]]
; IF-EVL-NEXT: [[VP_REVERSE3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32> [[VP_REVERSE]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]])
; IF-EVL-NEXT: call void @llvm.vp.store.nxv4i32.p0(<vscale x 4 x i32> [[VP_REVERSE3]], ptr align 4 [[TMP17]], <vscale x 4 x i1> splat (i1 true), i32 [[TMP5]])
; IF-EVL-NEXT: [[TMP20:%.*]] = zext i32 [[TMP5]] to i64
diff --git a/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll b/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll
index 66bb935..3d23090 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/drop-inbounds-flags-for-reverse-vector-pointer.ll
@@ -4,8 +4,6 @@
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
-; FIXME: GEP flags on GEPs for reverse vector pointer need to be dropped when folding the tail.
-
define i1 @fn(ptr %nno) #0 {
; CHECK-LABEL: define i1 @fn(
; CHECK-SAME: ptr [[NNO:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -26,8 +24,8 @@ define i1 @fn(ptr %nno) #0 {
; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i64> [[VEC_IND]], splat (i64 1)
; CHECK-NEXT: [[TMP3:%.*]] = icmp eq <4 x i64> [[TMP2]], zeroinitializer
; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds nuw i32, ptr [[NNO]], i64 [[TMP22]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP23]], i32 0
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[TMP5]], i32 -3
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[TMP23]], i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[TMP5]], i32 -3
; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i1> [[TMP1]], <4 x i1> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[TMP6]], i32 4, <4 x i1> [[REVERSE]], <4 x i32> poison)
; CHECK-NEXT: [[REVERSE1:%.*]] = shufflevector <4 x i32> [[WIDE_MASKED_LOAD]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr109581-unused-blend.ll b/llvm/test/Transforms/LoopVectorize/X86/pr109581-unused-blend.ll
index 270e6bc..1a9e7dd 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/pr109581-unused-blend.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr109581-unused-blend.ll
@@ -14,72 +14,70 @@ define i32 @unused_blend_after_unrolling(ptr %p, i32 %a, i1 %c.1, i16 %x, i16 %y
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[C_1]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP0:%.*]] = xor <4 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
; CHECK-NEXT: [[BROADCAST_SPLATINSERT16:%.*]] = insertelement <4 x i1> poison, i1 [[C]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT17:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT16]], <4 x i1> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP22:%.*]] = xor <4 x i1> [[BROADCAST_SPLAT17]], splat (i1 true)
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_SDIV_CONTINUE15:.*]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP24:%.*]], %[[PRED_SDIV_CONTINUE15]] ]
-; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP25:%.*]], %[[PRED_SDIV_CONTINUE15]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = xor <4 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
-; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_SDIV_CONTINUE17:.*]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP24:%.*]], %[[PRED_SDIV_CONTINUE17]] ]
+; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP25:%.*]], %[[PRED_SDIV_CONTINUE17]] ]
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i1> [[TMP0]], i32 0
; CHECK-NEXT: br i1 [[TMP2]], label %[[PRED_SDIV_IF:.*]], label %[[PRED_SDIV_CONTINUE:.*]]
; CHECK: [[PRED_SDIV_IF]]:
; CHECK-NEXT: br label %[[PRED_SDIV_CONTINUE]]
; CHECK: [[PRED_SDIV_CONTINUE]]:
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x i1> [[TMP0]], i32 1
-; CHECK-NEXT: br i1 [[TMP3]], label %[[PRED_SDIV_IF2:.*]], label %[[PRED_SDIV_CONTINUE3:.*]]
-; CHECK: [[PRED_SDIV_IF2]]:
-; CHECK-NEXT: br label %[[PRED_SDIV_CONTINUE3]]
-; CHECK: [[PRED_SDIV_CONTINUE3]]:
-; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP0]], i32 2
-; CHECK-NEXT: br i1 [[TMP4]], label %[[PRED_SDIV_IF4:.*]], label %[[PRED_SDIV_CONTINUE5:.*]]
+; CHECK-NEXT: br i1 [[TMP3]], label %[[PRED_SDIV_IF4:.*]], label %[[PRED_SDIV_CONTINUE5:.*]]
; CHECK: [[PRED_SDIV_IF4]]:
; CHECK-NEXT: br label %[[PRED_SDIV_CONTINUE5]]
; CHECK: [[PRED_SDIV_CONTINUE5]]:
-; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i1> [[TMP0]], i32 3
-; CHECK-NEXT: br i1 [[TMP5]], label %[[PRED_SDIV_IF6:.*]], label %[[PRED_SDIV_CONTINUE7:.*]]
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP0]], i32 2
+; CHECK-NEXT: br i1 [[TMP4]], label %[[PRED_SDIV_IF6:.*]], label %[[PRED_SDIV_CONTINUE7:.*]]
; CHECK: [[PRED_SDIV_IF6]]:
; CHECK-NEXT: br label %[[PRED_SDIV_CONTINUE7]]
; CHECK: [[PRED_SDIV_CONTINUE7]]:
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP1]], i32 0
-; CHECK-NEXT: br i1 [[TMP6]], label %[[PRED_SDIV_IF8:.*]], label %[[PRED_SDIV_CONTINUE9:.*]]
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i1> [[TMP0]], i32 3
+; CHECK-NEXT: br i1 [[TMP5]], label %[[PRED_SDIV_IF8:.*]], label %[[PRED_SDIV_CONTINUE9:.*]]
; CHECK: [[PRED_SDIV_IF8]]:
-; CHECK-NEXT: [[TMP7:%.*]] = sdiv i16 [[X]], [[Y]]
-; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x i16> poison, i16 [[TMP7]], i32 0
; CHECK-NEXT: br label %[[PRED_SDIV_CONTINUE9]]
; CHECK: [[PRED_SDIV_CONTINUE9]]:
-; CHECK-NEXT: [[TMP9:%.*]] = phi <4 x i16> [ poison, %[[PRED_SDIV_CONTINUE7]] ], [ [[TMP8]], %[[PRED_SDIV_IF8]] ]
-; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i1> [[TMP1]], i32 1
-; CHECK-NEXT: br i1 [[TMP10]], label %[[PRED_SDIV_IF10:.*]], label %[[PRED_SDIV_CONTINUE11:.*]]
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP0]], i32 0
+; CHECK-NEXT: br i1 [[TMP6]], label %[[PRED_SDIV_IF10:.*]], label %[[PRED_SDIV_CONTINUE11:.*]]
; CHECK: [[PRED_SDIV_IF10]]:
-; CHECK-NEXT: [[TMP11:%.*]] = sdiv i16 [[X]], [[Y]]
-; CHECK-NEXT: [[TMP12:%.*]] = insertelement <4 x i16> [[TMP9]], i16 [[TMP11]], i32 1
+; CHECK-NEXT: [[TMP7:%.*]] = sdiv i16 [[X]], [[Y]]
+; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x i16> poison, i16 [[TMP7]], i32 0
; CHECK-NEXT: br label %[[PRED_SDIV_CONTINUE11]]
; CHECK: [[PRED_SDIV_CONTINUE11]]:
-; CHECK-NEXT: [[TMP13:%.*]] = phi <4 x i16> [ [[TMP9]], %[[PRED_SDIV_CONTINUE9]] ], [ [[TMP12]], %[[PRED_SDIV_IF10]] ]
-; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i1> [[TMP1]], i32 2
-; CHECK-NEXT: br i1 [[TMP14]], label %[[PRED_SDIV_IF12:.*]], label %[[PRED_SDIV_CONTINUE13:.*]]
+; CHECK-NEXT: [[TMP9:%.*]] = phi <4 x i16> [ poison, %[[PRED_SDIV_CONTINUE9]] ], [ [[TMP8]], %[[PRED_SDIV_IF10]] ]
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i1> [[TMP0]], i32 1
+; CHECK-NEXT: br i1 [[TMP10]], label %[[PRED_SDIV_IF12:.*]], label %[[PRED_SDIV_CONTINUE13:.*]]
; CHECK: [[PRED_SDIV_IF12]]:
-; CHECK-NEXT: [[TMP15:%.*]] = sdiv i16 [[X]], [[Y]]
-; CHECK-NEXT: [[TMP16:%.*]] = insertelement <4 x i16> [[TMP13]], i16 [[TMP15]], i32 2
+; CHECK-NEXT: [[TMP11:%.*]] = sdiv i16 [[X]], [[Y]]
+; CHECK-NEXT: [[TMP12:%.*]] = insertelement <4 x i16> [[TMP9]], i16 [[TMP11]], i32 1
; CHECK-NEXT: br label %[[PRED_SDIV_CONTINUE13]]
; CHECK: [[PRED_SDIV_CONTINUE13]]:
-; CHECK-NEXT: [[TMP17:%.*]] = phi <4 x i16> [ [[TMP13]], %[[PRED_SDIV_CONTINUE11]] ], [ [[TMP16]], %[[PRED_SDIV_IF12]] ]
-; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i1> [[TMP1]], i32 3
-; CHECK-NEXT: br i1 [[TMP18]], label %[[PRED_SDIV_IF14:.*]], label %[[PRED_SDIV_CONTINUE15]]
+; CHECK-NEXT: [[TMP13:%.*]] = phi <4 x i16> [ [[TMP9]], %[[PRED_SDIV_CONTINUE11]] ], [ [[TMP12]], %[[PRED_SDIV_IF12]] ]
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i1> [[TMP0]], i32 2
+; CHECK-NEXT: br i1 [[TMP14]], label %[[PRED_SDIV_IF14:.*]], label %[[PRED_SDIV_CONTINUE15:.*]]
; CHECK: [[PRED_SDIV_IF14]]:
-; CHECK-NEXT: [[TMP19:%.*]] = sdiv i16 [[X]], [[Y]]
-; CHECK-NEXT: [[TMP20:%.*]] = insertelement <4 x i16> [[TMP17]], i16 [[TMP19]], i32 3
+; CHECK-NEXT: [[TMP15:%.*]] = sdiv i16 [[X]], [[Y]]
+; CHECK-NEXT: [[TMP16:%.*]] = insertelement <4 x i16> [[TMP13]], i16 [[TMP15]], i32 2
; CHECK-NEXT: br label %[[PRED_SDIV_CONTINUE15]]
; CHECK: [[PRED_SDIV_CONTINUE15]]:
-; CHECK-NEXT: [[TMP21:%.*]] = phi <4 x i16> [ [[TMP17]], %[[PRED_SDIV_CONTINUE13]] ], [ [[TMP20]], %[[PRED_SDIV_IF14]] ]
+; CHECK-NEXT: [[TMP17:%.*]] = phi <4 x i16> [ [[TMP13]], %[[PRED_SDIV_CONTINUE13]] ], [ [[TMP16]], %[[PRED_SDIV_IF14]] ]
+; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i1> [[TMP0]], i32 3
+; CHECK-NEXT: br i1 [[TMP18]], label %[[PRED_SDIV_IF16:.*]], label %[[PRED_SDIV_CONTINUE17]]
+; CHECK: [[PRED_SDIV_IF16]]:
+; CHECK-NEXT: [[TMP19:%.*]] = sdiv i16 [[X]], [[Y]]
+; CHECK-NEXT: [[TMP20:%.*]] = insertelement <4 x i16> [[TMP17]], i16 [[TMP19]], i32 3
+; CHECK-NEXT: br label %[[PRED_SDIV_CONTINUE17]]
+; CHECK: [[PRED_SDIV_CONTINUE17]]:
+; CHECK-NEXT: [[TMP21:%.*]] = phi <4 x i16> [ [[TMP17]], %[[PRED_SDIV_CONTINUE15]] ], [ [[TMP20]], %[[PRED_SDIV_IF16]] ]
; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[BROADCAST_SPLAT]], <4 x i16> zeroinitializer, <4 x i16> [[TMP21]]
-; CHECK-NEXT: [[TMP22:%.*]] = xor <4 x i1> [[BROADCAST_SPLAT17]], splat (i1 true)
-; CHECK-NEXT: [[TMP23:%.*]] = xor <4 x i1> [[BROADCAST_SPLAT17]], splat (i1 true)
; CHECK-NEXT: [[TMP24]] = or <4 x i1> [[VEC_PHI]], [[TMP22]]
-; CHECK-NEXT: [[TMP25]] = or <4 x i1> [[VEC_PHI1]], [[TMP23]]
+; CHECK-NEXT: [[TMP25]] = or <4 x i1> [[VEC_PHI3]], [[TMP22]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i32 [[INDEX_NEXT]], 96
; CHECK-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll b/llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll
index 8d56c33..cfae26a 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/replicate-uniform-call.ll
@@ -15,19 +15,18 @@ define void @smax_call_uniform(ptr %dst, i64 %x) {
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i1> poison, i1 [[C]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT]], <2 x i1> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_UREM_CONTINUE6:.*]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
-; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i1> [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i1> [[TMP1]], i32 0
; CHECK-NEXT: br i1 [[TMP2]], label %[[PRED_UREM_IF:.*]], label %[[PRED_UREM_CONTINUE:.*]]
; CHECK: [[PRED_UREM_IF]]:
; CHECK-NEXT: [[REM:%.*]] = urem i64 [[MUL]], [[X]]
; CHECK-NEXT: br label %[[PRED_UREM_CONTINUE]]
; CHECK: [[PRED_UREM_CONTINUE]]:
; CHECK-NEXT: [[TMP4:%.*]] = phi i64 [ poison, %[[VECTOR_BODY]] ], [ [[REM]], %[[PRED_UREM_IF]] ]
-; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i1> [[TMP0]], i32 1
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i1> [[TMP1]], i32 1
; CHECK-NEXT: br i1 [[TMP5]], label %[[PRED_UREM_IF1:.*]], label %[[PRED_UREM_CONTINUE2:.*]]
; CHECK: [[PRED_UREM_IF1]]:
; CHECK-NEXT: [[TMP6:%.*]] = urem i64 [[MUL]], [[X]]
@@ -48,7 +47,7 @@ define void @smax_call_uniform(ptr %dst, i64 %x) {
; CHECK: [[PRED_UREM_CONTINUE6]]:
; CHECK-NEXT: [[TMP12:%.*]] = tail call i64 @llvm.smax.i64(i64 [[TMP4]], i64 0)
; CHECK-NEXT: [[TMP13:%.*]] = tail call i64 @llvm.smax.i64(i64 [[TMP9]], i64 0)
-; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP0]], i32 0
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP1]], i32 0
; CHECK-NEXT: [[P:%.*]] = select i1 [[TMP14]], i64 [[TMP12]], i64 1
; CHECK-NEXT: [[TMP15:%.*]] = extractelement <2 x i1> [[TMP1]], i32 0
; CHECK-NEXT: [[PREDPHI7:%.*]] = select i1 [[TMP15]], i64 [[TMP13]], i64 1
diff --git a/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll b/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll
index c14c34c..a0294f7 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/scatter_crash.ll
@@ -129,6 +129,7 @@ define void @_Z3fn1v() #0 {
; CHECK-NEXT: [[IND_END43:%.*]] = mul i64 [[N_VEC32]], 2
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i1> poison, i1 [[TOBOOL6]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i1> [[BROADCAST_SPLATINSERT]], <16 x i1> poison, <16 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP34:%.*]] = xor <16 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
; CHECK-NEXT: br label [[VECTOR_BODY29:%.*]]
; CHECK: vector.body29:
; CHECK-NEXT: [[INDEX34:%.*]] = phi i64 [ 0, [[VECTOR_PH30]] ], [ [[INDEX_NEXT39:%.*]], [[VECTOR_BODY29]] ]
@@ -138,7 +139,6 @@ define void @_Z3fn1v() #0 {
; CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds [10 x [10 x i32]], ptr @d, i64 0, <16 x i64> [[VEC_IND35]]
; CHECK-NEXT: [[TMP32:%.*]] = add nsw <16 x i64> [[TMP30]], [[VEC_IND37]]
; CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds [10 x i32], <16 x ptr> [[TMP31]], <16 x i64> [[TMP32]], i64 0
-; CHECK-NEXT: [[TMP34:%.*]] = xor <16 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
; CHECK-NEXT: call void @llvm.masked.scatter.v16i32.v16p0(<16 x i32> splat (i32 8), <16 x ptr> [[TMP33]], i32 16, <16 x i1> [[TMP34]])
; CHECK-NEXT: [[TMP35:%.*]] = or disjoint <16 x i64> [[VEC_IND37]], splat (i64 1)
; CHECK-NEXT: [[TMP36:%.*]] = add nsw <16 x i64> [[TMP30]], [[TMP35]]
@@ -173,16 +173,17 @@ define void @_Z3fn1v() #0 {
; CHECK-NEXT: [[TMP43:%.*]] = mul i64 [[N_VEC53]], 2
; CHECK-NEXT: [[IND_END54:%.*]] = add i64 8, [[TMP43]]
; CHECK-NEXT: [[IND_END57:%.*]] = mul i64 [[N_VEC53]], 2
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT50:%.*]] = insertelement <8 x i1> poison, i1 [[TOBOOL6]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT73:%.*]] = shufflevector <8 x i1> [[BROADCAST_SPLATINSERT50]], <8 x i1> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP48:%.*]] = xor <8 x i1> [[BROADCAST_SPLAT73]], splat (i1 true)
; CHECK-NEXT: [[DOTSPLATINSERT62:%.*]] = insertelement <8 x i64> poison, i64 [[BC_RESUME_VAL42]], i64 0
; CHECK-NEXT: [[DOTSPLAT63:%.*]] = shufflevector <8 x i64> [[DOTSPLATINSERT62]], <8 x i64> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: [[INDUCTION64:%.*]] = add <8 x i64> [[DOTSPLAT63]], <i64 0, i64 2, i64 4, i64 6, i64 8, i64 10, i64 12, i64 14>
; CHECK-NEXT: [[DOTSPLATINSERT67:%.*]] = insertelement <8 x i64> poison, i64 [[BC_RESUME_VAL44]], i64 0
; CHECK-NEXT: [[DOTSPLAT68:%.*]] = shufflevector <8 x i64> [[DOTSPLATINSERT67]], <8 x i64> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: [[INDUCTION69:%.*]] = add <8 x i64> [[DOTSPLAT68]], <i64 0, i64 2, i64 4, i64 6, i64 8, i64 10, i64 12, i64 14>
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT72:%.*]] = insertelement <8 x i1> poison, i1 [[TOBOOL6]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT73:%.*]] = shufflevector <8 x i1> [[BROADCAST_SPLATINSERT72]], <8 x i1> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY50:%.*]]
-; CHECK: vec.epilog.vector.body50:
+; CHECK: vec.epilog.vector.body52:
; CHECK-NEXT: [[INDEX61:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL51]], [[VEC_EPILOG_PH42]] ], [ [[INDEX_NEXT74:%.*]], [[VEC_EPILOG_VECTOR_BODY50]] ]
; CHECK-NEXT: [[VEC_IND65:%.*]] = phi <8 x i64> [ [[INDUCTION64]], [[VEC_EPILOG_PH42]] ], [ [[VEC_IND_NEXT66:%.*]], [[VEC_EPILOG_VECTOR_BODY50]] ]
; CHECK-NEXT: [[VEC_IND70:%.*]] = phi <8 x i64> [ [[INDUCTION69]], [[VEC_EPILOG_PH42]] ], [ [[VEC_IND_NEXT71:%.*]], [[VEC_EPILOG_VECTOR_BODY50]] ]
@@ -190,7 +191,6 @@ define void @_Z3fn1v() #0 {
; CHECK-NEXT: [[TMP45:%.*]] = getelementptr inbounds [10 x [10 x i32]], ptr @d, i64 0, <8 x i64> [[VEC_IND65]]
; CHECK-NEXT: [[TMP46:%.*]] = add nsw <8 x i64> [[TMP44]], [[VEC_IND70]]
; CHECK-NEXT: [[TMP47:%.*]] = getelementptr inbounds [10 x i32], <8 x ptr> [[TMP45]], <8 x i64> [[TMP46]], i64 0
-; CHECK-NEXT: [[TMP48:%.*]] = xor <8 x i1> [[BROADCAST_SPLAT73]], splat (i1 true)
; CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> splat (i32 8), <8 x ptr> [[TMP47]], i32 16, <8 x i1> [[TMP48]])
; CHECK-NEXT: [[TMP49:%.*]] = or disjoint <8 x i64> [[VEC_IND70]], splat (i64 1)
; CHECK-NEXT: [[TMP50:%.*]] = add nsw <8 x i64> [[TMP44]], [[TMP49]]
diff --git a/llvm/test/Transforms/LoopVectorize/blend-in-header.ll b/llvm/test/Transforms/LoopVectorize/blend-in-header.ll
index 4c95584..2fea016 100644
--- a/llvm/test/Transforms/LoopVectorize/blend-in-header.ll
+++ b/llvm/test/Transforms/LoopVectorize/blend-in-header.ll
@@ -171,11 +171,11 @@ define i64 @invar_cond_incoming_ops_reordered(i1 %c) {
; CHECK: vector.ph:
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[C]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP1]], <4 x i64> splat (i64 1), <4 x i64> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
-; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP1]], <4 x i64> splat (i64 1), <4 x i64> zeroinitializer
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[TMP0:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
; CHECK-NEXT: br i1 [[TMP0]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll b/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll
index c4509e4..7db53d8 100644
--- a/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll
+++ b/llvm/test/Transforms/LoopVectorize/if-pred-stores.ll
@@ -172,6 +172,7 @@ define void @bug18724(i1 %cond, ptr %ptr, i1 %cond.2, i64 %v.1, i32 %v.2) {
; UNROLL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP4]], 2
; UNROLL-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF]]
; UNROLL-NEXT: [[IND_END:%.*]] = add i64 [[V_1]], [[N_VEC]]
+; UNROLL-NEXT: [[TMP13:%.*]] = xor i1 [[COND_2:%.*]], true
; UNROLL-NEXT: br label [[VECTOR_BODY:%.*]]
; UNROLL: vector.body:
; UNROLL-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE3:%.*]] ]
@@ -184,7 +185,7 @@ define void @bug18724(i1 %cond, ptr %ptr, i1 %cond.2, i64 %v.1, i32 %v.2) {
; UNROLL-NEXT: [[TMP8:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR]], i64 0, i64 [[TMP6]]
; UNROLL-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP7]], align 4
; UNROLL-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP8]], align 4
-; UNROLL-NEXT: br i1 [[COND_2:%.*]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE3]]
+; UNROLL-NEXT: br i1 [[COND_2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE3]]
; UNROLL: pred.store.if:
; UNROLL-NEXT: store i32 [[TMP9]], ptr [[TMP7]], align 4
; UNROLL-NEXT: store i32 [[TMP10]], ptr [[TMP8]], align 4
@@ -192,10 +193,8 @@ define void @bug18724(i1 %cond, ptr %ptr, i1 %cond.2, i64 %v.1, i32 %v.2) {
; UNROLL: pred.store.continue3:
; UNROLL-NEXT: [[TMP11:%.*]] = add i32 [[VEC_PHI]], 1
; UNROLL-NEXT: [[TMP12:%.*]] = add i32 [[VEC_PHI1]], 1
-; UNROLL-NEXT: [[TMP13:%.*]] = xor i1 [[COND_2]], true
-; UNROLL-NEXT: [[TMP14:%.*]] = xor i1 [[COND_2]], true
; UNROLL-NEXT: [[PREDPHI]] = select i1 [[TMP13]], i32 [[VEC_PHI]], i32 [[TMP11]]
-; UNROLL-NEXT: [[PREDPHI4]] = select i1 [[TMP14]], i32 [[VEC_PHI1]], i32 [[TMP12]]
+; UNROLL-NEXT: [[PREDPHI4]] = select i1 [[TMP13]], i32 [[VEC_PHI1]], i32 [[TMP12]]
; UNROLL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; UNROLL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; UNROLL-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
@@ -244,6 +243,7 @@ define void @bug18724(i1 %cond, ptr %ptr, i1 %cond.2, i64 %v.1, i32 %v.2) {
; UNROLL-NOSIMPLIFY-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 2
; UNROLL-NOSIMPLIFY-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[N_MOD_VF]]
; UNROLL-NOSIMPLIFY-NEXT: [[IND_END:%.*]] = add i64 [[V_1]], [[N_VEC]]
+; UNROLL-NOSIMPLIFY-NEXT: [[TMP12:%.*]] = xor i1 [[COND_2:%.*]], true
; UNROLL-NOSIMPLIFY-NEXT: br label [[VECTOR_BODY:%.*]]
; UNROLL-NOSIMPLIFY: vector.body:
; UNROLL-NOSIMPLIFY-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE3:%.*]] ]
@@ -256,7 +256,7 @@ define void @bug18724(i1 %cond, ptr %ptr, i1 %cond.2, i64 %v.1, i32 %v.2) {
; UNROLL-NOSIMPLIFY-NEXT: [[TMP7:%.*]] = getelementptr inbounds [768 x i32], ptr [[PTR]], i64 0, i64 [[TMP5]]
; UNROLL-NOSIMPLIFY-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP6]], align 4
; UNROLL-NOSIMPLIFY-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP7]], align 4
-; UNROLL-NOSIMPLIFY-NEXT: br i1 [[COND_2:%.*]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
+; UNROLL-NOSIMPLIFY-NEXT: br i1 [[COND_2]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; UNROLL-NOSIMPLIFY: pred.store.if:
; UNROLL-NOSIMPLIFY-NEXT: store i32 [[TMP8]], ptr [[TMP6]], align 4
; UNROLL-NOSIMPLIFY-NEXT: br label [[PRED_STORE_CONTINUE]]
@@ -268,10 +268,8 @@ define void @bug18724(i1 %cond, ptr %ptr, i1 %cond.2, i64 %v.1, i32 %v.2) {
; UNROLL-NOSIMPLIFY: pred.store.continue3:
; UNROLL-NOSIMPLIFY-NEXT: [[TMP10:%.*]] = add i32 [[VEC_PHI]], 1
; UNROLL-NOSIMPLIFY-NEXT: [[TMP11:%.*]] = add i32 [[VEC_PHI1]], 1
-; UNROLL-NOSIMPLIFY-NEXT: [[TMP12:%.*]] = xor i1 [[COND_2]], true
-; UNROLL-NOSIMPLIFY-NEXT: [[TMP13:%.*]] = xor i1 [[COND_2]], true
; UNROLL-NOSIMPLIFY-NEXT: [[PREDPHI]] = select i1 [[TMP12]], i32 [[VEC_PHI]], i32 [[TMP10]]
-; UNROLL-NOSIMPLIFY-NEXT: [[PREDPHI4]] = select i1 [[TMP13]], i32 [[VEC_PHI1]], i32 [[TMP11]]
+; UNROLL-NOSIMPLIFY-NEXT: [[PREDPHI4]] = select i1 [[TMP12]], i32 [[VEC_PHI1]], i32 [[TMP11]]
; UNROLL-NOSIMPLIFY-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; UNROLL-NOSIMPLIFY-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; UNROLL-NOSIMPLIFY-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
@@ -321,9 +319,10 @@ define void @bug18724(i1 %cond, ptr %ptr, i1 %cond.2, i64 %v.1, i32 %v.2) {
; VEC-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP4]], 2
; VEC-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF]]
; VEC-NEXT: [[IND_END:%.*]] = add i64 [[V_1]], [[N_VEC]]
-; VEC-NEXT: [[TMP5:%.*]] = insertelement <2 x i32> zeroinitializer, i32 [[V_2:%.*]], i32 0
; VEC-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i1> poison, i1 [[COND_2:%.*]], i64 0
; VEC-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT]], <2 x i1> poison, <2 x i32> zeroinitializer
+; VEC-NEXT: [[TMP17:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
+; VEC-NEXT: [[TMP5:%.*]] = insertelement <2 x i32> zeroinitializer, i32 [[V_2:%.*]], i32 0
; VEC-NEXT: br label [[VECTOR_BODY:%.*]]
; VEC: vector.body:
; VEC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
@@ -351,7 +350,6 @@ define void @bug18724(i1 %cond, ptr %ptr, i1 %cond.2, i64 %v.1, i32 %v.2) {
; VEC-NEXT: br label [[PRED_STORE_CONTINUE2]]
; VEC: pred.store.continue2:
; VEC-NEXT: [[TMP16:%.*]] = add <2 x i32> [[VEC_PHI]], splat (i32 1)
-; VEC-NEXT: [[TMP17:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
; VEC-NEXT: [[PREDPHI]] = select <2 x i1> [[TMP17]], <2 x i32> [[VEC_PHI]], <2 x i32> [[TMP16]]
; VEC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; VEC-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
diff --git a/llvm/test/Transforms/LoopVectorize/induction.ll b/llvm/test/Transforms/LoopVectorize/induction.ll
index 2175eab..96311de 100644
--- a/llvm/test/Transforms/LoopVectorize/induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/induction.ll
@@ -1962,6 +1962,7 @@ define i32 @scalarize_induction_variable_05(ptr %a, i32 %x, i1 %c, i32 %n) {
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 [[SMAX]], [[N_MOD_VF]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i1> poison, i1 [[C:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT]], <2 x i1> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP14:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_UDIV_CONTINUE2:%.*]] ]
@@ -1989,7 +1990,6 @@ define i32 @scalarize_induction_variable_05(ptr %a, i32 %x, i1 %c, i32 %n) {
; CHECK-NEXT: br label [[PRED_UDIV_CONTINUE2]]
; CHECK: pred.udiv.continue2:
; CHECK-NEXT: [[TMP13:%.*]] = phi <2 x i32> [ [[TMP7]], [[PRED_UDIV_CONTINUE]] ], [ [[TMP12]], [[PRED_UDIV_IF1]] ]
-; CHECK-NEXT: [[TMP14:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP14]], <2 x i32> [[WIDE_LOAD]], <2 x i32> [[TMP13]]
; CHECK-NEXT: [[TMP15]] = add <2 x i32> [[PREDPHI]], [[VEC_PHI]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
@@ -2030,6 +2030,8 @@ define i32 @scalarize_induction_variable_05(ptr %a, i32 %x, i1 %c, i32 %n) {
; IND: vector.ph:
; IND-NEXT: [[N_VEC:%.*]] = and i32 [[SMAX]], 2147483646
; IND-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i1> poison, i1 [[C:%.*]], i64 0
+; IND-NEXT: [[TMP11:%.*]] = xor <2 x i1> [[BROADCAST_SPLATINSERT]], <i1 true, i1 poison>
+; IND-NEXT: [[TMP12:%.*]] = shufflevector <2 x i1> [[TMP11]], <2 x i1> poison, <2 x i32> zeroinitializer
; IND-NEXT: br label [[VECTOR_BODY:%.*]]
; IND: vector.body:
; IND-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_UDIV_CONTINUE2:%.*]] ]
@@ -2054,8 +2056,6 @@ define i32 @scalarize_induction_variable_05(ptr %a, i32 %x, i1 %c, i32 %n) {
; IND-NEXT: br label [[PRED_UDIV_CONTINUE2]]
; IND: pred.udiv.continue2:
; IND-NEXT: [[TMP10:%.*]] = phi <2 x i32> [ [[TMP5]], [[PRED_UDIV_CONTINUE]] ], [ [[TMP9]], [[PRED_UDIV_IF1]] ]
-; IND-NEXT: [[TMP11:%.*]] = xor <2 x i1> [[BROADCAST_SPLATINSERT]], <i1 true, i1 poison>
-; IND-NEXT: [[TMP12:%.*]] = shufflevector <2 x i1> [[TMP11]], <2 x i1> poison, <2 x i32> zeroinitializer
; IND-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP12]], <2 x i32> [[WIDE_LOAD]], <2 x i32> [[TMP10]]
; IND-NEXT: [[TMP13]] = add <2 x i32> [[PREDPHI]], [[VEC_PHI]]
; IND-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2
@@ -2097,7 +2097,8 @@ define i32 @scalarize_induction_variable_05(ptr %a, i32 %x, i1 %c, i32 %n) {
; UNROLL: vector.ph:
; UNROLL-NEXT: [[N_VEC:%.*]] = and i32 [[SMAX]], 2147483644
; UNROLL-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i1> poison, i1 [[C:%.*]], i64 0
-; UNROLL-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT]], <2 x i1> poison, <2 x i32> zeroinitializer
+; UNROLL-NEXT: [[TMP27:%.*]] = xor <2 x i1> [[BROADCAST_SPLATINSERT]], <i1 true, i1 poison>
+; UNROLL-NEXT: [[TMP28:%.*]] = shufflevector <2 x i1> [[TMP27]], <2 x i1> poison, <2 x i32> zeroinitializer
; UNROLL-NEXT: br label [[VECTOR_BODY:%.*]]
; UNROLL: vector.body:
; UNROLL-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_UDIV_CONTINUE8:%.*]] ]
@@ -2143,8 +2144,8 @@ define i32 @scalarize_induction_variable_05(ptr %a, i32 %x, i1 %c, i32 %n) {
; UNROLL-NEXT: br label [[PRED_UDIV_CONTINUE8]]
; UNROLL: pred.udiv.continue8:
; UNROLL-NEXT: [[TMP21:%.*]] = phi <2 x i32> [ [[TMP16]], [[PRED_UDIV_CONTINUE6]] ], [ [[TMP20]], [[PRED_UDIV_IF7]] ]
-; UNROLL-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[BROADCAST_SPLAT]], <2 x i32> [[TMP11]], <2 x i32> [[WIDE_LOAD]]
-; UNROLL-NEXT: [[PREDPHI9:%.*]] = select <2 x i1> [[BROADCAST_SPLAT]], <2 x i32> [[TMP21]], <2 x i32> [[WIDE_LOAD2]]
+; UNROLL-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP28]], <2 x i32> [[WIDE_LOAD]], <2 x i32> [[TMP11]]
+; UNROLL-NEXT: [[PREDPHI9:%.*]] = select <2 x i1> [[TMP28]], <2 x i32> [[WIDE_LOAD2]], <2 x i32> [[TMP21]]
; UNROLL-NEXT: [[TMP22]] = add <2 x i32> [[PREDPHI]], [[VEC_PHI]]
; UNROLL-NEXT: [[TMP23]] = add <2 x i32> [[PREDPHI9]], [[VEC_PHI1]]
; UNROLL-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
@@ -2189,6 +2190,7 @@ define i32 @scalarize_induction_variable_05(ptr %a, i32 %x, i1 %c, i32 %n) {
; UNROLL-NO-IC-NEXT: [[N_VEC:%.*]] = sub i32 [[SMAX]], [[N_MOD_VF]]
; UNROLL-NO-IC-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i1> poison, i1 [[C:%.*]], i64 0
; UNROLL-NO-IC-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT]], <2 x i1> poison, <2 x i32> zeroinitializer
+; UNROLL-NO-IC-NEXT: [[TMP27:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
; UNROLL-NO-IC-NEXT: br label [[VECTOR_BODY:%.*]]
; UNROLL-NO-IC: vector.body:
; UNROLL-NO-IC-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_UDIV_CONTINUE8:%.*]] ]
@@ -2239,10 +2241,8 @@ define i32 @scalarize_induction_variable_05(ptr %a, i32 %x, i1 %c, i32 %n) {
; UNROLL-NO-IC-NEXT: br label [[PRED_UDIV_CONTINUE8]]
; UNROLL-NO-IC: pred.udiv.continue8:
; UNROLL-NO-IC-NEXT: [[TMP26:%.*]] = phi <2 x i32> [ [[TMP20]], [[PRED_UDIV_CONTINUE6]] ], [ [[TMP25]], [[PRED_UDIV_IF7]] ]
-; UNROLL-NO-IC-NEXT: [[TMP27:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
-; UNROLL-NO-IC-NEXT: [[TMP28:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
; UNROLL-NO-IC-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP27]], <2 x i32> [[WIDE_LOAD]], <2 x i32> [[TMP14]]
-; UNROLL-NO-IC-NEXT: [[PREDPHI9:%.*]] = select <2 x i1> [[TMP28]], <2 x i32> [[WIDE_LOAD2]], <2 x i32> [[TMP26]]
+; UNROLL-NO-IC-NEXT: [[PREDPHI9:%.*]] = select <2 x i1> [[TMP27]], <2 x i32> [[WIDE_LOAD2]], <2 x i32> [[TMP26]]
; UNROLL-NO-IC-NEXT: [[TMP29]] = add <2 x i32> [[PREDPHI]], [[VEC_PHI]]
; UNROLL-NO-IC-NEXT: [[TMP30]] = add <2 x i32> [[PREDPHI9]], [[VEC_PHI1]]
; UNROLL-NO-IC-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
@@ -2284,7 +2284,8 @@ define i32 @scalarize_induction_variable_05(ptr %a, i32 %x, i1 %c, i32 %n) {
; INTERLEAVE: vector.ph:
; INTERLEAVE-NEXT: [[N_VEC:%.*]] = and i32 [[SMAX]], 2147483640
; INTERLEAVE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[C:%.*]], i64 0
-; INTERLEAVE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer
+; INTERLEAVE-NEXT: [[TMP47:%.*]] = xor <4 x i1> [[BROADCAST_SPLATINSERT]], <i1 true, i1 poison, i1 poison, i1 poison>
+; INTERLEAVE-NEXT: [[TMP48:%.*]] = shufflevector <4 x i1> [[TMP47]], <4 x i1> poison, <4 x i32> zeroinitializer
; INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]]
; INTERLEAVE: vector.body:
; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_UDIV_CONTINUE16:%.*]] ]
@@ -2366,8 +2367,8 @@ define i32 @scalarize_induction_variable_05(ptr %a, i32 %x, i1 %c, i32 %n) {
; INTERLEAVE-NEXT: br label [[PRED_UDIV_CONTINUE16]]
; INTERLEAVE: pred.udiv.continue16:
; INTERLEAVE-NEXT: [[TMP41:%.*]] = phi <4 x i32> [ [[TMP36]], [[PRED_UDIV_CONTINUE14]] ], [ [[TMP40]], [[PRED_UDIV_IF15]] ]
-; INTERLEAVE-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[BROADCAST_SPLAT]], <4 x i32> [[TMP21]], <4 x i32> [[WIDE_LOAD]]
-; INTERLEAVE-NEXT: [[PREDPHI17:%.*]] = select <4 x i1> [[BROADCAST_SPLAT]], <4 x i32> [[TMP41]], <4 x i32> [[WIDE_LOAD2]]
+; INTERLEAVE-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP48]], <4 x i32> [[WIDE_LOAD]], <4 x i32> [[TMP21]]
+; INTERLEAVE-NEXT: [[PREDPHI17:%.*]] = select <4 x i1> [[TMP48]], <4 x i32> [[WIDE_LOAD2]], <4 x i32> [[TMP41]]
; INTERLEAVE-NEXT: [[TMP42]] = add <4 x i32> [[PREDPHI]], [[VEC_PHI]]
; INTERLEAVE-NEXT: [[TMP43]] = add <4 x i32> [[PREDPHI17]], [[VEC_PHI1]]
; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
diff --git a/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization-2.ll b/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization-2.ll
index bc1c1bf..e8ad6a3 100644
--- a/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization-2.ll
+++ b/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization-2.ll
@@ -134,12 +134,12 @@ define void @inv_val_store_to_inv_address_conditional_inv(ptr %a, i64 %n, ptr %b
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[SMAX2]], 9223372036854775804
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i1> poison, i1 [[CMP]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[NTRUNC]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP1:%.*]] = insertelement <4 x i1> poison, i1 [[CMP]], i64 3
-; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = insertelement <4 x i32> poison, i32 [[K]], i64 3
+; CHECK-NEXT: [[BROADCAST_SPLAT6:%.*]] = insertelement <4 x i32> poison, i32 [[K]], i64 0
; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> [[BROADCAST_SPLAT]], <4 x i32> [[BROADCAST_SPLAT6]]
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i32> [[PREDPHI]], i64 3
+; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i32> [[PREDPHI]], i64 0
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
diff --git a/llvm/test/Transforms/LoopVectorize/pr37248.ll b/llvm/test/Transforms/LoopVectorize/pr37248.ll
index ed7762f..fe660a8 100644
--- a/llvm/test/Transforms/LoopVectorize/pr37248.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr37248.ll
@@ -41,26 +41,26 @@ define void @f1(ptr noalias %b, i1 %c, i32 %start) {
; CHECK-NEXT: [[IND_END:%.*]] = sub i32 [[START]], [[N_VEC]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i1> poison, i1 [[C]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT]], <2 x i1> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP10:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE3:%.*]] ]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i32 [[START]], [[INDEX]]
-; CHECK-NEXT: [[TMP10:%.*]] = trunc i32 [[OFFSET_IDX]] to i16
-; CHECK-NEXT: [[TMP11:%.*]] = add i16 [[TMP10]], 0
-; CHECK-NEXT: [[TMP12:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
-; CHECK-NEXT: [[TMP13:%.*]] = extractelement <2 x i1> [[TMP12]], i32 0
+; CHECK-NEXT: [[TMP11:%.*]] = trunc i32 [[OFFSET_IDX]] to i16
+; CHECK-NEXT: [[TMP12:%.*]] = add i16 [[TMP11]], 0
+; CHECK-NEXT: [[TMP13:%.*]] = extractelement <2 x i1> [[TMP10]], i32 0
; CHECK-NEXT: br i1 [[TMP13]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
; CHECK: pred.store.if:
; CHECK-NEXT: store i32 10, ptr [[B]], align 1
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
; CHECK: pred.store.continue:
-; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP12]], i32 1
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP10]], i32 1
; CHECK-NEXT: br i1 [[TMP14]], label [[PRED_STORE_IF2:%.*]], label [[PRED_STORE_CONTINUE3]]
; CHECK: pred.store.if2:
; CHECK-NEXT: store i32 10, ptr [[B]], align 1
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE3]]
; CHECK: pred.store.continue3:
-; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i16], ptr @a, i16 0, i16 [[TMP11]]
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i16], ptr @a, i16 0, i16 [[TMP12]]
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i16, ptr [[TMP15]], i32 0
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i16, ptr [[TMP16]], i32 -1
; CHECK-NEXT: store <2 x i16> zeroinitializer, ptr [[TMP17]], align 1
diff --git a/llvm/test/Transforms/LoopVectorize/pr55167-fold-tail-live-out.ll b/llvm/test/Transforms/LoopVectorize/pr55167-fold-tail-live-out.ll
index 4f47e66..a129a4b 100644
--- a/llvm/test/Transforms/LoopVectorize/pr55167-fold-tail-live-out.ll
+++ b/llvm/test/Transforms/LoopVectorize/pr55167-fold-tail-live-out.ll
@@ -6,25 +6,25 @@ define i32 @test(i32 %a, i1 %c.1, i1 %c.2 ) #0 {
; CHECK-NEXT: bb:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
-; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[A:%.*]], i64 0
-; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i32> [[BROADCAST_SPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i32> [[BROADCAST_SPLAT]], splat (i32 1)
; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <2 x i1> poison, i1 [[C_1:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT1]], <2 x i1> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT2]], splat (i1 true)
; CHECK-NEXT: [[BROADCAST_SPLATINSERT3:%.*]] = insertelement <2 x i1> poison, i1 [[C_2:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT4:%.*]] = shufflevector <2 x i1> [[BROADCAST_SPLATINSERT3]], <2 x i1> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP6:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT4]], splat (i1 true)
+; CHECK-NEXT: [[TMP7:%.*]] = select <2 x i1> [[TMP4]], <2 x i1> [[TMP6]], <2 x i1> zeroinitializer
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT4:%.*]] = insertelement <2 x i32> poison, i32 [[A:%.*]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT5:%.*]] = shufflevector <2 x i32> [[BROADCAST_SPLATINSERT4]], <2 x i32> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i32> [[BROADCAST_SPLAT5]], splat (i32 1)
+; CHECK-NEXT: [[TMP5:%.*]] = select <2 x i1> [[TMP4]], <2 x i1> [[BROADCAST_SPLAT4]], <2 x i1> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 6, i32 7>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <2 x i32> [ <i32 35902, i32 0>, [[VECTOR_PH]] ], [ [[PREDPHI7:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT2]], splat (i1 true)
; CHECK-NEXT: [[TMP0:%.*]] = add <2 x i32> [[VEC_PHI]], splat (i32 10)
-; CHECK-NEXT: [[TMP6:%.*]] = xor <2 x i1> [[BROADCAST_SPLAT4]], splat (i1 true)
-; CHECK-NEXT: [[TMP7:%.*]] = select <2 x i1> [[TMP4]], <2 x i1> [[TMP6]], <2 x i1> zeroinitializer
; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i32> [[TMP0]], splat (i32 20)
; CHECK-NEXT: [[TMP3:%.*]] = add <2 x i32> [[TMP1]], [[TMP2]]
-; CHECK-NEXT: [[TMP5:%.*]] = select <2 x i1> [[TMP4]], <2 x i1> [[BROADCAST_SPLAT4]], <2 x i1> zeroinitializer
; CHECK-NEXT: [[PREDPHI:%.*]] = select <2 x i1> [[TMP5]], <2 x i32> splat (i32 9), <2 x i32> [[VEC_IND]]
; CHECK-NEXT: [[PREDPHI5:%.*]] = select <2 x i1> [[TMP7]], <2 x i32> splat (i32 9), <2 x i32> [[PREDPHI]]
; CHECK-NEXT: [[PREDPHI6:%.*]] = select <2 x i1> [[TMP5]], <2 x i32> [[TMP0]], <2 x i32> [[VEC_PHI]]
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-small-size.ll b/llvm/test/Transforms/LoopVectorize/reduction-small-size.ll
index 8a8439f..ca971f1 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-small-size.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-small-size.ll
@@ -89,13 +89,13 @@ define i8 @PR34687_no_undef(i1 %c, i32 %x, i32 %n) {
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i32> poison, i32 [[X:%.*]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT1]], <4 x i32> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP0:%.*]] = select <4 x i1> [[BROADCAST_SPLAT]], <4 x i32> [[BROADCAST_SPLAT2]], <4 x i32> splat (i32 1)
+; CHECK-NEXT: [[TMP1:%.*]] = sdiv <4 x i32> splat (i32 99), [[TMP0]]
+; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[BROADCAST_SPLAT]], <4 x i32> [[TMP1]], <4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP6:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = select <4 x i1> [[BROADCAST_SPLAT]], <4 x i32> [[BROADCAST_SPLAT2]], <4 x i32> splat (i32 1)
-; CHECK-NEXT: [[TMP1:%.*]] = sdiv <4 x i32> splat (i32 99), [[TMP0]]
-; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[BROADCAST_SPLAT]], <4 x i32> [[TMP1]], <4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = and <4 x i32> [[VEC_PHI]], splat (i32 255)
; CHECK-NEXT: [[TMP4:%.*]] = add <4 x i32> [[TMP3]], [[PREDPHI]]
; CHECK-NEXT: [[TMP5:%.*]] = trunc <4 x i32> [[TMP4]] to <4 x i8>
diff --git a/llvm/test/Transforms/LoopVectorize/select-cmp.ll b/llvm/test/Transforms/LoopVectorize/select-cmp.ll
index 301526c..550e52d 100644
--- a/llvm/test/Transforms/LoopVectorize/select-cmp.ll
+++ b/llvm/test/Transforms/LoopVectorize/select-cmp.ll
@@ -1006,11 +1006,11 @@ define i32 @select_i32_from_icmp_same_inputs(i32 %a, i32 %b, i64 %n) {
; CHECK-VF4IC1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[A]], i64 0
; CHECK-VF4IC1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-VF4IC1-NEXT: [[TMP0:%.*]] = icmp eq <4 x i32> [[BROADCAST_SPLAT]], splat (i32 3)
+; CHECK-VF4IC1-NEXT: [[TMP1:%.*]] = xor <4 x i1> [[TMP0]], splat (i1 true)
; CHECK-VF4IC1-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK-VF4IC1: [[VECTOR_BODY]]:
; CHECK-VF4IC1-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-VF4IC1-NEXT: [[VEC_PHI:%.*]] = phi <4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP2:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-VF4IC1-NEXT: [[TMP1:%.*]] = xor <4 x i1> [[TMP0]], splat (i1 true)
; CHECK-VF4IC1-NEXT: [[TMP2]] = or <4 x i1> [[VEC_PHI]], [[TMP1]]
; CHECK-VF4IC1-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-VF4IC1-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -1048,6 +1048,7 @@ define i32 @select_i32_from_icmp_same_inputs(i32 %a, i32 %b, i64 %n) {
; CHECK-VF4IC4-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[A]], i64 0
; CHECK-VF4IC4-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-VF4IC4-NEXT: [[TMP0:%.*]] = icmp eq <4 x i32> [[BROADCAST_SPLAT]], splat (i32 3)
+; CHECK-VF4IC4-NEXT: [[TMP4:%.*]] = xor <4 x i1> [[TMP0]], splat (i1 true)
; CHECK-VF4IC4-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK-VF4IC4: [[VECTOR_BODY]]:
; CHECK-VF4IC4-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1055,13 +1056,9 @@ define i32 @select_i32_from_icmp_same_inputs(i32 %a, i32 %b, i64 %n) {
; CHECK-VF4IC4-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ]
; CHECK-VF4IC4-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
; CHECK-VF4IC4-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i1> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-VF4IC4-NEXT: [[TMP1:%.*]] = xor <4 x i1> [[TMP0]], splat (i1 true)
-; CHECK-VF4IC4-NEXT: [[TMP2:%.*]] = xor <4 x i1> [[TMP0]], splat (i1 true)
-; CHECK-VF4IC4-NEXT: [[TMP3:%.*]] = xor <4 x i1> [[TMP0]], splat (i1 true)
-; CHECK-VF4IC4-NEXT: [[TMP4:%.*]] = xor <4 x i1> [[TMP0]], splat (i1 true)
-; CHECK-VF4IC4-NEXT: [[TMP5]] = or <4 x i1> [[VEC_PHI]], [[TMP1]]
-; CHECK-VF4IC4-NEXT: [[TMP6]] = or <4 x i1> [[VEC_PHI1]], [[TMP2]]
-; CHECK-VF4IC4-NEXT: [[TMP7]] = or <4 x i1> [[VEC_PHI2]], [[TMP3]]
+; CHECK-VF4IC4-NEXT: [[TMP5]] = or <4 x i1> [[VEC_PHI]], [[TMP4]]
+; CHECK-VF4IC4-NEXT: [[TMP6]] = or <4 x i1> [[VEC_PHI1]], [[TMP4]]
+; CHECK-VF4IC4-NEXT: [[TMP7]] = or <4 x i1> [[VEC_PHI2]], [[TMP4]]
; CHECK-VF4IC4-NEXT: [[TMP8]] = or <4 x i1> [[VEC_PHI3]], [[TMP4]]
; CHECK-VF4IC4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-VF4IC4-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
@@ -1100,6 +1097,7 @@ define i32 @select_i32_from_icmp_same_inputs(i32 %a, i32 %b, i64 %n) {
; CHECK-VF1IC4-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4
; CHECK-VF1IC4-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-VF1IC4-NEXT: [[TMP0:%.*]] = icmp eq i32 [[A]], 3
+; CHECK-VF1IC4-NEXT: [[TMP4:%.*]] = xor i1 [[TMP0]], true
; CHECK-VF1IC4-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK-VF1IC4: [[VECTOR_BODY]]:
; CHECK-VF1IC4-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
@@ -1107,13 +1105,9 @@ define i32 @select_i32_from_icmp_same_inputs(i32 %a, i32 %b, i64 %n) {
; CHECK-VF1IC4-NEXT: [[VEC_PHI1:%.*]] = phi i1 [ false, %[[VECTOR_PH]] ], [ [[TMP6:%.*]], %[[VECTOR_BODY]] ]
; CHECK-VF1IC4-NEXT: [[VEC_PHI2:%.*]] = phi i1 [ false, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
; CHECK-VF1IC4-NEXT: [[VEC_PHI3:%.*]] = phi i1 [ false, %[[VECTOR_PH]] ], [ [[TMP8:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-VF1IC4-NEXT: [[TMP1:%.*]] = xor i1 [[TMP0]], true
-; CHECK-VF1IC4-NEXT: [[TMP2:%.*]] = xor i1 [[TMP0]], true
-; CHECK-VF1IC4-NEXT: [[TMP3:%.*]] = xor i1 [[TMP0]], true
-; CHECK-VF1IC4-NEXT: [[TMP4:%.*]] = xor i1 [[TMP0]], true
-; CHECK-VF1IC4-NEXT: [[TMP5]] = or i1 [[VEC_PHI]], [[TMP1]]
-; CHECK-VF1IC4-NEXT: [[TMP6]] = or i1 [[VEC_PHI1]], [[TMP2]]
-; CHECK-VF1IC4-NEXT: [[TMP7]] = or i1 [[VEC_PHI2]], [[TMP3]]
+; CHECK-VF1IC4-NEXT: [[TMP5]] = or i1 [[VEC_PHI]], [[TMP4]]
+; CHECK-VF1IC4-NEXT: [[TMP6]] = or i1 [[VEC_PHI1]], [[TMP4]]
+; CHECK-VF1IC4-NEXT: [[TMP7]] = or i1 [[VEC_PHI2]], [[TMP4]]
; CHECK-VF1IC4-NEXT: [[TMP8]] = or i1 [[VEC_PHI3]], [[TMP4]]
; CHECK-VF1IC4-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-VF1IC4-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
diff --git a/llvm/test/Transforms/LoopVectorize/single_early_exit.ll b/llvm/test/Transforms/LoopVectorize/single_early_exit.ll
index 7590bb9..4ba9cc6 100644
--- a/llvm/test/Transforms/LoopVectorize/single_early_exit.ll
+++ b/llvm/test/Transforms/LoopVectorize/single_early_exit.ll
@@ -281,12 +281,12 @@ define i32 @diff_blocks_invariant_early_exit_cond(ptr %s) {
; CHECK: vector.ph:
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[COND]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP0:%.*]] = xor <4 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
+; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP0]])
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
-; CHECK-NEXT: [[TMP0:%.*]] = xor <4 x i1> [[BROADCAST_SPLAT]], splat (i1 true)
-; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.vector.reduce.or.v4i1(<4 x i1> [[TMP0]])
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 276
; CHECK-NEXT: [[TMP3:%.*]] = or i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_SPLIT:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
diff --git a/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll b/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll
index 31732f0..892ddcc 100644
--- a/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll
+++ b/llvm/test/Transforms/LoopVectorize/tail-folding-switch.ll
@@ -10,12 +10,12 @@ define void @tail_fold_switch(ptr %dst, i32 %0) {
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP0]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[BROADCAST_SPLAT]], splat (i32 1)
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE6:.*]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE6]] ]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ule <4 x i64> [[VEC_IND]], splat (i64 4)
-; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[BROADCAST_SPLAT]], splat (i32 1)
; CHECK-NEXT: [[TMP3:%.*]] = select <4 x i1> [[TMP1]], <4 x i1> [[TMP2]], <4 x i1> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i1> [[TMP3]], i32 0
; CHECK-NEXT: br i1 [[TMP4]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/uncountable-single-exit-loops.ll b/llvm/test/Transforms/LoopVectorize/uncountable-single-exit-loops.ll
new file mode 100644
index 0000000..2520613
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/uncountable-single-exit-loops.ll
@@ -0,0 +1,52 @@
+; REQUIRES: asserts
+; RUN: opt -p loop-vectorize -debug %s 2>&1 | FileCheck %s
+
+
+; CHECK-LABEL: LV: Checking a loop in 'latch_exit_cannot_compute_btc_due_to_step'
+; CHECK: LV: Did not find one integer induction var.
+; CHECK-NEXT: LV: Not vectorizing: Early exit is not the latch predecessor.
+; CHECK-NEXT: LV: Interleaving disabled by the pass manager
+; CHECK-NEXT: LV: Not vectorizing: Cannot prove legality.
+
+; CHECK-LABEL: LV: Checking a loop in 'header_exit_cannot_compute_btc_due_to_step'
+; CHECK: LV: Found an induction variable.
+; CHECK-NEXT: LV: Did not find one integer induction var.
+; CHECK-NEXT: LV: Not vectorizing: Cannot determine exact exit count for latch block.
+; CHECK-NEXT: LV: Interleaving disabled by the pass manager
+; CHECK-NEXT: LV: Not vectorizing: Cannot prove legality.
+
+; CHECK-NOT: vector.body
+define void @latch_exit_cannot_compute_btc_due_to_step(ptr %dst, i64 %step) {
+entry:
+ br label %loop
+
+loop: ; preds = %entry, %loop
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %iv.next = add i64 %iv, %step
+ %gep = getelementptr i8, ptr %dst, i64 %iv
+ store i8 0, ptr %gep, align 1
+ %ec = icmp eq i64 %iv.next, 1000
+ br i1 %ec, label %loop, label %exit
+
+exit:
+ ret void
+}
+
+define void @header_exit_cannot_compute_btc_due_to_step(ptr %dst, i64 %step) {
+entry:
+ br label %loop.header
+
+loop.header:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
+ %iv.next = add i64 %iv, %step
+ %ec = icmp eq i64 %iv.next, 1000
+ br i1 %ec, label %loop.latch, label %exit
+
+loop.latch:
+ %gep = getelementptr i8, ptr %dst, i64 %iv
+ store i8 0, ptr %gep, align 1
+ br label %loop.header
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Transforms/LoopVectorize/uniform-blend.ll b/llvm/test/Transforms/LoopVectorize/uniform-blend.ll
index 72011ca..7f5e0f3a 100644
--- a/llvm/test/Transforms/LoopVectorize/uniform-blend.ll
+++ b/llvm/test/Transforms/LoopVectorize/uniform-blend.ll
@@ -133,11 +133,11 @@ define void @blend_chain_iv(i1 %c) {
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[C]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP0:%.*]] = select <4 x i1> [[BROADCAST_SPLAT]], <4 x i1> [[BROADCAST_SPLAT]], <4 x i1> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = select <4 x i1> [[BROADCAST_SPLAT]], <4 x i1> [[BROADCAST_SPLAT]], <4 x i1> zeroinitializer
; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP0]], <4 x i64> [[VEC_IND]], <4 x i64> undef
; CHECK-NEXT: [[PREDPHI1:%.*]] = select <4 x i1> [[BROADCAST_SPLAT]], <4 x i64> [[PREDPHI]], <4 x i64> undef
; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[PREDPHI1]], i32 0
diff --git a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll
index fe5811e..85b44a7 100644
--- a/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll
+++ b/llvm/test/Transforms/LoopVectorize/vector-loop-backedge-elimination.ll
@@ -707,6 +707,542 @@ outer.latch:
exit:
ret void
}
+
+declare void @llvm.assume(i1)
+
+; Test case for https://github.com/llvm/llvm-project/issues/121897.
+define void @scev_expand_step(i64 %x, ptr %dst) {
+; VF8UF1-LABEL: define void @scev_expand_step(
+; VF8UF1-SAME: i64 [[X:%.*]], ptr [[DST:%.*]]) {
+; VF8UF1-NEXT: [[ENTRY:.*]]:
+; VF8UF1-NEXT: [[C:%.*]] = icmp eq i64 [[X]], 65536
+; VF8UF1-NEXT: call void @llvm.assume(i1 [[C]])
+; VF8UF1-NEXT: [[FR:%.*]] = freeze i64 [[X]]
+; VF8UF1-NEXT: [[STEP:%.*]] = add i64 [[FR]], -65534
+; VF8UF1-NEXT: [[TMP0:%.*]] = udiv i64 15, [[STEP]]
+; VF8UF1-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 1
+; VF8UF1-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; VF8UF1: [[VECTOR_PH]]:
+; VF8UF1-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP1]], 7
+; VF8UF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 8
+; VF8UF1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; VF8UF1-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[TMP1]], 1
+; VF8UF1-NEXT: [[TMP2:%.*]] = mul i64 [[N_VEC]], [[STEP]]
+; VF8UF1-NEXT: br label %[[VECTOR_BODY:.*]]
+; VF8UF1: [[VECTOR_BODY]]:
+; VF8UF1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
+; VF8UF1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
+; VF8UF1-NEXT: [[TMP3:%.*]] = icmp ule <8 x i64> <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>, [[BROADCAST_SPLAT]]
+; VF8UF1-NEXT: [[TMP4:%.*]] = extractelement <8 x i1> [[TMP3]], i32 0
+; VF8UF1-NEXT: br i1 [[TMP4]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
+; VF8UF1: [[PRED_STORE_IF]]:
+; VF8UF1-NEXT: [[TMP5:%.*]] = mul i64 0, [[STEP]]
+; VF8UF1-NEXT: [[TMP6:%.*]] = add i64 0, [[TMP5]]
+; VF8UF1-NEXT: [[TMP7:%.*]] = add i64 [[TMP6]], [[STEP]]
+; VF8UF1-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP7]]
+; VF8UF1-NEXT: store i8 0, ptr [[TMP8]], align 1
+; VF8UF1-NEXT: br label %[[PRED_STORE_CONTINUE]]
+; VF8UF1: [[PRED_STORE_CONTINUE]]:
+; VF8UF1-NEXT: [[TMP9:%.*]] = extractelement <8 x i1> [[TMP3]], i32 1
+; VF8UF1-NEXT: br i1 [[TMP9]], label %[[PRED_STORE_IF1:.*]], label %[[PRED_STORE_CONTINUE2:.*]]
+; VF8UF1: [[PRED_STORE_IF1]]:
+; VF8UF1-NEXT: [[TMP10:%.*]] = mul i64 1, [[STEP]]
+; VF8UF1-NEXT: [[TMP11:%.*]] = add i64 0, [[TMP10]]
+; VF8UF1-NEXT: [[TMP12:%.*]] = add i64 [[TMP11]], [[STEP]]
+; VF8UF1-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP12]]
+; VF8UF1-NEXT: store i8 0, ptr [[TMP13]], align 1
+; VF8UF1-NEXT: br label %[[PRED_STORE_CONTINUE2]]
+; VF8UF1: [[PRED_STORE_CONTINUE2]]:
+; VF8UF1-NEXT: [[TMP14:%.*]] = extractelement <8 x i1> [[TMP3]], i32 2
+; VF8UF1-NEXT: br i1 [[TMP14]], label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4:.*]]
+; VF8UF1: [[PRED_STORE_IF3]]:
+; VF8UF1-NEXT: [[TMP15:%.*]] = mul i64 2, [[STEP]]
+; VF8UF1-NEXT: [[TMP16:%.*]] = add i64 0, [[TMP15]]
+; VF8UF1-NEXT: [[TMP17:%.*]] = add i64 [[TMP16]], [[STEP]]
+; VF8UF1-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP17]]
+; VF8UF1-NEXT: store i8 0, ptr [[TMP18]], align 1
+; VF8UF1-NEXT: br label %[[PRED_STORE_CONTINUE4]]
+; VF8UF1: [[PRED_STORE_CONTINUE4]]:
+; VF8UF1-NEXT: [[TMP19:%.*]] = extractelement <8 x i1> [[TMP3]], i32 3
+; VF8UF1-NEXT: br i1 [[TMP19]], label %[[PRED_STORE_IF5:.*]], label %[[PRED_STORE_CONTINUE6:.*]]
+; VF8UF1: [[PRED_STORE_IF5]]:
+; VF8UF1-NEXT: [[TMP20:%.*]] = mul i64 3, [[STEP]]
+; VF8UF1-NEXT: [[TMP21:%.*]] = add i64 0, [[TMP20]]
+; VF8UF1-NEXT: [[TMP22:%.*]] = add i64 [[TMP21]], [[STEP]]
+; VF8UF1-NEXT: [[TMP23:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP22]]
+; VF8UF1-NEXT: store i8 0, ptr [[TMP23]], align 1
+; VF8UF1-NEXT: br label %[[PRED_STORE_CONTINUE6]]
+; VF8UF1: [[PRED_STORE_CONTINUE6]]:
+; VF8UF1-NEXT: [[TMP24:%.*]] = extractelement <8 x i1> [[TMP3]], i32 4
+; VF8UF1-NEXT: br i1 [[TMP24]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8:.*]]
+; VF8UF1: [[PRED_STORE_IF7]]:
+; VF8UF1-NEXT: [[TMP25:%.*]] = mul i64 4, [[STEP]]
+; VF8UF1-NEXT: [[TMP26:%.*]] = add i64 0, [[TMP25]]
+; VF8UF1-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], [[STEP]]
+; VF8UF1-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP27]]
+; VF8UF1-NEXT: store i8 0, ptr [[TMP28]], align 1
+; VF8UF1-NEXT: br label %[[PRED_STORE_CONTINUE8]]
+; VF8UF1: [[PRED_STORE_CONTINUE8]]:
+; VF8UF1-NEXT: [[TMP29:%.*]] = extractelement <8 x i1> [[TMP3]], i32 5
+; VF8UF1-NEXT: br i1 [[TMP29]], label %[[PRED_STORE_IF9:.*]], label %[[PRED_STORE_CONTINUE10:.*]]
+; VF8UF1: [[PRED_STORE_IF9]]:
+; VF8UF1-NEXT: [[TMP30:%.*]] = mul i64 5, [[STEP]]
+; VF8UF1-NEXT: [[TMP31:%.*]] = add i64 0, [[TMP30]]
+; VF8UF1-NEXT: [[TMP32:%.*]] = add i64 [[TMP31]], [[STEP]]
+; VF8UF1-NEXT: [[TMP33:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP32]]
+; VF8UF1-NEXT: store i8 0, ptr [[TMP33]], align 1
+; VF8UF1-NEXT: br label %[[PRED_STORE_CONTINUE10]]
+; VF8UF1: [[PRED_STORE_CONTINUE10]]:
+; VF8UF1-NEXT: [[TMP34:%.*]] = extractelement <8 x i1> [[TMP3]], i32 6
+; VF8UF1-NEXT: br i1 [[TMP34]], label %[[PRED_STORE_IF11:.*]], label %[[PRED_STORE_CONTINUE12:.*]]
+; VF8UF1: [[PRED_STORE_IF11]]:
+; VF8UF1-NEXT: [[TMP35:%.*]] = mul i64 6, [[STEP]]
+; VF8UF1-NEXT: [[TMP36:%.*]] = add i64 0, [[TMP35]]
+; VF8UF1-NEXT: [[TMP37:%.*]] = add i64 [[TMP36]], [[STEP]]
+; VF8UF1-NEXT: [[TMP38:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP37]]
+; VF8UF1-NEXT: store i8 0, ptr [[TMP38]], align 1
+; VF8UF1-NEXT: br label %[[PRED_STORE_CONTINUE12]]
+; VF8UF1: [[PRED_STORE_CONTINUE12]]:
+; VF8UF1-NEXT: [[TMP39:%.*]] = extractelement <8 x i1> [[TMP3]], i32 7
+; VF8UF1-NEXT: br i1 [[TMP39]], label %[[PRED_STORE_IF13:.*]], label %[[PRED_STORE_CONTINUE14:.*]]
+; VF8UF1: [[PRED_STORE_IF13]]:
+; VF8UF1-NEXT: [[TMP40:%.*]] = mul i64 7, [[STEP]]
+; VF8UF1-NEXT: [[TMP41:%.*]] = add i64 0, [[TMP40]]
+; VF8UF1-NEXT: [[TMP42:%.*]] = add i64 [[TMP41]], [[STEP]]
+; VF8UF1-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP42]]
+; VF8UF1-NEXT: store i8 0, ptr [[TMP43]], align 1
+; VF8UF1-NEXT: br label %[[PRED_STORE_CONTINUE14]]
+; VF8UF1: [[PRED_STORE_CONTINUE14]]:
+; VF8UF1-NEXT: br label %[[MIDDLE_BLOCK:.*]]
+; VF8UF1: [[MIDDLE_BLOCK]]:
+; VF8UF1-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; VF8UF1: [[SCALAR_PH]]:
+; VF8UF1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; VF8UF1-NEXT: br label %[[LOOP:.*]]
+; VF8UF1: [[LOOP]]:
+; VF8UF1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF8UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[STEP]]
+; VF8UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]]
+; VF8UF1-NEXT: store i8 0, ptr [[GEP_DST]], align 1
+; VF8UF1-NEXT: [[EC:%.*]] = icmp slt i64 [[IV_NEXT]], 16
+; VF8UF1-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP7:![0-9]+]]
+; VF8UF1: [[EXIT]]:
+; VF8UF1-NEXT: ret void
+;
+; VF8UF2-LABEL: define void @scev_expand_step(
+; VF8UF2-SAME: i64 [[X:%.*]], ptr [[DST:%.*]]) {
+; VF8UF2-NEXT: [[ENTRY:.*]]:
+; VF8UF2-NEXT: [[C:%.*]] = icmp eq i64 [[X]], 65536
+; VF8UF2-NEXT: call void @llvm.assume(i1 [[C]])
+; VF8UF2-NEXT: [[FR:%.*]] = freeze i64 [[X]]
+; VF8UF2-NEXT: [[STEP:%.*]] = add i64 [[FR]], -65534
+; VF8UF2-NEXT: [[TMP0:%.*]] = udiv i64 15, [[STEP]]
+; VF8UF2-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 1
+; VF8UF2-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; VF8UF2: [[VECTOR_PH]]:
+; VF8UF2-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP1]], 15
+; VF8UF2-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 16
+; VF8UF2-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; VF8UF2-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[TMP1]], 1
+; VF8UF2-NEXT: [[TMP2:%.*]] = mul i64 [[N_VEC]], [[STEP]]
+; VF8UF2-NEXT: br label %[[VECTOR_BODY:.*]]
+; VF8UF2: [[VECTOR_BODY]]:
+; VF8UF2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
+; VF8UF2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <8 x i64> [[BROADCAST_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
+; VF8UF2-NEXT: [[TMP3:%.*]] = icmp ule <8 x i64> <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7>, [[BROADCAST_SPLAT]]
+; VF8UF2-NEXT: [[TMP4:%.*]] = icmp ule <8 x i64> <i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>, [[BROADCAST_SPLAT]]
+; VF8UF2-NEXT: [[TMP5:%.*]] = extractelement <8 x i1> [[TMP3]], i32 0
+; VF8UF2-NEXT: br i1 [[TMP5]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
+; VF8UF2: [[PRED_STORE_IF]]:
+; VF8UF2-NEXT: [[TMP6:%.*]] = mul i64 0, [[STEP]]
+; VF8UF2-NEXT: [[TMP7:%.*]] = add i64 0, [[TMP6]]
+; VF8UF2-NEXT: [[TMP8:%.*]] = add i64 [[TMP7]], [[STEP]]
+; VF8UF2-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP8]]
+; VF8UF2-NEXT: store i8 0, ptr [[TMP9]], align 1
+; VF8UF2-NEXT: br label %[[PRED_STORE_CONTINUE]]
+; VF8UF2: [[PRED_STORE_CONTINUE]]:
+; VF8UF2-NEXT: [[TMP10:%.*]] = extractelement <8 x i1> [[TMP3]], i32 1
+; VF8UF2-NEXT: br i1 [[TMP10]], label %[[PRED_STORE_IF1:.*]], label %[[PRED_STORE_CONTINUE2:.*]]
+; VF8UF2: [[PRED_STORE_IF1]]:
+; VF8UF2-NEXT: [[TMP11:%.*]] = mul i64 1, [[STEP]]
+; VF8UF2-NEXT: [[TMP12:%.*]] = add i64 0, [[TMP11]]
+; VF8UF2-NEXT: [[TMP13:%.*]] = add i64 [[TMP12]], [[STEP]]
+; VF8UF2-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP13]]
+; VF8UF2-NEXT: store i8 0, ptr [[TMP14]], align 1
+; VF8UF2-NEXT: br label %[[PRED_STORE_CONTINUE2]]
+; VF8UF2: [[PRED_STORE_CONTINUE2]]:
+; VF8UF2-NEXT: [[TMP15:%.*]] = extractelement <8 x i1> [[TMP3]], i32 2
+; VF8UF2-NEXT: br i1 [[TMP15]], label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4:.*]]
+; VF8UF2: [[PRED_STORE_IF3]]:
+; VF8UF2-NEXT: [[TMP16:%.*]] = mul i64 2, [[STEP]]
+; VF8UF2-NEXT: [[TMP17:%.*]] = add i64 0, [[TMP16]]
+; VF8UF2-NEXT: [[TMP18:%.*]] = add i64 [[TMP17]], [[STEP]]
+; VF8UF2-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP18]]
+; VF8UF2-NEXT: store i8 0, ptr [[TMP19]], align 1
+; VF8UF2-NEXT: br label %[[PRED_STORE_CONTINUE4]]
+; VF8UF2: [[PRED_STORE_CONTINUE4]]:
+; VF8UF2-NEXT: [[TMP20:%.*]] = extractelement <8 x i1> [[TMP3]], i32 3
+; VF8UF2-NEXT: br i1 [[TMP20]], label %[[PRED_STORE_IF5:.*]], label %[[PRED_STORE_CONTINUE6:.*]]
+; VF8UF2: [[PRED_STORE_IF5]]:
+; VF8UF2-NEXT: [[TMP21:%.*]] = mul i64 3, [[STEP]]
+; VF8UF2-NEXT: [[TMP22:%.*]] = add i64 0, [[TMP21]]
+; VF8UF2-NEXT: [[TMP23:%.*]] = add i64 [[TMP22]], [[STEP]]
+; VF8UF2-NEXT: [[TMP24:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP23]]
+; VF8UF2-NEXT: store i8 0, ptr [[TMP24]], align 1
+; VF8UF2-NEXT: br label %[[PRED_STORE_CONTINUE6]]
+; VF8UF2: [[PRED_STORE_CONTINUE6]]:
+; VF8UF2-NEXT: [[TMP25:%.*]] = extractelement <8 x i1> [[TMP3]], i32 4
+; VF8UF2-NEXT: br i1 [[TMP25]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8:.*]]
+; VF8UF2: [[PRED_STORE_IF7]]:
+; VF8UF2-NEXT: [[TMP26:%.*]] = mul i64 4, [[STEP]]
+; VF8UF2-NEXT: [[TMP27:%.*]] = add i64 0, [[TMP26]]
+; VF8UF2-NEXT: [[TMP28:%.*]] = add i64 [[TMP27]], [[STEP]]
+; VF8UF2-NEXT: [[TMP29:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP28]]
+; VF8UF2-NEXT: store i8 0, ptr [[TMP29]], align 1
+; VF8UF2-NEXT: br label %[[PRED_STORE_CONTINUE8]]
+; VF8UF2: [[PRED_STORE_CONTINUE8]]:
+; VF8UF2-NEXT: [[TMP30:%.*]] = extractelement <8 x i1> [[TMP3]], i32 5
+; VF8UF2-NEXT: br i1 [[TMP30]], label %[[PRED_STORE_IF9:.*]], label %[[PRED_STORE_CONTINUE10:.*]]
+; VF8UF2: [[PRED_STORE_IF9]]:
+; VF8UF2-NEXT: [[TMP31:%.*]] = mul i64 5, [[STEP]]
+; VF8UF2-NEXT: [[TMP32:%.*]] = add i64 0, [[TMP31]]
+; VF8UF2-NEXT: [[TMP33:%.*]] = add i64 [[TMP32]], [[STEP]]
+; VF8UF2-NEXT: [[TMP34:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP33]]
+; VF8UF2-NEXT: store i8 0, ptr [[TMP34]], align 1
+; VF8UF2-NEXT: br label %[[PRED_STORE_CONTINUE10]]
+; VF8UF2: [[PRED_STORE_CONTINUE10]]:
+; VF8UF2-NEXT: [[TMP35:%.*]] = extractelement <8 x i1> [[TMP3]], i32 6
+; VF8UF2-NEXT: br i1 [[TMP35]], label %[[PRED_STORE_IF11:.*]], label %[[PRED_STORE_CONTINUE12:.*]]
+; VF8UF2: [[PRED_STORE_IF11]]:
+; VF8UF2-NEXT: [[TMP36:%.*]] = mul i64 6, [[STEP]]
+; VF8UF2-NEXT: [[TMP37:%.*]] = add i64 0, [[TMP36]]
+; VF8UF2-NEXT: [[TMP38:%.*]] = add i64 [[TMP37]], [[STEP]]
+; VF8UF2-NEXT: [[TMP39:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP38]]
+; VF8UF2-NEXT: store i8 0, ptr [[TMP39]], align 1
+; VF8UF2-NEXT: br label %[[PRED_STORE_CONTINUE12]]
+; VF8UF2: [[PRED_STORE_CONTINUE12]]:
+; VF8UF2-NEXT: [[TMP40:%.*]] = extractelement <8 x i1> [[TMP3]], i32 7
+; VF8UF2-NEXT: br i1 [[TMP40]], label %[[PRED_STORE_IF13:.*]], label %[[PRED_STORE_CONTINUE14:.*]]
+; VF8UF2: [[PRED_STORE_IF13]]:
+; VF8UF2-NEXT: [[TMP41:%.*]] = mul i64 7, [[STEP]]
+; VF8UF2-NEXT: [[TMP42:%.*]] = add i64 0, [[TMP41]]
+; VF8UF2-NEXT: [[TMP43:%.*]] = add i64 [[TMP42]], [[STEP]]
+; VF8UF2-NEXT: [[TMP44:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP43]]
+; VF8UF2-NEXT: store i8 0, ptr [[TMP44]], align 1
+; VF8UF2-NEXT: br label %[[PRED_STORE_CONTINUE14]]
+; VF8UF2: [[PRED_STORE_CONTINUE14]]:
+; VF8UF2-NEXT: [[TMP45:%.*]] = extractelement <8 x i1> [[TMP4]], i32 0
+; VF8UF2-NEXT: br i1 [[TMP45]], label %[[PRED_STORE_IF15:.*]], label %[[PRED_STORE_CONTINUE16:.*]]
+; VF8UF2: [[PRED_STORE_IF15]]:
+; VF8UF2-NEXT: [[TMP46:%.*]] = mul i64 8, [[STEP]]
+; VF8UF2-NEXT: [[TMP47:%.*]] = add i64 0, [[TMP46]]
+; VF8UF2-NEXT: [[TMP48:%.*]] = add i64 [[TMP47]], [[STEP]]
+; VF8UF2-NEXT: [[TMP49:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP48]]
+; VF8UF2-NEXT: store i8 0, ptr [[TMP49]], align 1
+; VF8UF2-NEXT: br label %[[PRED_STORE_CONTINUE16]]
+; VF8UF2: [[PRED_STORE_CONTINUE16]]:
+; VF8UF2-NEXT: [[TMP50:%.*]] = extractelement <8 x i1> [[TMP4]], i32 1
+; VF8UF2-NEXT: br i1 [[TMP50]], label %[[PRED_STORE_IF17:.*]], label %[[PRED_STORE_CONTINUE18:.*]]
+; VF8UF2: [[PRED_STORE_IF17]]:
+; VF8UF2-NEXT: [[TMP51:%.*]] = mul i64 9, [[STEP]]
+; VF8UF2-NEXT: [[TMP52:%.*]] = add i64 0, [[TMP51]]
+; VF8UF2-NEXT: [[TMP53:%.*]] = add i64 [[TMP52]], [[STEP]]
+; VF8UF2-NEXT: [[TMP54:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP53]]
+; VF8UF2-NEXT: store i8 0, ptr [[TMP54]], align 1
+; VF8UF2-NEXT: br label %[[PRED_STORE_CONTINUE18]]
+; VF8UF2: [[PRED_STORE_CONTINUE18]]:
+; VF8UF2-NEXT: [[TMP55:%.*]] = extractelement <8 x i1> [[TMP4]], i32 2
+; VF8UF2-NEXT: br i1 [[TMP55]], label %[[PRED_STORE_IF19:.*]], label %[[PRED_STORE_CONTINUE20:.*]]
+; VF8UF2: [[PRED_STORE_IF19]]:
+; VF8UF2-NEXT: [[TMP56:%.*]] = mul i64 10, [[STEP]]
+; VF8UF2-NEXT: [[TMP57:%.*]] = add i64 0, [[TMP56]]
+; VF8UF2-NEXT: [[TMP58:%.*]] = add i64 [[TMP57]], [[STEP]]
+; VF8UF2-NEXT: [[TMP59:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP58]]
+; VF8UF2-NEXT: store i8 0, ptr [[TMP59]], align 1
+; VF8UF2-NEXT: br label %[[PRED_STORE_CONTINUE20]]
+; VF8UF2: [[PRED_STORE_CONTINUE20]]:
+; VF8UF2-NEXT: [[TMP60:%.*]] = extractelement <8 x i1> [[TMP4]], i32 3
+; VF8UF2-NEXT: br i1 [[TMP60]], label %[[PRED_STORE_IF21:.*]], label %[[PRED_STORE_CONTINUE22:.*]]
+; VF8UF2: [[PRED_STORE_IF21]]:
+; VF8UF2-NEXT: [[TMP61:%.*]] = mul i64 11, [[STEP]]
+; VF8UF2-NEXT: [[TMP62:%.*]] = add i64 0, [[TMP61]]
+; VF8UF2-NEXT: [[TMP63:%.*]] = add i64 [[TMP62]], [[STEP]]
+; VF8UF2-NEXT: [[TMP64:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP63]]
+; VF8UF2-NEXT: store i8 0, ptr [[TMP64]], align 1
+; VF8UF2-NEXT: br label %[[PRED_STORE_CONTINUE22]]
+; VF8UF2: [[PRED_STORE_CONTINUE22]]:
+; VF8UF2-NEXT: [[TMP65:%.*]] = extractelement <8 x i1> [[TMP4]], i32 4
+; VF8UF2-NEXT: br i1 [[TMP65]], label %[[PRED_STORE_IF23:.*]], label %[[PRED_STORE_CONTINUE24:.*]]
+; VF8UF2: [[PRED_STORE_IF23]]:
+; VF8UF2-NEXT: [[TMP66:%.*]] = mul i64 12, [[STEP]]
+; VF8UF2-NEXT: [[TMP67:%.*]] = add i64 0, [[TMP66]]
+; VF8UF2-NEXT: [[TMP68:%.*]] = add i64 [[TMP67]], [[STEP]]
+; VF8UF2-NEXT: [[TMP69:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP68]]
+; VF8UF2-NEXT: store i8 0, ptr [[TMP69]], align 1
+; VF8UF2-NEXT: br label %[[PRED_STORE_CONTINUE24]]
+; VF8UF2: [[PRED_STORE_CONTINUE24]]:
+; VF8UF2-NEXT: [[TMP70:%.*]] = extractelement <8 x i1> [[TMP4]], i32 5
+; VF8UF2-NEXT: br i1 [[TMP70]], label %[[PRED_STORE_IF25:.*]], label %[[PRED_STORE_CONTINUE26:.*]]
+; VF8UF2: [[PRED_STORE_IF25]]:
+; VF8UF2-NEXT: [[TMP71:%.*]] = mul i64 13, [[STEP]]
+; VF8UF2-NEXT: [[TMP72:%.*]] = add i64 0, [[TMP71]]
+; VF8UF2-NEXT: [[TMP73:%.*]] = add i64 [[TMP72]], [[STEP]]
+; VF8UF2-NEXT: [[TMP74:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP73]]
+; VF8UF2-NEXT: store i8 0, ptr [[TMP74]], align 1
+; VF8UF2-NEXT: br label %[[PRED_STORE_CONTINUE26]]
+; VF8UF2: [[PRED_STORE_CONTINUE26]]:
+; VF8UF2-NEXT: [[TMP75:%.*]] = extractelement <8 x i1> [[TMP4]], i32 6
+; VF8UF2-NEXT: br i1 [[TMP75]], label %[[PRED_STORE_IF27:.*]], label %[[PRED_STORE_CONTINUE28:.*]]
+; VF8UF2: [[PRED_STORE_IF27]]:
+; VF8UF2-NEXT: [[TMP76:%.*]] = mul i64 14, [[STEP]]
+; VF8UF2-NEXT: [[TMP77:%.*]] = add i64 0, [[TMP76]]
+; VF8UF2-NEXT: [[TMP78:%.*]] = add i64 [[TMP77]], [[STEP]]
+; VF8UF2-NEXT: [[TMP79:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP78]]
+; VF8UF2-NEXT: store i8 0, ptr [[TMP79]], align 1
+; VF8UF2-NEXT: br label %[[PRED_STORE_CONTINUE28]]
+; VF8UF2: [[PRED_STORE_CONTINUE28]]:
+; VF8UF2-NEXT: [[TMP80:%.*]] = extractelement <8 x i1> [[TMP4]], i32 7
+; VF8UF2-NEXT: br i1 [[TMP80]], label %[[PRED_STORE_IF29:.*]], label %[[PRED_STORE_CONTINUE30:.*]]
+; VF8UF2: [[PRED_STORE_IF29]]:
+; VF8UF2-NEXT: [[TMP81:%.*]] = mul i64 15, [[STEP]]
+; VF8UF2-NEXT: [[TMP82:%.*]] = add i64 0, [[TMP81]]
+; VF8UF2-NEXT: [[TMP83:%.*]] = add i64 [[TMP82]], [[STEP]]
+; VF8UF2-NEXT: [[TMP84:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP83]]
+; VF8UF2-NEXT: store i8 0, ptr [[TMP84]], align 1
+; VF8UF2-NEXT: br label %[[PRED_STORE_CONTINUE30]]
+; VF8UF2: [[PRED_STORE_CONTINUE30]]:
+; VF8UF2-NEXT: br label %[[MIDDLE_BLOCK:.*]]
+; VF8UF2: [[MIDDLE_BLOCK]]:
+; VF8UF2-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; VF8UF2: [[SCALAR_PH]]:
+; VF8UF2-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; VF8UF2-NEXT: br label %[[LOOP:.*]]
+; VF8UF2: [[LOOP]]:
+; VF8UF2-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF8UF2-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[STEP]]
+; VF8UF2-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]]
+; VF8UF2-NEXT: store i8 0, ptr [[GEP_DST]], align 1
+; VF8UF2-NEXT: [[EC:%.*]] = icmp slt i64 [[IV_NEXT]], 16
+; VF8UF2-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP5:![0-9]+]]
+; VF8UF2: [[EXIT]]:
+; VF8UF2-NEXT: ret void
+;
+; VF16UF1-LABEL: define void @scev_expand_step(
+; VF16UF1-SAME: i64 [[X:%.*]], ptr [[DST:%.*]]) {
+; VF16UF1-NEXT: [[ENTRY:.*]]:
+; VF16UF1-NEXT: [[C:%.*]] = icmp eq i64 [[X]], 65536
+; VF16UF1-NEXT: call void @llvm.assume(i1 [[C]])
+; VF16UF1-NEXT: [[FR:%.*]] = freeze i64 [[X]]
+; VF16UF1-NEXT: [[STEP:%.*]] = add i64 [[FR]], -65534
+; VF16UF1-NEXT: [[TMP0:%.*]] = udiv i64 15, [[STEP]]
+; VF16UF1-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 1
+; VF16UF1-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; VF16UF1: [[VECTOR_PH]]:
+; VF16UF1-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP1]], 15
+; VF16UF1-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 16
+; VF16UF1-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
+; VF16UF1-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[TMP1]], 1
+; VF16UF1-NEXT: [[TMP2:%.*]] = mul i64 [[N_VEC]], [[STEP]]
+; VF16UF1-NEXT: br label %[[VECTOR_BODY:.*]]
+; VF16UF1: [[VECTOR_BODY]]:
+; VF16UF1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
+; VF16UF1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i64> [[BROADCAST_SPLATINSERT]], <16 x i64> poison, <16 x i32> zeroinitializer
+; VF16UF1-NEXT: [[TMP3:%.*]] = icmp ule <16 x i64> <i64 0, i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15>, [[BROADCAST_SPLAT]]
+; VF16UF1-NEXT: [[TMP4:%.*]] = extractelement <16 x i1> [[TMP3]], i32 0
+; VF16UF1-NEXT: br i1 [[TMP4]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]]
+; VF16UF1: [[PRED_STORE_IF]]:
+; VF16UF1-NEXT: [[TMP5:%.*]] = mul i64 0, [[STEP]]
+; VF16UF1-NEXT: [[TMP6:%.*]] = add i64 0, [[TMP5]]
+; VF16UF1-NEXT: [[TMP7:%.*]] = add i64 [[TMP6]], [[STEP]]
+; VF16UF1-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP7]]
+; VF16UF1-NEXT: store i8 0, ptr [[TMP8]], align 1
+; VF16UF1-NEXT: br label %[[PRED_STORE_CONTINUE]]
+; VF16UF1: [[PRED_STORE_CONTINUE]]:
+; VF16UF1-NEXT: [[TMP9:%.*]] = extractelement <16 x i1> [[TMP3]], i32 1
+; VF16UF1-NEXT: br i1 [[TMP9]], label %[[PRED_STORE_IF1:.*]], label %[[PRED_STORE_CONTINUE2:.*]]
+; VF16UF1: [[PRED_STORE_IF1]]:
+; VF16UF1-NEXT: [[TMP10:%.*]] = mul i64 1, [[STEP]]
+; VF16UF1-NEXT: [[TMP11:%.*]] = add i64 0, [[TMP10]]
+; VF16UF1-NEXT: [[TMP12:%.*]] = add i64 [[TMP11]], [[STEP]]
+; VF16UF1-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP12]]
+; VF16UF1-NEXT: store i8 0, ptr [[TMP13]], align 1
+; VF16UF1-NEXT: br label %[[PRED_STORE_CONTINUE2]]
+; VF16UF1: [[PRED_STORE_CONTINUE2]]:
+; VF16UF1-NEXT: [[TMP14:%.*]] = extractelement <16 x i1> [[TMP3]], i32 2
+; VF16UF1-NEXT: br i1 [[TMP14]], label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4:.*]]
+; VF16UF1: [[PRED_STORE_IF3]]:
+; VF16UF1-NEXT: [[TMP15:%.*]] = mul i64 2, [[STEP]]
+; VF16UF1-NEXT: [[TMP16:%.*]] = add i64 0, [[TMP15]]
+; VF16UF1-NEXT: [[TMP17:%.*]] = add i64 [[TMP16]], [[STEP]]
+; VF16UF1-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP17]]
+; VF16UF1-NEXT: store i8 0, ptr [[TMP18]], align 1
+; VF16UF1-NEXT: br label %[[PRED_STORE_CONTINUE4]]
+; VF16UF1: [[PRED_STORE_CONTINUE4]]:
+; VF16UF1-NEXT: [[TMP19:%.*]] = extractelement <16 x i1> [[TMP3]], i32 3
+; VF16UF1-NEXT: br i1 [[TMP19]], label %[[PRED_STORE_IF5:.*]], label %[[PRED_STORE_CONTINUE6:.*]]
+; VF16UF1: [[PRED_STORE_IF5]]:
+; VF16UF1-NEXT: [[TMP20:%.*]] = mul i64 3, [[STEP]]
+; VF16UF1-NEXT: [[TMP21:%.*]] = add i64 0, [[TMP20]]
+; VF16UF1-NEXT: [[TMP22:%.*]] = add i64 [[TMP21]], [[STEP]]
+; VF16UF1-NEXT: [[TMP23:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP22]]
+; VF16UF1-NEXT: store i8 0, ptr [[TMP23]], align 1
+; VF16UF1-NEXT: br label %[[PRED_STORE_CONTINUE6]]
+; VF16UF1: [[PRED_STORE_CONTINUE6]]:
+; VF16UF1-NEXT: [[TMP24:%.*]] = extractelement <16 x i1> [[TMP3]], i32 4
+; VF16UF1-NEXT: br i1 [[TMP24]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8:.*]]
+; VF16UF1: [[PRED_STORE_IF7]]:
+; VF16UF1-NEXT: [[TMP25:%.*]] = mul i64 4, [[STEP]]
+; VF16UF1-NEXT: [[TMP26:%.*]] = add i64 0, [[TMP25]]
+; VF16UF1-NEXT: [[TMP27:%.*]] = add i64 [[TMP26]], [[STEP]]
+; VF16UF1-NEXT: [[TMP28:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP27]]
+; VF16UF1-NEXT: store i8 0, ptr [[TMP28]], align 1
+; VF16UF1-NEXT: br label %[[PRED_STORE_CONTINUE8]]
+; VF16UF1: [[PRED_STORE_CONTINUE8]]:
+; VF16UF1-NEXT: [[TMP29:%.*]] = extractelement <16 x i1> [[TMP3]], i32 5
+; VF16UF1-NEXT: br i1 [[TMP29]], label %[[PRED_STORE_IF9:.*]], label %[[PRED_STORE_CONTINUE10:.*]]
+; VF16UF1: [[PRED_STORE_IF9]]:
+; VF16UF1-NEXT: [[TMP30:%.*]] = mul i64 5, [[STEP]]
+; VF16UF1-NEXT: [[TMP31:%.*]] = add i64 0, [[TMP30]]
+; VF16UF1-NEXT: [[TMP32:%.*]] = add i64 [[TMP31]], [[STEP]]
+; VF16UF1-NEXT: [[TMP33:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP32]]
+; VF16UF1-NEXT: store i8 0, ptr [[TMP33]], align 1
+; VF16UF1-NEXT: br label %[[PRED_STORE_CONTINUE10]]
+; VF16UF1: [[PRED_STORE_CONTINUE10]]:
+; VF16UF1-NEXT: [[TMP34:%.*]] = extractelement <16 x i1> [[TMP3]], i32 6
+; VF16UF1-NEXT: br i1 [[TMP34]], label %[[PRED_STORE_IF11:.*]], label %[[PRED_STORE_CONTINUE12:.*]]
+; VF16UF1: [[PRED_STORE_IF11]]:
+; VF16UF1-NEXT: [[TMP35:%.*]] = mul i64 6, [[STEP]]
+; VF16UF1-NEXT: [[TMP36:%.*]] = add i64 0, [[TMP35]]
+; VF16UF1-NEXT: [[TMP37:%.*]] = add i64 [[TMP36]], [[STEP]]
+; VF16UF1-NEXT: [[TMP38:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP37]]
+; VF16UF1-NEXT: store i8 0, ptr [[TMP38]], align 1
+; VF16UF1-NEXT: br label %[[PRED_STORE_CONTINUE12]]
+; VF16UF1: [[PRED_STORE_CONTINUE12]]:
+; VF16UF1-NEXT: [[TMP39:%.*]] = extractelement <16 x i1> [[TMP3]], i32 7
+; VF16UF1-NEXT: br i1 [[TMP39]], label %[[PRED_STORE_IF13:.*]], label %[[PRED_STORE_CONTINUE14:.*]]
+; VF16UF1: [[PRED_STORE_IF13]]:
+; VF16UF1-NEXT: [[TMP40:%.*]] = mul i64 7, [[STEP]]
+; VF16UF1-NEXT: [[TMP41:%.*]] = add i64 0, [[TMP40]]
+; VF16UF1-NEXT: [[TMP42:%.*]] = add i64 [[TMP41]], [[STEP]]
+; VF16UF1-NEXT: [[TMP43:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP42]]
+; VF16UF1-NEXT: store i8 0, ptr [[TMP43]], align 1
+; VF16UF1-NEXT: br label %[[PRED_STORE_CONTINUE14]]
+; VF16UF1: [[PRED_STORE_CONTINUE14]]:
+; VF16UF1-NEXT: [[TMP44:%.*]] = extractelement <16 x i1> [[TMP3]], i32 8
+; VF16UF1-NEXT: br i1 [[TMP44]], label %[[PRED_STORE_IF15:.*]], label %[[PRED_STORE_CONTINUE16:.*]]
+; VF16UF1: [[PRED_STORE_IF15]]:
+; VF16UF1-NEXT: [[TMP45:%.*]] = mul i64 8, [[STEP]]
+; VF16UF1-NEXT: [[TMP46:%.*]] = add i64 0, [[TMP45]]
+; VF16UF1-NEXT: [[TMP47:%.*]] = add i64 [[TMP46]], [[STEP]]
+; VF16UF1-NEXT: [[TMP48:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP47]]
+; VF16UF1-NEXT: store i8 0, ptr [[TMP48]], align 1
+; VF16UF1-NEXT: br label %[[PRED_STORE_CONTINUE16]]
+; VF16UF1: [[PRED_STORE_CONTINUE16]]:
+; VF16UF1-NEXT: [[TMP49:%.*]] = extractelement <16 x i1> [[TMP3]], i32 9
+; VF16UF1-NEXT: br i1 [[TMP49]], label %[[PRED_STORE_IF17:.*]], label %[[PRED_STORE_CONTINUE18:.*]]
+; VF16UF1: [[PRED_STORE_IF17]]:
+; VF16UF1-NEXT: [[TMP50:%.*]] = mul i64 9, [[STEP]]
+; VF16UF1-NEXT: [[TMP51:%.*]] = add i64 0, [[TMP50]]
+; VF16UF1-NEXT: [[TMP52:%.*]] = add i64 [[TMP51]], [[STEP]]
+; VF16UF1-NEXT: [[TMP53:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP52]]
+; VF16UF1-NEXT: store i8 0, ptr [[TMP53]], align 1
+; VF16UF1-NEXT: br label %[[PRED_STORE_CONTINUE18]]
+; VF16UF1: [[PRED_STORE_CONTINUE18]]:
+; VF16UF1-NEXT: [[TMP54:%.*]] = extractelement <16 x i1> [[TMP3]], i32 10
+; VF16UF1-NEXT: br i1 [[TMP54]], label %[[PRED_STORE_IF19:.*]], label %[[PRED_STORE_CONTINUE20:.*]]
+; VF16UF1: [[PRED_STORE_IF19]]:
+; VF16UF1-NEXT: [[TMP55:%.*]] = mul i64 10, [[STEP]]
+; VF16UF1-NEXT: [[TMP56:%.*]] = add i64 0, [[TMP55]]
+; VF16UF1-NEXT: [[TMP57:%.*]] = add i64 [[TMP56]], [[STEP]]
+; VF16UF1-NEXT: [[TMP58:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP57]]
+; VF16UF1-NEXT: store i8 0, ptr [[TMP58]], align 1
+; VF16UF1-NEXT: br label %[[PRED_STORE_CONTINUE20]]
+; VF16UF1: [[PRED_STORE_CONTINUE20]]:
+; VF16UF1-NEXT: [[TMP59:%.*]] = extractelement <16 x i1> [[TMP3]], i32 11
+; VF16UF1-NEXT: br i1 [[TMP59]], label %[[PRED_STORE_IF21:.*]], label %[[PRED_STORE_CONTINUE22:.*]]
+; VF16UF1: [[PRED_STORE_IF21]]:
+; VF16UF1-NEXT: [[TMP60:%.*]] = mul i64 11, [[STEP]]
+; VF16UF1-NEXT: [[TMP61:%.*]] = add i64 0, [[TMP60]]
+; VF16UF1-NEXT: [[TMP62:%.*]] = add i64 [[TMP61]], [[STEP]]
+; VF16UF1-NEXT: [[TMP63:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP62]]
+; VF16UF1-NEXT: store i8 0, ptr [[TMP63]], align 1
+; VF16UF1-NEXT: br label %[[PRED_STORE_CONTINUE22]]
+; VF16UF1: [[PRED_STORE_CONTINUE22]]:
+; VF16UF1-NEXT: [[TMP64:%.*]] = extractelement <16 x i1> [[TMP3]], i32 12
+; VF16UF1-NEXT: br i1 [[TMP64]], label %[[PRED_STORE_IF23:.*]], label %[[PRED_STORE_CONTINUE24:.*]]
+; VF16UF1: [[PRED_STORE_IF23]]:
+; VF16UF1-NEXT: [[TMP65:%.*]] = mul i64 12, [[STEP]]
+; VF16UF1-NEXT: [[TMP66:%.*]] = add i64 0, [[TMP65]]
+; VF16UF1-NEXT: [[TMP67:%.*]] = add i64 [[TMP66]], [[STEP]]
+; VF16UF1-NEXT: [[TMP68:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP67]]
+; VF16UF1-NEXT: store i8 0, ptr [[TMP68]], align 1
+; VF16UF1-NEXT: br label %[[PRED_STORE_CONTINUE24]]
+; VF16UF1: [[PRED_STORE_CONTINUE24]]:
+; VF16UF1-NEXT: [[TMP69:%.*]] = extractelement <16 x i1> [[TMP3]], i32 13
+; VF16UF1-NEXT: br i1 [[TMP69]], label %[[PRED_STORE_IF25:.*]], label %[[PRED_STORE_CONTINUE26:.*]]
+; VF16UF1: [[PRED_STORE_IF25]]:
+; VF16UF1-NEXT: [[TMP70:%.*]] = mul i64 13, [[STEP]]
+; VF16UF1-NEXT: [[TMP71:%.*]] = add i64 0, [[TMP70]]
+; VF16UF1-NEXT: [[TMP72:%.*]] = add i64 [[TMP71]], [[STEP]]
+; VF16UF1-NEXT: [[TMP73:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP72]]
+; VF16UF1-NEXT: store i8 0, ptr [[TMP73]], align 1
+; VF16UF1-NEXT: br label %[[PRED_STORE_CONTINUE26]]
+; VF16UF1: [[PRED_STORE_CONTINUE26]]:
+; VF16UF1-NEXT: [[TMP74:%.*]] = extractelement <16 x i1> [[TMP3]], i32 14
+; VF16UF1-NEXT: br i1 [[TMP74]], label %[[PRED_STORE_IF27:.*]], label %[[PRED_STORE_CONTINUE28:.*]]
+; VF16UF1: [[PRED_STORE_IF27]]:
+; VF16UF1-NEXT: [[TMP75:%.*]] = mul i64 14, [[STEP]]
+; VF16UF1-NEXT: [[TMP76:%.*]] = add i64 0, [[TMP75]]
+; VF16UF1-NEXT: [[TMP77:%.*]] = add i64 [[TMP76]], [[STEP]]
+; VF16UF1-NEXT: [[TMP78:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP77]]
+; VF16UF1-NEXT: store i8 0, ptr [[TMP78]], align 1
+; VF16UF1-NEXT: br label %[[PRED_STORE_CONTINUE28]]
+; VF16UF1: [[PRED_STORE_CONTINUE28]]:
+; VF16UF1-NEXT: [[TMP79:%.*]] = extractelement <16 x i1> [[TMP3]], i32 15
+; VF16UF1-NEXT: br i1 [[TMP79]], label %[[PRED_STORE_IF29:.*]], label %[[PRED_STORE_CONTINUE30:.*]]
+; VF16UF1: [[PRED_STORE_IF29]]:
+; VF16UF1-NEXT: [[TMP80:%.*]] = mul i64 15, [[STEP]]
+; VF16UF1-NEXT: [[TMP81:%.*]] = add i64 0, [[TMP80]]
+; VF16UF1-NEXT: [[TMP82:%.*]] = add i64 [[TMP81]], [[STEP]]
+; VF16UF1-NEXT: [[TMP83:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP82]]
+; VF16UF1-NEXT: store i8 0, ptr [[TMP83]], align 1
+; VF16UF1-NEXT: br label %[[PRED_STORE_CONTINUE30]]
+; VF16UF1: [[PRED_STORE_CONTINUE30]]:
+; VF16UF1-NEXT: br label %[[MIDDLE_BLOCK:.*]]
+; VF16UF1: [[MIDDLE_BLOCK]]:
+; VF16UF1-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; VF16UF1: [[SCALAR_PH]]:
+; VF16UF1-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; VF16UF1-NEXT: br label %[[LOOP:.*]]
+; VF16UF1: [[LOOP]]:
+; VF16UF1-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; VF16UF1-NEXT: [[IV_NEXT]] = add i64 [[IV]], [[STEP]]
+; VF16UF1-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV_NEXT]]
+; VF16UF1-NEXT: store i8 0, ptr [[GEP_DST]], align 1
+; VF16UF1-NEXT: [[EC:%.*]] = icmp slt i64 [[IV_NEXT]], 16
+; VF16UF1-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP5:![0-9]+]]
+; VF16UF1: [[EXIT]]:
+; VF16UF1-NEXT: ret void
+;
+entry:
+ %c = icmp eq i64 %x, 65536
+ call void @llvm.assume(i1 %c)
+ %fr = freeze i64 %x
+ %step = add i64 %fr, -65534
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %iv.next = add i64 %iv, %step
+ %gep.dst = getelementptr i8, ptr %dst, i64 %iv.next
+ store i8 0, ptr %gep.dst, align 1
+ %ec = icmp slt i64 %iv.next, 16
+ br i1 %ec, label %loop, label %exit
+
+exit:
+ ret void
+}
+
;.
; VF8UF1: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; VF8UF1: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
@@ -715,16 +1251,19 @@ exit:
; VF8UF1: [[LOOP4]] = distinct !{[[LOOP4]], [[META2]], [[META1]]}
; VF8UF1: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]}
; VF8UF1: [[LOOP6]] = distinct !{[[LOOP6]], [[META2]], [[META1]]}
+; VF8UF1: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]}
;.
; VF8UF2: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; VF8UF2: [[META1]] = !{!"llvm.loop.unroll.runtime.disable"}
; VF8UF2: [[META2]] = !{!"llvm.loop.isvectorized", i32 1}
; VF8UF2: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]}
; VF8UF2: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
+; VF8UF2: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]}
;.
; VF16UF1: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; VF16UF1: [[META1]] = !{!"llvm.loop.unroll.runtime.disable"}
; VF16UF1: [[META2]] = !{!"llvm.loop.isvectorized", i32 1}
; VF16UF1: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]}
; VF16UF1: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
+; VF16UF1: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]}
;.
diff --git a/llvm/test/Transforms/MemProfContextDisambiguation/recursive.ll b/llvm/test/Transforms/MemProfContextDisambiguation/recursive.ll
new file mode 100644
index 0000000..759d511
--- /dev/null
+++ b/llvm/test/Transforms/MemProfContextDisambiguation/recursive.ll
@@ -0,0 +1,159 @@
+;; Test recursion handling during cloning.
+;;
+;; Original code looks like:
+;;
+;; #include <stdlib.h>
+;; #include <string.h>
+;; #include <unistd.h>
+;; __attribute((noinline)) char *D() {
+;; return new char[10];
+;; }
+;; __attribute((noinline)) char *B(int n);
+;; __attribute((noinline)) char *C(int n) {
+;; if (!n) {
+;; return D();
+;; }
+;; return B(n-1);
+;; }
+;; __attribute((noinline)) char *B(int n) {
+;; return C(n);
+;; }
+;; int main(int argc, char **argv) {
+;; char *x = B(1);
+;; char *y = B(1);
+;; char *z = B(0);
+;; memset(x, 0, 10);
+;; memset(y, 0, 10);
+;; memset(z, 0, 10);
+;; free(x);
+;; sleep(200);
+;; free(y);
+;; free(z);
+;; return 0;
+;; }
+;;
+;; The IR was then reduced using llvm-reduce with the expected FileCheck input.
+
+;; By default we should enable cloning of contexts involved in recursive
+;; cycles, but not through the cycle itself. That is, until full support for
+;; recursion is added, the cloned recursive call from C back to B (line 12) will
+;; not be updated to call a clone.
+; RUN: opt -passes=memprof-context-disambiguation -supports-hot-cold-new \
+; RUN: -memprof-verify-ccg -memprof-verify-nodes \
+; RUN: -pass-remarks=memprof-context-disambiguation \
+; RUN: %s -S 2>&1 | FileCheck %s \
+; RUN:   --implicit-check-not "memprof_recursive.cc:12:10: call in clone _Z1Ci.memprof.1 assigned" \
+; RUN: --check-prefix=ALL --check-prefix=ALLOW-RECUR-CALLSITES --check-prefix=ALLOW-RECUR-CONTEXTS
+
+;; Skipping recursive callsites should result in no cloning.
+; RUN: opt -passes=memprof-context-disambiguation -supports-hot-cold-new \
+; RUN: -memprof-verify-ccg -memprof-verify-nodes \
+; RUN: -pass-remarks=memprof-context-disambiguation \
+; RUN: -memprof-allow-recursive-callsites=false \
+; RUN: %s -S 2>&1 | FileCheck %s \
+; RUN:   --implicit-check-not "memprof_recursive.cc:12:10: call in clone _Z1Ci.memprof.1 assigned" \
+; RUN: --implicit-check-not="created clone" \
+; RUN: --implicit-check-not="marked with memprof allocation attribute cold" \
+; RUN: --check-prefix=ALL
+
+;; Skipping recursive contexts should prevent a spurious call to the cloned version
+;; of B from the context starting at memprof_recursive.cc:19:13, which is actually
+;; recursive (until that support is added).
+; RUN: opt -passes=memprof-context-disambiguation -supports-hot-cold-new \
+; RUN: -memprof-verify-ccg -memprof-verify-nodes \
+; RUN: -pass-remarks=memprof-context-disambiguation \
+; RUN: -memprof-allow-recursive-contexts=false \
+; RUN: %s -S 2>&1 | FileCheck %s \
+; RUN:   --implicit-check-not "memprof_recursive.cc:12:10: call in clone _Z1Ci.memprof.1 assigned" \
+; RUN: --check-prefix=ALL --check-prefix=ALLOW-RECUR-CALLSITES --check-prefix=SKIP-RECUR-CONTEXTS
+
+; ALLOW-RECUR-CALLSITES: memprof_recursive.cc:4:0: created clone _Z1Dv.memprof.1
+; ALLOW-RECUR-CALLSITES: memprof_recursive.cc:8:0: created clone _Z1Ci.memprof.1
+; ALLOW-RECUR-CALLSITES: memprof_recursive.cc:14:0: created clone _Z1Bi.memprof.1
+; ALLOW-RECUR-CALLSITES: memprof_recursive.cc:20:13: call in clone main assigned to call function clone _Z1Bi.memprof.1
+;; We should only call the cold clone for the recursive context if we have
+;; enabled recursive contexts via -memprof-allow-recursive-contexts=true (default).
+; ALLOW-RECUR-CONTEXTS: memprof_recursive.cc:19:13: call in clone main assigned to call function clone _Z1Bi.memprof.1
+; ALLOW-RECUR-CALLSITES: memprof_recursive.cc:15:10: call in clone _Z1Bi.memprof.1 assigned to call function clone _Z1Ci.memprof.1
+; ALLOW-RECUR-CALLSITES: memprof_recursive.cc:10:12: call in clone _Z1Ci.memprof.1 assigned to call function clone _Z1Dv.memprof.1
+; ALLOW-RECUR-CALLSITES: memprof_recursive.cc:5:10: call in clone _Z1Dv.memprof.1 marked with memprof allocation attribute cold
+;; We should call the original B for the recursive context if we have
+;; disabled recursive contexts via -memprof-allow-recursive-contexts=false.
+; SKIP-RECUR-CONTEXTS: memprof_recursive.cc:19:13: call in clone main assigned to call function clone _Z1Bi
+; ALLOW-RECUR-CALLSITES: memprof_recursive.cc:12:10: call in clone _Z1Ci assigned to call function clone _Z1Bi
+; ALLOW-RECUR-CALLSITES: memprof_recursive.cc:18:13: call in clone main assigned to call function clone _Z1Bi
+; ALLOW-RECUR-CALLSITES: memprof_recursive.cc:15:10: call in clone _Z1Bi assigned to call function clone _Z1Ci
+; ALLOW-RECUR-CALLSITES: memprof_recursive.cc:10:12: call in clone _Z1Ci assigned to call function clone _Z1Dv
+; ALL: memprof_recursive.cc:5:10: call in clone _Z1Dv marked with memprof allocation attribute notcold
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define ptr @_Z1Dv() !dbg !3 {
+entry:
+ %call = tail call ptr @_Znam(i64 10), !dbg !6, !memprof !7, !callsite !14
+ ret ptr null
+}
+
+define ptr @_Z1Ci(i32 %n) !dbg !15 {
+entry:
+ %call = tail call ptr @_Z1Dv(), !dbg !16, !callsite !17
+ br label %return
+
+if.end: ; No predecessors!
+ %call1 = tail call ptr @_Z1Bi(i32 0), !dbg !18, !callsite !19
+ br label %return
+
+return: ; preds = %if.end, %entry
+ ret ptr null
+}
+
+define ptr @_Z1Bi(i32 %n) !dbg !20 {
+entry:
+ %call = tail call ptr @_Z1Ci(i32 0), !dbg !21, !callsite !22
+ ret ptr null
+}
+
+define i32 @main() {
+entry:
+ %call = tail call ptr @_Z1Bi(i32 0), !dbg !23, !callsite !25
+ %call1 = tail call ptr @_Z1Bi(i32 0), !dbg !26, !callsite !27
+ %call2 = tail call ptr @_Z1Bi(i32 0), !dbg !28, !callsite !29
+ ret i32 0
+}
+
+declare ptr @_Znam(i64)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !1, producer: "clang version 20.0.0git (https://github.com/llvm/llvm-project.git 7aec6dc477f8148ed066d10dfc7a012a51b6599c)", isOptimized: true, runtimeVersion: 0, emissionKind: LineTablesOnly, splitDebugInlining: false, debugInfoForProfiling: true, nameTableKind: None)
+!1 = !DIFile(filename: "memprof_recursive.cc", directory: ".", checksumkind: CSK_MD5, checksum: "2f15f63b187a0e0d40e7fdd18b10576a")
+!2 = !{i32 2, !"Debug Info Version", i32 3}
+!3 = distinct !DISubprogram(name: "D", linkageName: "_Z1Dv", scope: !1, file: !1, line: 4, type: !4, scopeLine: 4, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!4 = !DISubroutineType(types: !5)
+!5 = !{}
+!6 = !DILocation(line: 5, column: 10, scope: !3)
+!7 = !{!8, !10, !12}
+!8 = !{!9, !"cold"}
+!9 = !{i64 6541423618768552252, i64 -200552803509692312, i64 -2954124005641725917, i64 6307901912192269588}
+!10 = !{!11, !"notcold"}
+!11 = !{i64 6541423618768552252, i64 -200552803509692312, i64 -2954124005641725917, i64 -7155190423157709404, i64 -2954124005641725917, i64 8632435727821051414}
+!12 = !{!13, !"cold"}
+!13 = !{i64 6541423618768552252, i64 -200552803509692312, i64 -2954124005641725917, i64 -7155190423157709404, i64 -2954124005641725917, i64 -3421689549917153178}
+!14 = !{i64 6541423618768552252}
+!15 = distinct !DISubprogram(name: "C", linkageName: "_Z1Ci", scope: !1, file: !1, line: 8, type: !4, scopeLine: 8, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!16 = !DILocation(line: 10, column: 12, scope: !15)
+!17 = !{i64 -200552803509692312}
+!18 = !DILocation(line: 12, column: 10, scope: !15)
+!19 = !{i64 -7155190423157709404}
+!20 = distinct !DISubprogram(name: "B", linkageName: "_Z1Bi", scope: !1, file: !1, line: 14, type: !4, scopeLine: 14, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!21 = !DILocation(line: 15, column: 10, scope: !20)
+!22 = !{i64 -2954124005641725917}
+!23 = !DILocation(line: 18, column: 13, scope: !24)
+!24 = distinct !DISubprogram(name: "main", scope: !1, file: !1, line: 17, type: !4, scopeLine: 17, flags: DIFlagPrototyped | DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!25 = !{i64 8632435727821051414}
+!26 = !DILocation(line: 19, column: 13, scope: !24)
+!27 = !{i64 -3421689549917153178}
+!28 = !DILocation(line: 20, column: 13, scope: !24)
+!29 = !{i64 6307901912192269588}
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/block_scaling_decompr_8bit.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/block_scaling_decompr_8bit.ll
index 7d95244..9f3e09d 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/block_scaling_decompr_8bit.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/block_scaling_decompr_8bit.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt -passes='default<O3>' -S %s | FileCheck %s
+; RUN: opt -passes="default<O3>" -S %s | FileCheck %s
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
target triple = "aarch64"
@@ -416,21 +416,23 @@ define internal noundef <8 x i16> @_ZL24cmplx_mul_combined_re_im11__Int16x8_t20c
; CHECK-NEXT: [[SCALE_SROA_0_0_EXTRACT_TRUNC:%.*]] = trunc i64 [[SCALE_COERCE]] to i16
; CHECK-NEXT: [[SCALE_SROA_2_0_EXTRACT_SHIFT36:%.*]] = lshr i64 [[SCALE_COERCE]], 16
; CHECK-NEXT: [[SCALE_SROA_2_0_EXTRACT_TRUNC:%.*]] = trunc i64 [[SCALE_SROA_2_0_EXTRACT_SHIFT36]] to i16
+; CHECK-NEXT: [[SHUFFLE_I:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
; CHECK-NEXT: [[VECINIT_I19:%.*]] = insertelement <8 x i16> poison, i16 [[SCALE_SROA_0_0_EXTRACT_TRUNC]], i64 0
; CHECK-NEXT: [[VECINIT_I:%.*]] = insertelement <8 x i16> poison, i16 [[SCALE_SROA_2_0_EXTRACT_TRUNC]], i64 0
; CHECK-NEXT: [[VECINIT7_I:%.*]] = shufflevector <8 x i16> [[VECINIT_I]], <8 x i16> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: [[VQNEGQ_V1_I:%.*]] = tail call <8 x i16> @llvm.aarch64.neon.sqneg.v8i16(<8 x i16> [[VECINIT7_I]])
+; CHECK-NEXT: [[VBSL5_I:%.*]] = shufflevector <8 x i16> [[VQNEGQ_V1_I]], <8 x i16> [[VECINIT_I]], <8 x i32> <i32 0, i32 8, i32 2, i32 8, i32 4, i32 8, i32 6, i32 8>
; CHECK-NEXT: [[SHUFFLE_I85:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: [[SHUFFLE_I82:%.*]] = shufflevector <8 x i16> [[VECINIT_I19]], <8 x i16> poison, <4 x i32> zeroinitializer
; CHECK-NEXT: [[VQDMULL_V2_I72:%.*]] = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> [[SHUFFLE_I85]], <4 x i16> [[SHUFFLE_I82]])
; CHECK-NEXT: [[SHUFFLE_I97:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[VQDMULL_V2_I:%.*]] = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> [[SHUFFLE_I97]], <4 x i16> [[SHUFFLE_I82]])
-; CHECK-NEXT: [[SHUFFLE_I79:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
-; CHECK-NEXT: [[SHUFFLE_I76:%.*]] = shufflevector <8 x i16> [[VQNEGQ_V1_I]], <8 x i16> [[VECINIT_I]], <4 x i32> <i32 0, i32 8, i32 2, i32 8>
+; CHECK-NEXT: [[SHUFFLE_I79:%.*]] = shufflevector <8 x i16> [[SHUFFLE_I]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[SHUFFLE_I76:%.*]] = shufflevector <8 x i16> [[VBSL5_I]], <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-NEXT: [[VQDMLAL2_I106:%.*]] = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> [[SHUFFLE_I79]], <4 x i16> [[SHUFFLE_I76]])
; CHECK-NEXT: [[VQDMLAL_V3_I107:%.*]] = tail call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> [[VQDMULL_V2_I72]], <4 x i32> [[VQDMLAL2_I106]])
-; CHECK-NEXT: [[SHUFFLE_I91:%.*]] = shufflevector <8 x i16> [[A]], <8 x i16> poison, <4 x i32> <i32 5, i32 4, i32 7, i32 6>
-; CHECK-NEXT: [[SHUFFLE_I88:%.*]] = shufflevector <8 x i16> [[VQNEGQ_V1_I]], <8 x i16> [[VECINIT_I]], <4 x i32> <i32 4, i32 8, i32 6, i32 8>
+; CHECK-NEXT: [[SHUFFLE_I91:%.*]] = shufflevector <8 x i16> [[SHUFFLE_I]], <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT: [[SHUFFLE_I88:%.*]] = shufflevector <8 x i16> [[VBSL5_I]], <8 x i16> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
; CHECK-NEXT: [[VQDMLAL2_I:%.*]] = tail call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> [[SHUFFLE_I91]], <4 x i16> [[SHUFFLE_I88]])
; CHECK-NEXT: [[VQDMLAL_V3_I:%.*]] = tail call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> [[VQDMULL_V2_I]], <4 x i32> [[VQDMLAL2_I]])
; CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x i32> [[VQDMLAL_V3_I107]] to <8 x i16>
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/hadd.ll b/llvm/test/Transforms/PhaseOrdering/X86/hadd.ll
index 3c72d38..a4aea02 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/hadd.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/hadd.ll
@@ -566,22 +566,16 @@ define <4 x float> @add_v4f32_012u(<4 x float> %a, <4 x float> %b) {
; SSE2-NEXT: ret <4 x float> [[RESULT1]]
;
; SSE4-LABEL: @add_v4f32_012u(
-; SSE4-NEXT: [[SHIFT:%.*]] = shufflevector <4 x float> [[B:%.*]], <4 x float> poison, <4 x i32> <i32 1, i32 poison, i32 poison, i32 poison>
-; SSE4-NEXT: [[TMP1:%.*]] = fadd <4 x float> [[B]], [[SHIFT]]
-; SSE4-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[A:%.*]], <4 x float> poison, <4 x i32> <i32 1, i32 2, i32 poison, i32 poison>
-; SSE4-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[A]], <4 x float> poison, <4 x i32> <i32 0, i32 3, i32 poison, i32 poison>
+; SSE4-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i32> <i32 1, i32 2, i32 4, i32 poison>
+; SSE4-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[A]], <4 x float> [[B]], <4 x i32> <i32 0, i32 3, i32 5, i32 poison>
; SSE4-NEXT: [[TMP4:%.*]] = fadd <4 x float> [[TMP2]], [[TMP3]]
-; SSE4-NEXT: [[RESULT:%.*]] = shufflevector <4 x float> [[TMP4]], <4 x float> [[TMP1]], <4 x i32> <i32 0, i32 1, i32 4, i32 poison>
-; SSE4-NEXT: ret <4 x float> [[RESULT]]
+; SSE4-NEXT: ret <4 x float> [[TMP4]]
;
; AVX2-LABEL: @add_v4f32_012u(
-; AVX2-NEXT: [[SHIFT:%.*]] = shufflevector <4 x float> [[B:%.*]], <4 x float> poison, <4 x i32> <i32 1, i32 poison, i32 poison, i32 poison>
-; AVX2-NEXT: [[TMP1:%.*]] = fadd <4 x float> [[B]], [[SHIFT]]
-; AVX2-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[A:%.*]], <4 x float> poison, <4 x i32> <i32 1, i32 2, i32 poison, i32 poison>
-; AVX2-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[A]], <4 x float> poison, <4 x i32> <i32 0, i32 3, i32 poison, i32 poison>
+; AVX2-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i32> <i32 1, i32 2, i32 4, i32 poison>
+; AVX2-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[A]], <4 x float> [[B]], <4 x i32> <i32 0, i32 3, i32 5, i32 poison>
; AVX2-NEXT: [[TMP4:%.*]] = fadd <4 x float> [[TMP2]], [[TMP3]]
-; AVX2-NEXT: [[RESULT:%.*]] = shufflevector <4 x float> [[TMP4]], <4 x float> [[TMP1]], <4 x i32> <i32 0, i32 1, i32 4, i32 poison>
-; AVX2-NEXT: ret <4 x float> [[RESULT]]
+; AVX2-NEXT: ret <4 x float> [[TMP4]]
;
; AVX512-LABEL: @add_v4f32_012u(
; AVX512-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i32> <i32 0, i32 2, i32 4, i32 poison>
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/hsub.ll b/llvm/test/Transforms/PhaseOrdering/X86/hsub.ll
index cf1c948..bcb316a 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/hsub.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/hsub.ll
@@ -566,22 +566,16 @@ define <4 x float> @sub_v4f32_012u(<4 x float> %a, <4 x float> %b) {
; SSE2-NEXT: ret <4 x float> [[RESULT1]]
;
; SSE4-LABEL: @sub_v4f32_012u(
-; SSE4-NEXT: [[SHIFT:%.*]] = shufflevector <4 x float> [[B:%.*]], <4 x float> poison, <4 x i32> <i32 1, i32 poison, i32 poison, i32 poison>
-; SSE4-NEXT: [[TMP1:%.*]] = fsub <4 x float> [[B]], [[SHIFT]]
-; SSE4-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[A:%.*]], <4 x float> poison, <4 x i32> <i32 0, i32 2, i32 poison, i32 poison>
-; SSE4-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[A]], <4 x float> poison, <4 x i32> <i32 1, i32 3, i32 poison, i32 poison>
+; SSE4-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i32> <i32 0, i32 2, i32 4, i32 poison>
+; SSE4-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[A]], <4 x float> [[B]], <4 x i32> <i32 1, i32 3, i32 5, i32 poison>
; SSE4-NEXT: [[TMP4:%.*]] = fsub <4 x float> [[TMP2]], [[TMP3]]
-; SSE4-NEXT: [[RESULT:%.*]] = shufflevector <4 x float> [[TMP4]], <4 x float> [[TMP1]], <4 x i32> <i32 0, i32 1, i32 4, i32 poison>
-; SSE4-NEXT: ret <4 x float> [[RESULT]]
+; SSE4-NEXT: ret <4 x float> [[TMP4]]
;
; AVX2-LABEL: @sub_v4f32_012u(
-; AVX2-NEXT: [[SHIFT:%.*]] = shufflevector <4 x float> [[B:%.*]], <4 x float> poison, <4 x i32> <i32 1, i32 poison, i32 poison, i32 poison>
-; AVX2-NEXT: [[TMP1:%.*]] = fsub <4 x float> [[B]], [[SHIFT]]
-; AVX2-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[A:%.*]], <4 x float> poison, <4 x i32> <i32 0, i32 2, i32 poison, i32 poison>
-; AVX2-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[A]], <4 x float> poison, <4 x i32> <i32 1, i32 3, i32 poison, i32 poison>
+; AVX2-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i32> <i32 0, i32 2, i32 4, i32 poison>
+; AVX2-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[A]], <4 x float> [[B]], <4 x i32> <i32 1, i32 3, i32 5, i32 poison>
; AVX2-NEXT: [[TMP4:%.*]] = fsub <4 x float> [[TMP2]], [[TMP3]]
-; AVX2-NEXT: [[RESULT:%.*]] = shufflevector <4 x float> [[TMP4]], <4 x float> [[TMP1]], <4 x i32> <i32 0, i32 1, i32 4, i32 poison>
-; AVX2-NEXT: ret <4 x float> [[RESULT]]
+; AVX2-NEXT: ret <4 x float> [[TMP4]]
;
; AVX512-LABEL: @sub_v4f32_012u(
; AVX512-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[A:%.*]], <4 x float> [[B:%.*]], <4 x i32> <i32 0, i32 2, i32 4, i32 poison>
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/reduce-fadd.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/reduce-fadd.ll
index 6dceabe..00a4417 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/reduce-fadd.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/reduce-fadd.ll
@@ -80,16 +80,11 @@ define half @reduce_fast_half8(<8 x half> %vec8) {
; NOFP16-LABEL: define half @reduce_fast_half8(
; NOFP16-SAME: <8 x half> [[VEC8:%.*]]) #[[ATTR0]] {
; NOFP16-NEXT: [[ENTRY:.*:]]
-; NOFP16-NEXT: [[ELT4:%.*]] = extractelement <8 x half> [[VEC8]], i64 4
-; NOFP16-NEXT: [[ELT5:%.*]] = extractelement <8 x half> [[VEC8]], i64 5
-; NOFP16-NEXT: [[ELT6:%.*]] = extractelement <8 x half> [[VEC8]], i64 6
-; NOFP16-NEXT: [[ELT7:%.*]] = extractelement <8 x half> [[VEC8]], i64 7
; NOFP16-NEXT: [[TMP0:%.*]] = shufflevector <8 x half> [[VEC8]], <8 x half> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; NOFP16-NEXT: [[TMP1:%.*]] = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> [[TMP0]])
-; NOFP16-NEXT: [[OP_RDX:%.*]] = fadd fast half [[TMP1]], [[ELT4]]
-; NOFP16-NEXT: [[OP_RDX1:%.*]] = fadd fast half [[ELT5]], [[ELT6]]
-; NOFP16-NEXT: [[OP_RDX2:%.*]] = fadd fast half [[OP_RDX]], [[OP_RDX1]]
-; NOFP16-NEXT: [[OP_RDX3:%.*]] = fadd fast half [[OP_RDX2]], [[ELT7]]
+; NOFP16-NEXT: [[TMP2:%.*]] = shufflevector <8 x half> [[VEC8]], <8 x half> poison, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+; NOFP16-NEXT: [[TMP3:%.*]] = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH0000, <4 x half> [[TMP2]])
+; NOFP16-NEXT: [[OP_RDX3:%.*]] = fadd fast half [[TMP1]], [[TMP3]]
; NOFP16-NEXT: ret half [[OP_RDX3]]
;
; FULLFP16-LABEL: define half @reduce_fast_half8(
diff --git a/llvm/test/Transforms/SLPVectorizer/RISCV/long-gep-chains.ll b/llvm/test/Transforms/SLPVectorizer/RISCV/long-gep-chains.ll
new file mode 100644
index 0000000..cf1ed54
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/RISCV/long-gep-chains.ll
@@ -0,0 +1,76 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -passes=slp-vectorizer -mtriple=riscv64-unknown-linux -mattr=+v < %s | FileCheck %s
+
+define i64 @test(ptr %arg, i32 %arg1, i64 %i) {
+; CHECK-LABEL: define i64 @test(
+; CHECK-SAME: ptr [[ARG:%.*]], i32 [[ARG1:%.*]], i64 [[I:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[BB:.*:]]
+; CHECK-NEXT: [[I2:%.*]] = getelementptr i8, ptr [[ARG]], i64 [[I]]
+; CHECK-NEXT: [[I3:%.*]] = getelementptr i8, ptr [[I2]], i64 [[I]]
+; CHECK-NEXT: [[I4:%.*]] = getelementptr i8, ptr [[I3]], i64 [[I]]
+; CHECK-NEXT: [[I5:%.*]] = getelementptr i8, ptr [[I4]], i64 [[I]]
+; CHECK-NEXT: [[I6:%.*]] = getelementptr i8, ptr [[I5]], i64 [[I]]
+; CHECK-NEXT: [[I7:%.*]] = getelementptr i8, ptr [[I6]], i64 [[I]]
+; CHECK-NEXT: [[I8:%.*]] = getelementptr i8, ptr [[I7]], i64 [[I]]
+; CHECK-NEXT: [[I9:%.*]] = getelementptr i8, ptr [[I8]], i64 [[I]]
+; CHECK-NEXT: [[I10:%.*]] = getelementptr i8, ptr [[I9]], i64 [[I]]
+; CHECK-NEXT: [[I11:%.*]] = getelementptr i8, ptr [[I10]], i64 [[I]]
+; CHECK-NEXT: [[I12:%.*]] = getelementptr i8, ptr [[I11]], i64 [[I]]
+; CHECK-NEXT: [[I13:%.*]] = getelementptr i8, ptr [[I12]], i64 [[I]]
+; CHECK-NEXT: [[I14:%.*]] = getelementptr i8, ptr [[I13]], i64 [[I]]
+; CHECK-NEXT: [[I140:%.*]] = load i8, ptr [[I14]], align 1
+; CHECK-NEXT: [[I1412:%.*]] = zext i8 [[I140]] to i32
+; CHECK-NEXT: [[I142:%.*]] = mul i32 [[ARG1]], [[I1412]]
+; CHECK-NEXT: [[I143:%.*]] = getelementptr i8, ptr [[I13]], i64 15
+; CHECK-NEXT: [[I144:%.*]] = load i8, ptr [[I143]], align 1
+; CHECK-NEXT: [[I1453:%.*]] = zext i8 [[I144]] to i32
+; CHECK-NEXT: [[I146:%.*]] = mul i32 [[ARG1]], [[I1453]]
+; CHECK-NEXT: [[I147:%.*]] = getelementptr i8, ptr [[I13]], i64 14
+; CHECK-NEXT: [[I148:%.*]] = load i8, ptr [[I147]], align 1
+; CHECK-NEXT: [[I1494:%.*]] = zext i8 [[I148]] to i32
+; CHECK-NEXT: [[I150:%.*]] = mul i32 [[ARG1]], [[I1494]]
+; CHECK-NEXT: [[I151:%.*]] = getelementptr i8, ptr [[I13]], i64 13
+; CHECK-NEXT: [[I152:%.*]] = load i8, ptr [[I151]], align 1
+; CHECK-NEXT: [[I1535:%.*]] = zext i8 [[I152]] to i32
+; CHECK-NEXT: [[I154:%.*]] = mul i32 [[ARG1]], [[I1535]]
+; CHECK-NEXT: [[I1311:%.*]] = or i32 [[I142]], [[I146]]
+; CHECK-NEXT: [[I1312:%.*]] = or i32 [[I1311]], [[I150]]
+; CHECK-NEXT: [[I1313:%.*]] = or i32 [[I1312]], [[I154]]
+; CHECK-NEXT: [[I1536:%.*]] = zext i32 [[I1313]] to i64
+; CHECK-NEXT: ret i64 [[I1536]]
+;
+bb:
+ %i2 = getelementptr i8, ptr %arg, i64 %i
+ %i3 = getelementptr i8, ptr %i2, i64 %i
+ %i4 = getelementptr i8, ptr %i3, i64 %i
+ %i5 = getelementptr i8, ptr %i4, i64 %i
+ %i6 = getelementptr i8, ptr %i5, i64 %i
+ %i7 = getelementptr i8, ptr %i6, i64 %i
+ %i8 = getelementptr i8, ptr %i7, i64 %i
+ %i9 = getelementptr i8, ptr %i8, i64 %i
+ %i10 = getelementptr i8, ptr %i9, i64 %i
+ %i11 = getelementptr i8, ptr %i10, i64 %i
+ %i12 = getelementptr i8, ptr %i11, i64 %i
+ %i13 = getelementptr i8, ptr %i12, i64 %i
+ %i14 = getelementptr i8, ptr %i13, i64 %i
+ %i140 = load i8, ptr %i14, align 1
+ %i1412 = zext i8 %i140 to i32
+ %i142 = mul i32 %arg1, %i1412
+ %i143 = getelementptr i8, ptr %i13, i64 15
+ %i144 = load i8, ptr %i143, align 1
+ %i1453 = zext i8 %i144 to i32
+ %i146 = mul i32 %arg1, %i1453
+ %i147 = getelementptr i8, ptr %i13, i64 14
+ %i148 = load i8, ptr %i147, align 1
+ %i1494 = zext i8 %i148 to i32
+ %i150 = mul i32 %arg1, %i1494
+ %i151 = getelementptr i8, ptr %i13, i64 13
+ %i152 = load i8, ptr %i151, align 1
+ %i1535 = zext i8 %i152 to i32
+ %i154 = mul i32 %arg1, %i1535
+ %i1311 = or i32 %i142, %i146
+ %i1312 = or i32 %i1311, %i150
+ %i1313 = or i32 %i1312, %i154
+ %i1536 = zext i32 %i1313 to i64
+ ret i64 %i1536
+}
diff --git a/llvm/test/Transforms/StraightLineStrengthReduce/NVPTX/speculative-slsr.ll b/llvm/test/Transforms/StraightLineStrengthReduce/NVPTX/speculative-slsr.ll
index 92766d5..420e844 100644
--- a/llvm/test/Transforms/StraightLineStrengthReduce/NVPTX/speculative-slsr.ll
+++ b/llvm/test/Transforms/StraightLineStrengthReduce/NVPTX/speculative-slsr.ll
@@ -11,7 +11,7 @@ target triple = "nvptx64-nvidia-cuda"
; use((b + i) * s);
; }
; }
-define void @foo(i32 %b, i32 %s) {
+define ptx_kernel void @foo(i32 %b, i32 %s) {
; CHECK-LABEL: .visible .entry foo(
entry:
; CHECK: ld.param.u32 [[s:%r[0-9]+]], [foo_param_1];
@@ -65,7 +65,3 @@ for.inc.3: ; preds = %if.then.3, %for.inc
declare zeroext i1 @cond(i32)
declare void @use(i32)
-
-!nvvm.annotations = !{!0}
-
-!0 = !{ptr @foo, !"kernel", i32 1}
diff --git a/llvm/test/Transforms/VectorCombine/X86/extract-binop-inseltpoison.ll b/llvm/test/Transforms/VectorCombine/X86/extract-binop-inseltpoison.ll
index 6ef18e6..f3b7f7b 100644
--- a/llvm/test/Transforms/VectorCombine/X86/extract-binop-inseltpoison.ll
+++ b/llvm/test/Transforms/VectorCombine/X86/extract-binop-inseltpoison.ll
@@ -465,28 +465,13 @@ define <4 x float> @ins_bo_ext_ext_uses(<4 x float> %a, <4 x float> %b) {
}
define <4 x float> @PR34724(<4 x float> %a, <4 x float> %b) {
-; SSE-LABEL: @PR34724(
-; SSE-NEXT: [[A0:%.*]] = extractelement <4 x float> [[A:%.*]], i32 0
-; SSE-NEXT: [[A1:%.*]] = extractelement <4 x float> [[A]], i32 1
-; SSE-NEXT: [[SHIFT2:%.*]] = shufflevector <4 x float> [[A]], <4 x float> [[B1:%.*]], <4 x i32> <i32 poison, i32 2, i32 4, i32 6>
-; SSE-NEXT: [[B:%.*]] = shufflevector <4 x float> [[A]], <4 x float> [[B1]], <4 x i32> <i32 poison, i32 3, i32 5, i32 7>
-; SSE-NEXT: [[TMP3:%.*]] = fadd <4 x float> [[SHIFT2]], [[B]]
-; SSE-NEXT: ret <4 x float> [[TMP3]]
-;
-; AVX-LABEL: @PR34724(
-; AVX-NEXT: [[A0:%.*]] = extractelement <4 x float> [[A:%.*]], i32 0
-; AVX-NEXT: [[A1:%.*]] = extractelement <4 x float> [[A]], i32 1
-; AVX-NEXT: [[SHIFT:%.*]] = shufflevector <4 x float> [[A]], <4 x float> poison, <4 x i32> <i32 poison, i32 poison, i32 3, i32 poison>
-; AVX-NEXT: [[TMP1:%.*]] = fadd <4 x float> [[A]], [[SHIFT]]
-; AVX-NEXT: [[SHIFT1:%.*]] = shufflevector <4 x float> [[B:%.*]], <4 x float> poison, <4 x i32> <i32 1, i32 poison, i32 poison, i32 poison>
-; AVX-NEXT: [[TMP2:%.*]] = fadd <4 x float> [[B]], [[SHIFT1]]
-; AVX-NEXT: [[B01:%.*]] = extractelement <4 x float> [[TMP2]], i32 0
-; AVX-NEXT: [[SHIFT2:%.*]] = shufflevector <4 x float> [[B]], <4 x float> poison, <4 x i32> <i32 poison, i32 poison, i32 poison, i32 2>
-; AVX-NEXT: [[TMP3:%.*]] = fadd <4 x float> [[SHIFT2]], [[B]]
-; AVX-NEXT: [[V1:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> <i32 poison, i32 2, i32 poison, i32 poison>
-; AVX-NEXT: [[V2:%.*]] = insertelement <4 x float> [[V1]], float [[B01]], i32 2
-; AVX-NEXT: [[V3:%.*]] = shufflevector <4 x float> [[V2]], <4 x float> [[TMP3]], <4 x i32> <i32 0, i32 1, i32 2, i32 7>
-; AVX-NEXT: ret <4 x float> [[V3]]
+; CHECK-LABEL: @PR34724(
+; CHECK-NEXT: [[A0:%.*]] = extractelement <4 x float> [[A:%.*]], i32 0
+; CHECK-NEXT: [[A1:%.*]] = extractelement <4 x float> [[A]], i32 1
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[A]], <4 x float> [[B:%.*]], <4 x i32> <i32 poison, i32 2, i32 4, i32 6>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[A]], <4 x float> [[B]], <4 x i32> <i32 poison, i32 3, i32 5, i32 7>
+; CHECK-NEXT: [[V3:%.*]] = fadd <4 x float> [[TMP1]], [[TMP2]]
+; CHECK-NEXT: ret <4 x float> [[V3]]
;
%a0 = extractelement <4 x float> %a, i32 0
%a1 = extractelement <4 x float> %a, i32 1
diff --git a/llvm/test/Transforms/VectorCombine/X86/extract-binop.ll b/llvm/test/Transforms/VectorCombine/X86/extract-binop.ll
index 307fbf7..c125b73 100644
--- a/llvm/test/Transforms/VectorCombine/X86/extract-binop.ll
+++ b/llvm/test/Transforms/VectorCombine/X86/extract-binop.ll
@@ -465,34 +465,19 @@ define <4 x float> @ins_bo_ext_ext_uses(<4 x float> %a, <4 x float> %b) {
}
define <4 x float> @PR34724(<4 x float> %a, <4 x float> %b) {
-; SSE-LABEL: @PR34724(
-; SSE-NEXT: [[A0:%.*]] = extractelement <4 x float> [[A:%.*]], i32 0
-; SSE-NEXT: [[A1:%.*]] = extractelement <4 x float> [[A]], i32 1
-; SSE-NEXT: [[SHIFT:%.*]] = shufflevector <4 x float> [[A]], <4 x float> poison, <4 x i32> <i32 poison, i32 poison, i32 3, i32 poison>
-; SSE-NEXT: [[TMP1:%.*]] = fadd <4 x float> [[A]], [[SHIFT]]
-; SSE-NEXT: [[SHIFT1:%.*]] = shufflevector <4 x float> [[B:%.*]], <4 x float> poison, <4 x i32> <i32 1, i32 poison, i32 poison, i32 poison>
-; SSE-NEXT: [[TMP2:%.*]] = fadd <4 x float> [[B]], [[SHIFT1]]
-; SSE-NEXT: [[SHIFT2:%.*]] = shufflevector <4 x float> [[B]], <4 x float> poison, <4 x i32> <i32 poison, i32 poison, i32 poison, i32 2>
-; SSE-NEXT: [[TMP3:%.*]] = fadd <4 x float> [[SHIFT2]], [[B]]
-; SSE-NEXT: [[V1:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> undef, <4 x i32> <i32 4, i32 2, i32 6, i32 7>
-; SSE-NEXT: [[V2:%.*]] = shufflevector <4 x float> [[V1]], <4 x float> [[TMP2]], <4 x i32> <i32 0, i32 1, i32 4, i32 3>
-; SSE-NEXT: [[V3:%.*]] = shufflevector <4 x float> [[V2]], <4 x float> [[TMP3]], <4 x i32> <i32 0, i32 1, i32 2, i32 7>
-; SSE-NEXT: ret <4 x float> [[V3]]
-;
-; AVX-LABEL: @PR34724(
-; AVX-NEXT: [[A0:%.*]] = extractelement <4 x float> [[A:%.*]], i32 0
-; AVX-NEXT: [[A1:%.*]] = extractelement <4 x float> [[A]], i32 1
-; AVX-NEXT: [[SHIFT:%.*]] = shufflevector <4 x float> [[A]], <4 x float> poison, <4 x i32> <i32 poison, i32 poison, i32 3, i32 poison>
-; AVX-NEXT: [[TMP1:%.*]] = fadd <4 x float> [[A]], [[SHIFT]]
-; AVX-NEXT: [[SHIFT1:%.*]] = shufflevector <4 x float> [[B:%.*]], <4 x float> poison, <4 x i32> <i32 1, i32 poison, i32 poison, i32 poison>
-; AVX-NEXT: [[TMP2:%.*]] = fadd <4 x float> [[B]], [[SHIFT1]]
-; AVX-NEXT: [[B01:%.*]] = extractelement <4 x float> [[TMP2]], i32 0
-; AVX-NEXT: [[SHIFT2:%.*]] = shufflevector <4 x float> [[B]], <4 x float> poison, <4 x i32> <i32 poison, i32 poison, i32 poison, i32 2>
-; AVX-NEXT: [[TMP3:%.*]] = fadd <4 x float> [[SHIFT2]], [[B]]
-; AVX-NEXT: [[V1:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> undef, <4 x i32> <i32 4, i32 2, i32 6, i32 7>
-; AVX-NEXT: [[V2:%.*]] = insertelement <4 x float> [[V1]], float [[B01]], i32 2
-; AVX-NEXT: [[V3:%.*]] = shufflevector <4 x float> [[V2]], <4 x float> [[TMP3]], <4 x i32> <i32 0, i32 1, i32 2, i32 7>
-; AVX-NEXT: ret <4 x float> [[V3]]
+; CHECK-LABEL: @PR34724(
+; CHECK-NEXT: [[A0:%.*]] = extractelement <4 x float> [[A:%.*]], i32 0
+; CHECK-NEXT: [[A1:%.*]] = extractelement <4 x float> [[A]], i32 1
+; CHECK-NEXT: [[SHIFT:%.*]] = shufflevector <4 x float> [[A]], <4 x float> poison, <4 x i32> <i32 poison, i32 poison, i32 3, i32 poison>
+; CHECK-NEXT: [[TMP1:%.*]] = fadd <4 x float> [[A]], [[SHIFT]]
+; CHECK-NEXT: [[SHIFT1:%.*]] = shufflevector <4 x float> [[B:%.*]], <4 x float> poison, <4 x i32> <i32 1, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP2:%.*]] = fadd <4 x float> [[B]], [[SHIFT1]]
+; CHECK-NEXT: [[SHIFT2:%.*]] = shufflevector <4 x float> [[B]], <4 x float> poison, <4 x i32> <i32 poison, i32 poison, i32 poison, i32 2>
+; CHECK-NEXT: [[TMP3:%.*]] = fadd <4 x float> [[SHIFT2]], [[B]]
+; CHECK-NEXT: [[V1:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> undef, <4 x i32> <i32 4, i32 2, i32 6, i32 7>
+; CHECK-NEXT: [[V2:%.*]] = shufflevector <4 x float> [[V1]], <4 x float> [[TMP2]], <4 x i32> <i32 0, i32 1, i32 4, i32 3>
+; CHECK-NEXT: [[V3:%.*]] = shufflevector <4 x float> [[V2]], <4 x float> [[TMP3]], <4 x i32> <i32 0, i32 1, i32 2, i32 7>
+; CHECK-NEXT: ret <4 x float> [[V3]]
;
%a0 = extractelement <4 x float> %a, i32 0
%a1 = extractelement <4 x float> %a, i32 1
diff --git a/llvm/test/Transforms/VectorCombine/X86/shuffle-of-cmps.ll b/llvm/test/Transforms/VectorCombine/X86/shuffle-of-cmps.ll
index 95068ad..f9108ef 100644
--- a/llvm/test/Transforms/VectorCombine/X86/shuffle-of-cmps.ll
+++ b/llvm/test/Transforms/VectorCombine/X86/shuffle-of-cmps.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt < %s -passes=vector-combine -S -mtriple=x86_64-- -mattr=sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
; RUN: opt < %s -passes=vector-combine -S -mtriple=x86_64-- -mattr=sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE4
-; RUN: opt < %s -passes=vector-combine -S -mtriple=x86_64-- -mattr=avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2
-; RUN: opt < %s -passes=vector-combine -S -mtriple=x86_64-- -mattr=avx512vl | FileCheck %s --check-prefixes=CHECK,AVX,AVX512
+; RUN: opt < %s -passes=vector-combine -S -mtriple=x86_64-- -mattr=avx2 | FileCheck %s --check-prefixes=CHECK,AVX2
+; RUN: opt < %s -passes=vector-combine -S -mtriple=x86_64-- -mattr=avx512vl | FileCheck %s --check-prefixes=CHECK,AVX512
declare void @use(<4 x i1>)
@@ -105,8 +105,8 @@ define <4 x i32> @shuf_icmp_ugt_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %z,
define <4 x i32> @shuf_fcmp_oeq_v4i32(<4 x float> %x, <4 x float> %y, <4 x float> %z) {
; SSE2-LABEL: define <4 x i32> @shuf_fcmp_oeq_v4i32(
; SSE2-SAME: <4 x float> [[X:%.*]], <4 x float> [[Y:%.*]], <4 x float> [[Z:%.*]]) #[[ATTR0]] {
-; SSE2-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[X]], <4 x float> poison, <4 x i32> <i32 poison, i32 poison, i32 2, i32 0>
-; SSE2-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[Y]], <4 x float> [[Z]], <4 x i32> <i32 poison, i32 poison, i32 6, i32 0>
+; SSE2-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[X]], <4 x float> poison, <4 x i32> <i32 poison, i32 poison, i32 0, i32 0>
+; SSE2-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[Y]], <4 x float> [[Z]], <4 x i32> <i32 poison, i32 poison, i32 4, i32 0>
; SSE2-NEXT: [[S:%.*]] = fcmp oeq <4 x float> [[TMP1]], [[TMP2]]
; SSE2-NEXT: [[R:%.*]] = sext <4 x i1> [[S]] to <4 x i32>
; SSE2-NEXT: ret <4 x i32> [[R]]
@@ -115,21 +115,29 @@ define <4 x i32> @shuf_fcmp_oeq_v4i32(<4 x float> %x, <4 x float> %y, <4 x float
; SSE4-SAME: <4 x float> [[X:%.*]], <4 x float> [[Y:%.*]], <4 x float> [[Z:%.*]]) #[[ATTR0]] {
; SSE4-NEXT: [[B0:%.*]] = fcmp oeq <4 x float> [[X]], [[Y]]
; SSE4-NEXT: [[B1:%.*]] = fcmp oeq <4 x float> [[X]], [[Z]]
-; SSE4-NEXT: [[S:%.*]] = shufflevector <4 x i1> [[B0]], <4 x i1> [[B1]], <4 x i32> <i32 poison, i32 poison, i32 6, i32 0>
+; SSE4-NEXT: [[S:%.*]] = shufflevector <4 x i1> [[B0]], <4 x i1> [[B1]], <4 x i32> <i32 poison, i32 poison, i32 4, i32 0>
; SSE4-NEXT: [[R:%.*]] = sext <4 x i1> [[S]] to <4 x i32>
; SSE4-NEXT: ret <4 x i32> [[R]]
;
-; AVX-LABEL: define <4 x i32> @shuf_fcmp_oeq_v4i32(
-; AVX-SAME: <4 x float> [[X:%.*]], <4 x float> [[Y:%.*]], <4 x float> [[Z:%.*]]) #[[ATTR0]] {
-; AVX-NEXT: [[B0:%.*]] = fcmp oeq <4 x float> [[X]], [[Y]]
-; AVX-NEXT: [[B1:%.*]] = fcmp oeq <4 x float> [[X]], [[Z]]
-; AVX-NEXT: [[S:%.*]] = shufflevector <4 x i1> [[B0]], <4 x i1> [[B1]], <4 x i32> <i32 poison, i32 poison, i32 6, i32 0>
-; AVX-NEXT: [[R:%.*]] = sext <4 x i1> [[S]] to <4 x i32>
-; AVX-NEXT: ret <4 x i32> [[R]]
+; AVX2-LABEL: define <4 x i32> @shuf_fcmp_oeq_v4i32(
+; AVX2-SAME: <4 x float> [[X:%.*]], <4 x float> [[Y:%.*]], <4 x float> [[Z:%.*]]) #[[ATTR0]] {
+; AVX2-NEXT: [[B0:%.*]] = fcmp oeq <4 x float> [[X]], [[Y]]
+; AVX2-NEXT: [[B1:%.*]] = fcmp oeq <4 x float> [[X]], [[Z]]
+; AVX2-NEXT: [[S:%.*]] = shufflevector <4 x i1> [[B0]], <4 x i1> [[B1]], <4 x i32> <i32 poison, i32 poison, i32 4, i32 0>
+; AVX2-NEXT: [[R:%.*]] = sext <4 x i1> [[S]] to <4 x i32>
+; AVX2-NEXT: ret <4 x i32> [[R]]
+;
+; AVX512-LABEL: define <4 x i32> @shuf_fcmp_oeq_v4i32(
+; AVX512-SAME: <4 x float> [[X:%.*]], <4 x float> [[Y:%.*]], <4 x float> [[Z:%.*]]) #[[ATTR0]] {
+; AVX512-NEXT: [[TMP1:%.*]] = shufflevector <4 x float> [[X]], <4 x float> poison, <4 x i32> <i32 poison, i32 poison, i32 0, i32 0>
+; AVX512-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[Y]], <4 x float> [[Z]], <4 x i32> <i32 poison, i32 poison, i32 4, i32 0>
+; AVX512-NEXT: [[S:%.*]] = fcmp oeq <4 x float> [[TMP1]], [[TMP2]]
+; AVX512-NEXT: [[R:%.*]] = sext <4 x i1> [[S]] to <4 x i32>
+; AVX512-NEXT: ret <4 x i32> [[R]]
;
%b0 = fcmp oeq <4 x float> %x, %y
%b1 = fcmp oeq <4 x float> %x, %z
- %s = shufflevector <4 x i1> %b0, <4 x i1> %b1, <4 x i32> <i32 poison, i32 poison, i32 6, i32 0>
+ %s = shufflevector <4 x i1> %b0, <4 x i1> %b1, <4 x i32> <i32 poison, i32 poison, i32 4, i32 0>
%r = sext <4 x i1> %s to <4 x i32>
ret <4 x i32> %r
}
diff --git a/llvm/test/tools/UpdateTestChecks/lit.local.cfg b/llvm/test/tools/UpdateTestChecks/lit.local.cfg
index a954eb7..7147769 100644
--- a/llvm/test/tools/UpdateTestChecks/lit.local.cfg
+++ b/llvm/test/tools/UpdateTestChecks/lit.local.cfg
@@ -10,7 +10,7 @@ except ImportError:
from pipes import quote as shell_quote
-def add_update_script_substition(
+def add_update_script_substitution(
name, python_exe=config.python_executable, extra_args=""
):
assert name.startswith("%")
@@ -33,26 +33,26 @@ llc_path = os.path.join(config.llvm_tools_dir, "llc")
if os.path.isfile(llc_path):
config.available_features.add("llc-binary")
llc_arg = "--llc-binary " + shell_quote(llc_path)
- add_update_script_substition("%update_llc_test_checks", extra_args=llc_arg)
- add_update_script_substition("%update_mir_test_checks", extra_args=llc_arg)
+ add_update_script_substitution("%update_llc_test_checks", extra_args=llc_arg)
+ add_update_script_substitution("%update_mir_test_checks", extra_args=llc_arg)
opt_path = os.path.join(config.llvm_tools_dir, "opt")
if os.path.isfile(opt_path):
config.available_features.add("opt-binary")
opt_arg = "--opt-binary " + shell_quote(opt_path)
- add_update_script_substition("%update_test_checks", extra_args=opt_arg)
- add_update_script_substition("%update_analyze_test_checks", extra_args=opt_arg)
+ add_update_script_substitution("%update_test_checks", extra_args=opt_arg)
+ add_update_script_substitution("%update_analyze_test_checks", extra_args=opt_arg)
llvm_mca_path = os.path.join(config.llvm_tools_dir, "llvm-mca")
if os.path.isfile(llvm_mca_path):
config.available_features.add("llvm-mca-binary")
mca_arg = "--llvm-mca-binary " + shell_quote(llvm_mca_path)
- add_update_script_substition("%update_test_checks", extra_args=mca_arg)
+ add_update_script_substitution("%update_test_checks", extra_args=mca_arg)
split_file_path = os.path.join(config.llvm_tools_dir, "split-file")
if os.path.isfile(split_file_path):
- add_update_script_substition("%update_test_body")
+ add_update_script_substitution("%update_test_body")
llvm_mc_path = os.path.join(config.llvm_tools_dir, "llvm-mc")
if os.path.isfile(llvm_mc_path):
- add_update_script_substition("%update_mc_test_checks")
+ add_update_script_substitution("%update_mc_test_checks")
diff --git a/llvm/test/tools/dxil-dis/fastmath.ll b/llvm/test/tools/dxil-dis/fastmath.ll
new file mode 100644
index 0000000..7f4ba5b
--- /dev/null
+++ b/llvm/test/tools/dxil-dis/fastmath.ll
@@ -0,0 +1,23 @@
+; RUN: llc %s --filetype=obj -o - | dxil-dis -o - | FileCheck %s
+target triple = "dxil-unknown-shadermodel6.7-library"
+
+define float @fma(float %0, float %1, float %2) #0 {
+ ; verify reassoc and contract are converted to fast
+ ; CHECK: %4 = fmul fast float %0, %1
+ %4 = fmul reassoc float %0, %1
+ ; CHECK-NEXT: %5 = fadd fast float %4, %2
+ %5 = fadd contract float %4, %2
+ ; verify these are converted to a single fast flag
+ ; CHECK-NEXT: %6 = fmul fast float %0, %1
+ %6 = fmul reassoc contract float %0, %1
+ ; verify these flags are maintained
+ ; CHECK-NEXT: %7 = fadd nnan ninf nsz arcp float %0, %1
+ %7 = fadd nnan ninf nsz arcp float %0, %1
+ ; verify that afn is removed
+ ; CHECK-NEXT: %8 = fmul float %0, %1
+ %8 = fmul afn float %0, %1
+ ret float %5
+}
+
+attributes #0 = { norecurse nounwind readnone willreturn "disable-tail-calls"="false" "waveops-include-helper-lanes" "fp32-denorm-mode"="any" "hlsl.export" }
+
diff --git a/llvm/test/tools/llvm-cov/Inputs/mcdc-templates-merge.cpp b/llvm/test/tools/llvm-cov/Inputs/mcdc-templates-merge.cpp
new file mode 100644
index 0000000..0abb308
--- /dev/null
+++ b/llvm/test/tools/llvm-cov/Inputs/mcdc-templates-merge.cpp
@@ -0,0 +1,51 @@
+#include <cstdio>
+
+template <typename Ty>
+bool ab(Ty a, Ty b) {
+ return (a && b);
+}
+// CHECK: [[@LINE-2]]| 4| return
+
+// CHECK: MC/DC Coverage for Decision{{[:]}} 50.00%
+// CHECK: MC/DC Coverage for Decision{{[:]}} 50.00%
+// CHECK: MC/DC Coverage for Decision{{[:]}} 0.00%
+// CHECK-NOT: MC/DC Coverage for Decision{{[:]}}
+
+// CHECK: _Z2abIbEbT_S0_{{[:]}}
+// CHECK: | | MC/DC Coverage for Decision{{[:]}} 50.00%
+
+// CHECK: _Z2abIxEbT_S0_{{[:]}}
+// CHECK: | | MC/DC Coverage for Decision{{[:]}} 50.00%
+
+// CHECK: Unexecuted instantiation{{[:]}} _Z2abIdEbT_S0_
+
+template <bool C>
+bool Cab(bool a, bool b) {
+ return (a && b && C);
+}
+// CHECK: [[@LINE-2]]| 4| return
+
+// CHECK: MC/DC Coverage for Decision{{[:]}} 0.00%
+// CHECK: MC/DC Coverage for Decision{{[:]}} 50.00%
+// CHECK-NOT: MC/DC Coverage for Decision{{[:]}}
+
+// CHECK: _Z3CabILb0EEbbb{{[:]}}
+// CHECK: | | MC/DC Coverage for Decision{{[:]}} 0.00%
+
+// CHECK: _Z3CabILb1EEbbb{{[:]}}
+// CHECK: | | MC/DC Coverage for Decision{{[:]}} 50.00%
+
+// CHECK: [[@LINE+1]]| 1|int main
+int main(int argc, char **argv) {
+ printf("%d\n", Cab<false>(false, false));
+ printf("%d\n", Cab<false>(true, true));
+ printf("%d\n", Cab<true>(true, false));
+ printf("%d\n", Cab<true>(true, true));
+ printf("%d\n", ab(false, false));
+ printf("%d\n", ab(true, true));
+ printf("%d\n", ab(1LL, 0LL));
+ printf("%d\n", ab(1LL, 1LL));
+ if (argc == 2)
+ printf("%d\n", ab(0.0, 0.0));
+ return 0;
+}
diff --git a/llvm/test/tools/llvm-cov/Inputs/mcdc-templates-merge.proftext b/llvm/test/tools/llvm-cov/Inputs/mcdc-templates-merge.proftext
new file mode 100644
index 0000000..6136946
--- /dev/null
+++ b/llvm/test/tools/llvm-cov/Inputs/mcdc-templates-merge.proftext
@@ -0,0 +1,73 @@
+_Z2abIbEbT_S0_
+# Func Hash:
+1550
+# Num Counters:
+3
+# Counter Values:
+2
+1
+1
+# Num Bitmap Bytes:
+$1
+# Bitmap Byte Values:
+0x5
+
+
+_Z2abIxEbT_S0_
+# Func Hash:
+1550
+# Num Counters:
+3
+# Counter Values:
+2
+2
+1
+# Num Bitmap Bytes:
+$1
+# Bitmap Byte Values:
+0x6
+
+
+_Z3CabILb0EEbbb
+# Func Hash:
+99214
+# Num Counters:
+5
+# Counter Values:
+2
+1
+0
+1
+1
+# Num Bitmap Bytes:
+$1
+# Bitmap Byte Values:
+0x5
+
+
+_Z3CabILb1EEbbb
+# Func Hash:
+99214
+# Num Counters:
+5
+# Counter Values:
+2
+1
+1
+2
+1
+# Num Bitmap Bytes:
+$1
+# Bitmap Byte Values:
+0xa
+
+
+main
+# Func Hash:
+175973464
+# Num Counters:
+2
+# Counter Values:
+1
+0
+
diff --git a/llvm/test/tools/llvm-cov/Inputs/mcdc-templates-merge.yaml b/llvm/test/tools/llvm-cov/Inputs/mcdc-templates-merge.yaml
new file mode 100644
index 0000000..ba46319
--- /dev/null
+++ b/llvm/test/tools/llvm-cov/Inputs/mcdc-templates-merge.yaml
@@ -0,0 +1,105 @@
+--- !ELF
+FileHeader:
+ Class: ELFCLASS64
+ Data: ELFDATA2LSB
+ OSABI: ELFOSABI_GNU
+ Type: ET_REL
+ Machine: EM_X86_64
+ SectionHeaderStringTable: .strtab
+Sections:
+ - Name: __llvm_covfun
+ Type: SHT_PROGBITS
+ Flags: [ SHF_GNU_RETAIN ]
+ AddressAlign: 0x8
+ Content: FAD58DE7366495DB2500000058247D0A00000000249EC986A505B62F0101010105050127210C020109070010200502000700100500110185808080080501050021
+ - Name: '__llvm_covfun (1)'
+ Type: SHT_PROGBITS
+ Flags: [ SHF_GNU_RETAIN ]
+ AddressAlign: 0x8
+ Content: 9CC3B348501DBFE8480000008E83010000000000249EC986A505B62F010103010D0D1105090901171A020201010B000C01000B0011280403000B0016300D02010300000B000C0D0010001130110603020000100011050015001630000A02000000150016
+ - Name: '__llvm_covfun (2)'
+ Type: SHT_PROGBITS
+ Flags: [ SHF_GNU_RETAIN ]
+ AddressAlign: 0x8
+ Content: 9873627177A03F8E460000008E83010000000000249EC986A505B62F010102010D0D110901171A020201010B000C01000B0011280403000B0016300D02010300000B000C0D0010001130110603020000100011050015001630090002000000150016
+ - Name: '__llvm_covfun (3)'
+ Type: SHT_PROGBITS
+ Flags: [ SHF_GNU_RETAIN ]
+ AddressAlign: 0x8
+ Content: BF407A207503B266320000000E06000000000000249EC986A505B62F0101020105050906010415020201010B000C280302000B0011300502010200000B000C050010001130090602000000100011
+ - Name: '__llvm_covfun (4)'
+ Type: SHT_PROGBITS
+ Flags: [ SHF_GNU_RETAIN ]
+ AddressAlign: 0x8
+ Content: 8A05A22CB467C37D320000000E06000000000000249EC986A505B62F0101020105050906010415020201010B000C280302000B0011300502010200000B000C050010001130090602000000100011
+ - Name: '__llvm_covfun (5)'
+ Type: SHT_PROGBITS
+ Flags: [ SHF_GNU_RETAIN ]
+ AddressAlign: 0x8
+ Content: 1700192CAC8F3F26320000000E06000000000000249EC986A505B62F0101020105050906010415020201010B000C280302000B0011300502010200000B000C050010001130090602000000100011
+ - Name: __llvm_covmap
+ Type: SHT_PROGBITS
+ Flags: [ SHF_GNU_RETAIN ]
+ AddressAlign: 0x8
+ Content: 000000001D0000000000000006000000021A0000186D6364632D74656D706C617465732D6D657267652E637070000000
+ - Name: __llvm_prf_names
+ Type: SHT_PROGBITS
+ Flags: [ SHF_ALLOC, SHF_GNU_RETAIN ]
+ AddressAlign: 0x1
+ Content: 51006D61696E015F5A33436162494C62304545626262015F5A33436162494C62314545626262015F5A32616249624562545F53305F015F5A32616249784562545F53305F015F5A32616249644562545F53305F
+ - Type: SectionHeaderTable
+ Sections:
+ - Name: .strtab
+ - Name: __llvm_covfun
+ - Name: '__llvm_covfun (1)'
+ - Name: '__llvm_covfun (2)'
+ - Name: '__llvm_covfun (3)'
+ - Name: '__llvm_covfun (4)'
+ - Name: '__llvm_covfun (5)'
+ - Name: __llvm_covmap
+ - Name: __llvm_prf_names
+ - Name: .symtab
+Symbols:
+ - Name: __llvm_covmap
+ Type: STT_SECTION
+ Section: __llvm_covmap
+ - Name: __llvm_prf_names
+ Type: STT_SECTION
+ Section: __llvm_prf_names
+ - Name: __covrec_DB956436E78DD5FAu
+ Type: STT_OBJECT
+ Section: __llvm_covfun
+ Binding: STB_WEAK
+ Size: 0x41
+ Other: [ STV_HIDDEN ]
+ - Name: __covrec_E8BF1D5048B3C39Cu
+ Type: STT_OBJECT
+ Section: '__llvm_covfun (1)'
+ Binding: STB_WEAK
+ Size: 0x64
+ Other: [ STV_HIDDEN ]
+ - Name: __covrec_8E3FA07771627398u
+ Type: STT_OBJECT
+ Section: '__llvm_covfun (2)'
+ Binding: STB_WEAK
+ Size: 0x62
+ Other: [ STV_HIDDEN ]
+ - Name: __covrec_66B20375207A40BFu
+ Type: STT_OBJECT
+ Section: '__llvm_covfun (3)'
+ Binding: STB_WEAK
+ Size: 0x4E
+ Other: [ STV_HIDDEN ]
+ - Name: __covrec_7DC367B42CA2058Au
+ Type: STT_OBJECT
+ Section: '__llvm_covfun (4)'
+ Binding: STB_WEAK
+ Size: 0x4E
+ Other: [ STV_HIDDEN ]
+ - Name: __covrec_263F8FAC2C190017u
+ Type: STT_OBJECT
+ Section: '__llvm_covfun (5)'
+ Binding: STB_WEAK
+ Size: 0x4E
+ Other: [ STV_HIDDEN ]
+...
diff --git a/llvm/test/tools/llvm-cov/branch-macros.test b/llvm/test/tools/llvm-cov/branch-macros.test
index 2ff4355..b60da15 100644
--- a/llvm/test/tools/llvm-cov/branch-macros.test
+++ b/llvm/test/tools/llvm-cov/branch-macros.test
@@ -1,5 +1,7 @@
// RUN: llvm-profdata merge %S/Inputs/branch-macros.proftext -o %t.profdata
// RUN: llvm-cov show --show-expansions --show-branches=count %S/Inputs/branch-macros.o32l -instr-profile %t.profdata -path-equivalence=/tmp,%S/Inputs | FileCheck %S/Inputs/branch-macros.cpp -check-prefixes=CHECK,BRCOV -D#C=999
+// RUN: llvm-cov report --show-branch-summary %S/Inputs/branch-macros.o32l -instr-profile %t.profdata -path-equivalence=/tmp,%S/Inputs %S/Inputs/branch-macros.cpp | FileCheck %s -check-prefix=FILE
+
// RUN: llvm-cov show --binary-counters --show-expansions --show-branches=count %S/Inputs/branch-macros.o32l -instr-profile %t.profdata -path-equivalence=/tmp,%S/Inputs | FileCheck %S/Inputs/branch-macros.cpp -check-prefixes=CHECK,BRCOV -D#C=1
// RUN: llvm-cov report --show-branch-summary %S/Inputs/branch-macros.o32l -instr-profile %t.profdata -show-functions -path-equivalence=/tmp,%S/Inputs %S/Inputs/branch-macros.cpp | FileCheck %s -check-prefix=REPORT
@@ -14,3 +16,9 @@
// REPORT-NEXT: main 1 0 100.00% 6 0 100.00% 0 0 0.00%
// REPORT-NEXT: ---
// REPORT-NEXT: TOTAL 16 4 75.00% 32 0 100.00% 40 16 60.00%
+
+// FILE: Filename Regions Missed Regions Cover Functions Missed Functions Executed Lines Missed Lines Cover Branches Missed Branches Cover
+// FILE-NEXT: ---
+// FILE-NEXT: branch-macros.cpp 16 4 75.00% 3 0 100.00% 32 0 100.00% 40 16 60.00%
+// FILE-NEXT: ---
+// FILE-NEXT: TOTAL 16 4 75.00% 3 0 100.00% 32 0 100.00% 40 16 60.00%
diff --git a/llvm/test/tools/llvm-cov/mcdc-templates-merge.test b/llvm/test/tools/llvm-cov/mcdc-templates-merge.test
new file mode 100644
index 0000000..cacd3b6b
--- /dev/null
+++ b/llvm/test/tools/llvm-cov/mcdc-templates-merge.test
@@ -0,0 +1,27 @@
+# Test `merge-instantiations=merge/any/all`
+
+RUN: yaml2obj %S/Inputs/mcdc-templates-merge.yaml -o %t.o
+RUN: llvm-profdata merge %S/Inputs/mcdc-templates-merge.proftext -o %t.profdata
+
+RUN: llvm-cov show --show-mcdc -show-instantiations=true %t.o -instr-profile %t.profdata -path-equivalence=.,%S/Inputs | FileCheck %S/Inputs/mcdc-templates-merge.cpp
+
+RUN: llvm-cov report --show-mcdc-summary %t.o -instr-profile %t.profdata -path-equivalence=.,%S/Inputs | FileCheck %s --check-prefixes=CHECK,REPORT
+
+REPORT: mcdc-templates-merge.cpp
+
+# Regions
+CHECK: 10 1 90.00%
+
+# Functions
+CHECK: 3 0 100.00%
+
+# Lines
+CHECK: 19 1 94.74%
+
+# Branches
+CHECK: 11 3 72.73%
+
+# MC/DC Conditions
+CHECK: 4 2 50.00%
+
+REPORT: TOTAL
diff --git a/llvm/test/tools/llvm-gsymutil/ARM_AArch64/macho-merged-funcs-dwarf.yaml b/llvm/test/tools/llvm-gsymutil/ARM_AArch64/macho-merged-funcs-dwarf.yaml
index 97dfc61..522c576 100644
--- a/llvm/test/tools/llvm-gsymutil/ARM_AArch64/macho-merged-funcs-dwarf.yaml
+++ b/llvm/test/tools/llvm-gsymutil/ARM_AArch64/macho-merged-funcs-dwarf.yaml
@@ -71,11 +71,11 @@
#### TODO: Fix non-determinism that is currently worked around with `{{[1-3]}}` below.
# CHECK-MERGED-LOOKUP: Found 3 functions at address 0x0000000000000248:
-# CHECK-MERGED-LOOKUP-NEXT: 0x0000000000000248: my_func_0{{[1-3]}} @ /tmp/test_gsym_yaml/out/file_0{{[1-3]}}.cpp:5
-# CHECK-MERGED-LOOKUP-NEXT-NEXT: 0x0000000000000248: my_func_0{{[1-3]}} @ /tmp/test_gsym_yaml/out/file_0{{[1-3]}}.cpp:5
-# CHECK-MERGED-LOOKUP-NEXT-NEXT: 0x0000000000000248: my_func_0{{[1-3]}} @ /tmp/test_gsym_yaml/out/file_0{{[1-3]}}.cpp:5
+# CHECK-MERGED-LOOKUP-NEXT: 0x0000000000000248: my_func_0{{[1-3]}} @ /tmp/test_gsym_yaml{{[/\\]}}out/file_0{{[1-3]}}.cpp:5
+# CHECK-MERGED-LOOKUP-NEXT-NEXT: 0x0000000000000248: my_func_0{{[1-3]}} @ /tmp/test_gsym_yaml{{[/\\]}}out/file_0{{[1-3]}}.cpp:5
+# CHECK-MERGED-LOOKUP-NEXT-NEXT: 0x0000000000000248: my_func_0{{[1-3]}} @ /tmp/test_gsym_yaml{{[/\\]}}out/file_0{{[1-3]}}.cpp:5
-# CHECK-NORMAL-LOOKUP: 0x0000000000000248: my_func_0{{[1-3]}} @ /tmp/test_gsym_yaml/out/file_0{{[1-3]}}.cpp:5
+# CHECK-NORMAL-LOOKUP: 0x0000000000000248: my_func_0{{[1-3]}} @ /tmp/test_gsym_yaml{{[/\\]}}out/file_0{{[1-3]}}.cpp:5
--- !mach-o
diff --git a/llvm/test/tools/llvm-mca/ARM/cortex-a57-memory-instructions.s b/llvm/test/tools/llvm-mca/ARM/cortex-a57-memory-instructions.s
index 04c95f62f..36a2f04 100644
--- a/llvm/test/tools/llvm-mca/ARM/cortex-a57-memory-instructions.s
+++ b/llvm/test/tools/llvm-mca/ARM/cortex-a57-memory-instructions.s
@@ -325,11 +325,11 @@
# CHECK-NEXT: 2 1 1.00 * strd r4, r5, [r12], -r10
# CHECK-NEXT: 1 1 1.00 * strh r3, [r4]
# CHECK-NEXT: 1 1 1.00 * strh r2, [r7, #4]
-# CHECK-NEXT: 2 1 1.00 U strh r1, [r8, #64]!
+# CHECK-NEXT: 2 1 1.00 * strh r1, [r8, #64]!
# CHECK-NEXT: 2 1 1.00 * strh r12, [sp], #4
# CHECK-NEXT: 1 1 1.00 * strh r6, [r5, r4]
-# CHECK-NEXT: 2 1 1.00 U strh r3, [r8, r11]!
-# CHECK-NEXT: 2 1 1.00 U strh r1, [r2, -r1]!
+# CHECK-NEXT: 2 1 1.00 * strh r3, [r8, r11]!
+# CHECK-NEXT: 2 1 1.00 * strh r1, [r2, -r1]!
# CHECK-NEXT: 2 1 1.00 * strh r9, [r7], r2
# CHECK-NEXT: 2 1 1.00 * strh r4, [r3], -r2
# CHECK-NEXT: 2 1 1.00 U strht r2, [r5], #76
diff --git a/llvm/test/tools/llvm-mca/RISCV/SiFiveP400/div.s b/llvm/test/tools/llvm-mca/RISCV/SiFiveP400/div.s
new file mode 100644
index 0000000..c42b4a9
--- /dev/null
+++ b/llvm/test/tools/llvm-mca/RISCV/SiFiveP400/div.s
@@ -0,0 +1,1009 @@
+# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
+# RUN: llvm-mca -mtriple=riscv64 -mcpu=sifive-p470 -iterations=1 < %s | FileCheck %s
+
+vsetvli zero, zero, e8, mf8, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e8, mf4, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e8, mf2, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e8, m1, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e8, m1, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e8, m2, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e8, m4, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e8, m8, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e16, mf8, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e16, mf4, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e16, mf2, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e16, m1, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e16, m1, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e16, m2, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e16, m4, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e16, m8, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e32, mf8, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e32, mf4, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e32, mf2, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e32, m1, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e32, m1, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e32, m2, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e32, m4, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e32, m8, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e64, mf8, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e64, mf4, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e64, mf2, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e64, m1, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e64, m1, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e64, m2, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e64, m4, tu, mu
+vdiv.vv v8, v16, v24
+vsetvli zero, zero, e64, m8, tu, mu
+vdiv.vv v8, v16, v24
+
+vsetvli zero, zero, e8, mf8, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e8, mf4, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e8, mf2, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e8, m1, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e8, m1, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e8, m2, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e8, m4, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e8, m8, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e16, mf8, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e16, mf4, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e16, mf2, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e16, m1, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e16, m1, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e16, m2, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e16, m4, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e16, m8, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e32, mf8, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e32, mf4, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e32, mf2, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e32, m1, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e32, m1, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e32, m2, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e32, m4, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e32, m8, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e64, mf8, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e64, mf4, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e64, mf2, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e64, m1, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e64, m1, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e64, m2, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e64, m4, tu, mu
+vdiv.vx v8, v16, a0
+vsetvli zero, zero, e64, m8, tu, mu
+vdiv.vx v8, v16, a0
+
+vsetvli zero, zero, e8, mf8, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e8, mf4, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e8, mf2, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e8, m1, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e8, m1, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e8, m2, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e8, m4, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e8, m8, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e16, mf8, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e16, mf4, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e16, mf2, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e16, m1, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e16, m1, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e16, m2, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e16, m4, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e16, m8, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e32, mf8, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e32, mf4, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e32, mf2, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e32, m1, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e32, m1, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e32, m2, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e32, m4, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e32, m8, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e64, mf8, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e64, mf4, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e64, mf2, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e64, m1, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e64, m1, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e64, m2, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e64, m4, tu, mu
+vfdiv.vv v8, v16, v24
+vsetvli zero, zero, e64, m8, tu, mu
+vfdiv.vv v8, v16, v24
+
+vsetvli zero, zero, e8, mf8, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e8, mf4, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e8, mf2, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e8, m1, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e8, m1, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e8, m2, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e8, m4, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e8, m8, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e16, mf8, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e16, mf4, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e16, mf2, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e16, m1, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e16, m1, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e16, m2, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e16, m4, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e16, m8, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e32, mf8, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e32, mf4, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e32, mf2, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e32, m1, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e32, m1, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e32, m2, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e32, m4, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e32, m8, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e64, mf8, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e64, mf4, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e64, mf2, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e64, m1, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e64, m1, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e64, m2, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e64, m4, tu, mu
+vfdiv.vf v8, v16, fa0
+vsetvli zero, zero, e64, m8, tu, mu
+vfdiv.vf v8, v16, fa0
+
+vsetvli zero, zero, e8, mf8, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e8, mf4, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e8, mf2, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e8, m1, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e8, m1, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e8, m2, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e8, m4, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e8, m8, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e16, mf8, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e16, mf4, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e16, mf2, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e16, m1, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e16, m1, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e16, m2, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e16, m4, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e16, m8, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e32, mf8, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e32, mf4, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e32, mf2, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e32, m1, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e32, m1, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e32, m2, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e32, m4, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e32, m8, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e64, mf8, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e64, mf4, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e64, mf2, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e64, m1, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e64, m1, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e64, m2, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e64, m4, tu, mu
+vfsqrt.v v8, v16
+vsetvli zero, zero, e64, m8, tu, mu
+vfsqrt.v v8, v16
+
+# CHECK: Iterations: 1
+# CHECK-NEXT: Instructions: 320
+# CHECK-NEXT: Total Cycles: 22358
+# CHECK-NEXT: Total uOps: 320
+
+# CHECK: Dispatch Width: 3
+# CHECK-NEXT: uOps Per Cycle: 0.01
+# CHECK-NEXT: IPC: 0.01
+# CHECK-NEXT: Block RThroughput: 14361.0
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, mf8, tu, mu
+# CHECK-NEXT: 1 51 51.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, mf4, tu, mu
+# CHECK-NEXT: 1 51 51.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, mf2, tu, mu
+# CHECK-NEXT: 1 51 51.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: 1 51 51.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: 1 51 51.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m2, tu, mu
+# CHECK-NEXT: 1 102 102.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m4, tu, mu
+# CHECK-NEXT: 1 204 204.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m8, tu, mu
+# CHECK-NEXT: 1 408 408.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, mf8, tu, mu
+# CHECK-NEXT: 1 408 408.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, mf4, tu, mu
+# CHECK-NEXT: 1 45 45.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, mf2, tu, mu
+# CHECK-NEXT: 1 45 45.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: 1 45 45.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: 1 45 45.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m2, tu, mu
+# CHECK-NEXT: 1 90 90.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m4, tu, mu
+# CHECK-NEXT: 1 180 180.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m8, tu, mu
+# CHECK-NEXT: 1 360 360.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, mf8, tu, mu
+# CHECK-NEXT: 1 408 408.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, mf4, tu, mu
+# CHECK-NEXT: 1 408 408.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, mf2, tu, mu
+# CHECK-NEXT: 1 42 42.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m1, tu, mu
+# CHECK-NEXT: 1 42 42.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m1, tu, mu
+# CHECK-NEXT: 1 42 42.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m2, tu, mu
+# CHECK-NEXT: 1 84 84.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m4, tu, mu
+# CHECK-NEXT: 1 168 168.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m8, tu, mu
+# CHECK-NEXT: 1 336 336.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, mf8, tu, mu
+# CHECK-NEXT: 1 408 408.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, mf4, tu, mu
+# CHECK-NEXT: 1 408 408.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, mf2, tu, mu
+# CHECK-NEXT: 1 408 408.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m1, tu, mu
+# CHECK-NEXT: 1 72 72.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m1, tu, mu
+# CHECK-NEXT: 1 72 72.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m2, tu, mu
+# CHECK-NEXT: 1 144 144.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m4, tu, mu
+# CHECK-NEXT: 1 288 288.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m8, tu, mu
+# CHECK-NEXT: 1 576 576.00 vdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, mf8, tu, mu
+# CHECK-NEXT: 1 51 51.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, mf4, tu, mu
+# CHECK-NEXT: 1 51 51.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, mf2, tu, mu
+# CHECK-NEXT: 1 51 51.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: 1 51 51.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: 1 51 51.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m2, tu, mu
+# CHECK-NEXT: 1 102 102.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m4, tu, mu
+# CHECK-NEXT: 1 204 204.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m8, tu, mu
+# CHECK-NEXT: 1 408 408.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, mf8, tu, mu
+# CHECK-NEXT: 1 408 408.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, mf4, tu, mu
+# CHECK-NEXT: 1 45 45.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, mf2, tu, mu
+# CHECK-NEXT: 1 45 45.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: 1 45 45.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: 1 45 45.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m2, tu, mu
+# CHECK-NEXT: 1 90 90.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m4, tu, mu
+# CHECK-NEXT: 1 180 180.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m8, tu, mu
+# CHECK-NEXT: 1 360 360.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, mf8, tu, mu
+# CHECK-NEXT: 1 408 408.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, mf4, tu, mu
+# CHECK-NEXT: 1 408 408.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, mf2, tu, mu
+# CHECK-NEXT: 1 42 42.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m1, tu, mu
+# CHECK-NEXT: 1 42 42.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m1, tu, mu
+# CHECK-NEXT: 1 42 42.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m2, tu, mu
+# CHECK-NEXT: 1 84 84.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m4, tu, mu
+# CHECK-NEXT: 1 168 168.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m8, tu, mu
+# CHECK-NEXT: 1 336 336.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, mf8, tu, mu
+# CHECK-NEXT: 1 408 408.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, mf4, tu, mu
+# CHECK-NEXT: 1 408 408.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, mf2, tu, mu
+# CHECK-NEXT: 1 408 408.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m1, tu, mu
+# CHECK-NEXT: 1 72 72.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m1, tu, mu
+# CHECK-NEXT: 1 72 72.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m2, tu, mu
+# CHECK-NEXT: 1 144 144.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m4, tu, mu
+# CHECK-NEXT: 1 288 288.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m8, tu, mu
+# CHECK-NEXT: 1 576 576.00 vdiv.vx v8, v16, a0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, mf8, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, mf4, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, mf2, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m2, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m4, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m8, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, mf8, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, mf4, tu, mu
+# CHECK-NEXT: 1 29 29.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, mf2, tu, mu
+# CHECK-NEXT: 1 29 29.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: 1 29 29.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: 1 29 29.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m2, tu, mu
+# CHECK-NEXT: 1 58 58.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m4, tu, mu
+# CHECK-NEXT: 1 116 116.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m8, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, mf8, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, mf4, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, mf2, tu, mu
+# CHECK-NEXT: 1 25 25.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m1, tu, mu
+# CHECK-NEXT: 1 25 25.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m1, tu, mu
+# CHECK-NEXT: 1 25 25.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m2, tu, mu
+# CHECK-NEXT: 1 50 50.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m4, tu, mu
+# CHECK-NEXT: 1 100 100.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m8, tu, mu
+# CHECK-NEXT: 1 200 200.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, mf8, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, mf4, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, mf2, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m1, tu, mu
+# CHECK-NEXT: 1 37 37.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m1, tu, mu
+# CHECK-NEXT: 1 37 37.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m2, tu, mu
+# CHECK-NEXT: 1 74 74.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m4, tu, mu
+# CHECK-NEXT: 1 148 148.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m8, tu, mu
+# CHECK-NEXT: 1 296 296.00 vfdiv.vv v8, v16, v24
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, mf8, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, mf4, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, mf2, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m2, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m4, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m8, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, mf8, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, mf4, tu, mu
+# CHECK-NEXT: 1 29 29.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, mf2, tu, mu
+# CHECK-NEXT: 1 29 29.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: 1 29 29.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: 1 29 29.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m2, tu, mu
+# CHECK-NEXT: 1 58 58.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m4, tu, mu
+# CHECK-NEXT: 1 116 116.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m8, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, mf8, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, mf4, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, mf2, tu, mu
+# CHECK-NEXT: 1 25 25.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m1, tu, mu
+# CHECK-NEXT: 1 25 25.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m1, tu, mu
+# CHECK-NEXT: 1 25 25.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m2, tu, mu
+# CHECK-NEXT: 1 50 50.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m4, tu, mu
+# CHECK-NEXT: 1 100 100.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m8, tu, mu
+# CHECK-NEXT: 1 200 200.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, mf8, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, mf4, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, mf2, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m1, tu, mu
+# CHECK-NEXT: 1 37 37.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m1, tu, mu
+# CHECK-NEXT: 1 37 37.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m2, tu, mu
+# CHECK-NEXT: 1 74 74.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m4, tu, mu
+# CHECK-NEXT: 1 148 148.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m8, tu, mu
+# CHECK-NEXT: 1 296 296.00 vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, mf8, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, mf4, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, mf2, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m2, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m4, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m8, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, mf8, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, mf4, tu, mu
+# CHECK-NEXT: 1 29 29.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, mf2, tu, mu
+# CHECK-NEXT: 1 29 29.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: 1 29 29.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: 1 29 29.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m2, tu, mu
+# CHECK-NEXT: 1 58 58.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m4, tu, mu
+# CHECK-NEXT: 1 116 116.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e16, m8, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, mf8, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, mf4, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, mf2, tu, mu
+# CHECK-NEXT: 1 25 25.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m1, tu, mu
+# CHECK-NEXT: 1 25 25.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m1, tu, mu
+# CHECK-NEXT: 1 25 25.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m2, tu, mu
+# CHECK-NEXT: 1 50 50.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m4, tu, mu
+# CHECK-NEXT: 1 100 100.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e32, m8, tu, mu
+# CHECK-NEXT: 1 200 200.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, mf8, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, mf4, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, mf2, tu, mu
+# CHECK-NEXT: 1 232 232.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m1, tu, mu
+# CHECK-NEXT: 1 37 37.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m1, tu, mu
+# CHECK-NEXT: 1 37 37.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m2, tu, mu
+# CHECK-NEXT: 1 74 74.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m4, tu, mu
+# CHECK-NEXT: 1 148 148.00 vfsqrt.v v8, v16
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e64, m8, tu, mu
+# CHECK-NEXT: 1 296 296.00 vfsqrt.v v8, v16
+
+# CHECK: Resources:
+# CHECK-NEXT: [0] - SiFiveP400Div
+# CHECK-NEXT: [1] - SiFiveP400FEXQ0
+# CHECK-NEXT: [2] - SiFiveP400FloatDiv
+# CHECK-NEXT: [3] - SiFiveP400IEXQ0
+# CHECK-NEXT: [4] - SiFiveP400IEXQ1
+# CHECK-NEXT: [5] - SiFiveP400IEXQ2
+# CHECK-NEXT: [6] - SiFiveP400Load
+# CHECK-NEXT: [7] - SiFiveP400Store
+# CHECK-NEXT: [8] - SiFiveP400VDiv
+# CHECK-NEXT: [9] - SiFiveP400VEXQ0
+# CHECK-NEXT: [10] - SiFiveP400VFloatDiv
+# CHECK-NEXT: [11] - SiFiveP400VLD
+# CHECK-NEXT: [12] - SiFiveP400VST
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12]
+# CHECK-NEXT: - - - - 160.00 - - - 12186.00 725.00 14361.00 - -
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12] Instructions:
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, mf8, tu, mu
+# CHECK-NEXT: - - - - - - - - 51.00 1.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, mf4, tu, mu
+# CHECK-NEXT: - - - - - - - - 51.00 1.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, mf2, tu, mu
+# CHECK-NEXT: - - - - - - - - 51.00 1.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - 51.00 1.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - 51.00 1.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m2, tu, mu
+# CHECK-NEXT: - - - - - - - - 102.00 2.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m4, tu, mu
+# CHECK-NEXT: - - - - - - - - 204.00 4.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m8, tu, mu
+# CHECK-NEXT: - - - - - - - - 408.00 8.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, mf8, tu, mu
+# CHECK-NEXT: - - - - - - - - 408.00 8.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, mf4, tu, mu
+# CHECK-NEXT: - - - - - - - - 45.00 1.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, mf2, tu, mu
+# CHECK-NEXT: - - - - - - - - 45.00 1.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - 45.00 1.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - 45.00 1.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m2, tu, mu
+# CHECK-NEXT: - - - - - - - - 90.00 2.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m4, tu, mu
+# CHECK-NEXT: - - - - - - - - 180.00 4.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m8, tu, mu
+# CHECK-NEXT: - - - - - - - - 360.00 8.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, mf8, tu, mu
+# CHECK-NEXT: - - - - - - - - 408.00 8.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, mf4, tu, mu
+# CHECK-NEXT: - - - - - - - - 408.00 8.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, mf2, tu, mu
+# CHECK-NEXT: - - - - - - - - 42.00 1.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - 42.00 1.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - 42.00 1.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m2, tu, mu
+# CHECK-NEXT: - - - - - - - - 84.00 2.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m4, tu, mu
+# CHECK-NEXT: - - - - - - - - 168.00 4.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m8, tu, mu
+# CHECK-NEXT: - - - - - - - - 336.00 8.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, mf8, tu, mu
+# CHECK-NEXT: - - - - - - - - 408.00 8.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, mf4, tu, mu
+# CHECK-NEXT: - - - - - - - - 408.00 8.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, mf2, tu, mu
+# CHECK-NEXT: - - - - - - - - 408.00 8.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - 72.00 1.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - 72.00 1.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m2, tu, mu
+# CHECK-NEXT: - - - - - - - - 144.00 2.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m4, tu, mu
+# CHECK-NEXT: - - - - - - - - 288.00 4.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m8, tu, mu
+# CHECK-NEXT: - - - - - - - - 576.00 8.00 - - - vdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, mf8, tu, mu
+# CHECK-NEXT: - - - - - - - - 51.00 1.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, mf4, tu, mu
+# CHECK-NEXT: - - - - - - - - 51.00 1.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, mf2, tu, mu
+# CHECK-NEXT: - - - - - - - - 51.00 1.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - 51.00 1.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - 51.00 1.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m2, tu, mu
+# CHECK-NEXT: - - - - - - - - 102.00 2.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m4, tu, mu
+# CHECK-NEXT: - - - - - - - - 204.00 4.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m8, tu, mu
+# CHECK-NEXT: - - - - - - - - 408.00 8.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, mf8, tu, mu
+# CHECK-NEXT: - - - - - - - - 408.00 8.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, mf4, tu, mu
+# CHECK-NEXT: - - - - - - - - 45.00 1.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, mf2, tu, mu
+# CHECK-NEXT: - - - - - - - - 45.00 1.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - 45.00 1.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - 45.00 1.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m2, tu, mu
+# CHECK-NEXT: - - - - - - - - 90.00 2.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m4, tu, mu
+# CHECK-NEXT: - - - - - - - - 180.00 4.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m8, tu, mu
+# CHECK-NEXT: - - - - - - - - 360.00 8.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, mf8, tu, mu
+# CHECK-NEXT: - - - - - - - - 408.00 8.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, mf4, tu, mu
+# CHECK-NEXT: - - - - - - - - 408.00 8.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, mf2, tu, mu
+# CHECK-NEXT: - - - - - - - - 42.00 1.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - 42.00 1.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - 42.00 1.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m2, tu, mu
+# CHECK-NEXT: - - - - - - - - 84.00 2.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m4, tu, mu
+# CHECK-NEXT: - - - - - - - - 168.00 4.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m8, tu, mu
+# CHECK-NEXT: - - - - - - - - 336.00 8.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, mf8, tu, mu
+# CHECK-NEXT: - - - - - - - - 408.00 8.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, mf4, tu, mu
+# CHECK-NEXT: - - - - - - - - 408.00 8.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, mf2, tu, mu
+# CHECK-NEXT: - - - - - - - - 408.00 8.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - 72.00 1.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - 72.00 1.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m2, tu, mu
+# CHECK-NEXT: - - - - - - - - 144.00 2.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m4, tu, mu
+# CHECK-NEXT: - - - - - - - - 288.00 4.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m8, tu, mu
+# CHECK-NEXT: - - - - - - - - 576.00 8.00 - - - vdiv.vx v8, v16, a0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, mf8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, mf4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, mf2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, mf8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, mf4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 29.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, mf2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 29.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 29.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 29.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 2.00 58.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 4.00 116.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, mf8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, mf4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, mf2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 25.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 25.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 25.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 2.00 50.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 4.00 100.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 200.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, mf8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, mf4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, mf2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 37.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 37.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 2.00 74.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 4.00 148.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 296.00 - - vfdiv.vv v8, v16, v24
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, mf8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, mf4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, mf2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, mf8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, mf4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 29.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, mf2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 29.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 29.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 29.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 2.00 58.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 4.00 116.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, mf8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, mf4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, mf2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 25.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 25.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 25.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 2.00 50.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 4.00 100.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 200.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, mf8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, mf4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, mf2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 37.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 37.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 2.00 74.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 4.00 148.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 296.00 - - vfdiv.vf v8, v16, fa0
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, mf8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, mf4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, mf2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e8, m8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, mf8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, mf4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 29.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, mf2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 29.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 29.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 29.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 2.00 58.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 4.00 116.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e16, m8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, mf8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, mf4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, mf2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 25.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 25.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 25.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 2.00 50.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 4.00 100.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e32, m8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 200.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, mf8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, mf4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, mf2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 232.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 37.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m1, tu, mu
+# CHECK-NEXT: - - - - - - - - - 1.00 37.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m2, tu, mu
+# CHECK-NEXT: - - - - - - - - - 2.00 74.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m4, tu, mu
+# CHECK-NEXT: - - - - - - - - - 4.00 148.00 - - vfsqrt.v v8, v16
+# CHECK-NEXT: - - - - 1.00 - - - - - - - - vsetvli zero, zero, e64, m8, tu, mu
+# CHECK-NEXT: - - - - - - - - - 8.00 296.00 - - vfsqrt.v v8, v16
diff --git a/llvm/test/tools/llvm-mca/RISCV/SiFiveP400/mul-cpop.s b/llvm/test/tools/llvm-mca/RISCV/SiFiveP400/mul-cpop.s
new file mode 100644
index 0000000..5f7a1d1
--- /dev/null
+++ b/llvm/test/tools/llvm-mca/RISCV/SiFiveP400/mul-cpop.s
@@ -0,0 +1,60 @@
+# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
+# RUN: llvm-mca -mtriple=riscv64 -mcpu=sifive-p470 -iterations=1 < %s | FileCheck %s
+
+mul s6, s6, s7
+
+mulw s4, s4, a2
+
+cpop t1, t1
+
+cpopw t2, t2
+
+# CHECK: Iterations: 1
+# CHECK-NEXT: Instructions: 4
+# CHECK-NEXT: Total Cycles: 8
+# CHECK-NEXT: Total uOps: 4
+
+# CHECK: Dispatch Width: 3
+# CHECK-NEXT: uOps Per Cycle: 0.50
+# CHECK-NEXT: IPC: 0.50
+# CHECK-NEXT: Block RThroughput: 4.0
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 2 1.00 mul s6, s6, s7
+# CHECK-NEXT: 1 2 1.00 mulw s4, s4, a2
+# CHECK-NEXT: 1 2 1.00 cpop t1, t1
+# CHECK-NEXT: 1 2 1.00 cpopw t2, t2
+
+# CHECK: Resources:
+# CHECK-NEXT: [0] - SiFiveP400Div
+# CHECK-NEXT: [1] - SiFiveP400FEXQ0
+# CHECK-NEXT: [2] - SiFiveP400FloatDiv
+# CHECK-NEXT: [3] - SiFiveP400IEXQ0
+# CHECK-NEXT: [4] - SiFiveP400IEXQ1
+# CHECK-NEXT: [5] - SiFiveP400IEXQ2
+# CHECK-NEXT: [6] - SiFiveP400Load
+# CHECK-NEXT: [7] - SiFiveP400Store
+# CHECK-NEXT: [8] - SiFiveP400VDiv
+# CHECK-NEXT: [9] - SiFiveP400VEXQ0
+# CHECK-NEXT: [10] - SiFiveP400VFloatDiv
+# CHECK-NEXT: [11] - SiFiveP400VLD
+# CHECK-NEXT: [12] - SiFiveP400VST
+
+# CHECK: Resource pressure per iteration:
+# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12]
+# CHECK-NEXT: - - - - - 4.00 - - - - - - -
+
+# CHECK: Resource pressure by instruction:
+# CHECK-NEXT: [0] [1] [2] [3] [4] [5] [6] [7] [8] [9] [10] [11] [12] Instructions:
+# CHECK-NEXT: - - - - - 1.00 - - - - - - - mul s6, s6, s7
+# CHECK-NEXT: - - - - - 1.00 - - - - - - - mulw s4, s4, a2
+# CHECK-NEXT: - - - - - 1.00 - - - - - - - cpop t1, t1
+# CHECK-NEXT: - - - - - 1.00 - - - - - - - cpopw t2, t2
diff --git a/llvm/test/tools/llvm-objcopy/MachO/strip-with-encryption-info.test b/llvm/test/tools/llvm-objcopy/MachO/strip-with-encryption-info.test
new file mode 100644
index 0000000..19b06b1
--- /dev/null
+++ b/llvm/test/tools/llvm-objcopy/MachO/strip-with-encryption-info.test
@@ -0,0 +1,217 @@
+# RUN: rm -rf %t && mkdir %t
+# RUN: yaml2obj %s -o %t/original
+# RUN: llvm-strip --strip-all %t/original -o %t/stripped
+# RUN: llvm-readobj --macho-segment %t/stripped | FileCheck %s
+
+# CHECK-LABEL: Name: __PAGEZERO
+# CHECK: fileoff: 16384
+
+# CHECK-LABEL: Name: __TEXT
+# CHECK: fileoff: 16384
+
+# The YAML below was generated from the following code:
+# int main(int argc, char **argv) { return 0; }
+# compiled on macOS against the macOS SDK, passing `-Wl,-encryptable` to the linker.
+# Section contents are removed since they are not important for the test; we
+# only need a small text segment (smaller than a page).
+--- !mach-o
+FileHeader:
+ magic: 0xFEEDFACF
+ cputype: 0x100000C
+ cpusubtype: 0x0
+ filetype: 0x2
+ ncmds: 15
+ sizeofcmds: 696
+ flags: 0x200085
+ reserved: 0x0
+LoadCommands:
+ - cmd: LC_SEGMENT_64
+ cmdsize: 72
+ segname: __PAGEZERO
+ vmaddr: 0
+ vmsize: 4294967296
+ fileoff: 0
+ filesize: 0
+ maxprot: 0
+ initprot: 0
+ nsects: 0
+ flags: 0
+ - cmd: LC_SEGMENT_64
+ cmdsize: 232
+ segname: __TEXT
+ vmaddr: 4294967296
+ vmsize: 32768
+ fileoff: 0
+ filesize: 32768
+ maxprot: 5
+ initprot: 5
+ nsects: 2
+ flags: 0
+ Sections:
+ - sectname: __text
+ segname: __TEXT
+ addr: 0x100004000
+ size: 32
+ offset: 0x4000
+ align: 2
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x80000400
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ - sectname: __unwind_info
+ segname: __TEXT
+ addr: 0x100004020
+ size: 4152
+ offset: 0x4020
+ align: 2
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x0
+ reserved1: 0x0
+ reserved2: 0x0
+ reserved3: 0x0
+ - cmd: LC_SEGMENT_64
+ cmdsize: 72
+ segname: __LINKEDIT
+ vmaddr: 4295000064
+ vmsize: 592
+ fileoff: 32768
+ filesize: 592
+ maxprot: 1
+ initprot: 1
+ nsects: 0
+ flags: 0
+ - cmd: LC_DYLD_CHAINED_FIXUPS
+ cmdsize: 16
+ dataoff: 32768
+ datasize: 48
+ - cmd: LC_DYLD_EXPORTS_TRIE
+ cmdsize: 16
+ dataoff: 32816
+ datasize: 48
+ - cmd: LC_SYMTAB
+ cmdsize: 24
+ symoff: 32872
+ nsyms: 2
+ stroff: 32904
+ strsize: 32
+ - cmd: LC_DYSYMTAB
+ cmdsize: 80
+ ilocalsym: 0
+ nlocalsym: 0
+ iextdefsym: 0
+ nextdefsym: 2
+ iundefsym: 2
+ nundefsym: 0
+ tocoff: 0
+ ntoc: 0
+ modtaboff: 0
+ nmodtab: 0
+ extrefsymoff: 0
+ nextrefsyms: 0
+ indirectsymoff: 0
+ nindirectsyms: 0
+ extreloff: 0
+ nextrel: 0
+ locreloff: 0
+ nlocrel: 0
+ - cmd: LC_ENCRYPTION_INFO_64
+ cmdsize: 24
+ cryptoff: 16384
+ cryptsize: 16384
+ cryptid: 0
+ pad: 0
+ - cmd: LC_LOAD_DYLINKER
+ cmdsize: 32
+ name: 12
+ Content: '/usr/lib/dyld'
+ ZeroPadBytes: 7
+ - cmd: LC_UUID
+ cmdsize: 24
+ uuid: 4C4C4447-5555-3144-A18A-01E9EB7E7D92
+ - cmd: LC_BUILD_VERSION
+ cmdsize: 32
+ platform: 1
+ minos: 983040
+ sdk: 983552
+ ntools: 1
+ Tools:
+ - tool: 4
+ version: 1310720
+ - cmd: LC_MAIN
+ cmdsize: 24
+ entryoff: 16384
+ stacksize: 0
+ - cmd: LC_FUNCTION_STARTS
+ cmdsize: 16
+ dataoff: 32864
+ datasize: 8
+ - cmd: LC_DATA_IN_CODE
+ cmdsize: 16
+ dataoff: 32872
+ datasize: 0
+ - cmd: LC_CODE_SIGNATURE
+ cmdsize: 16
+ dataoff: 32944
+ datasize: 416
+LinkEditData:
+ ExportTrie:
+ TerminalSize: 0
+ NodeOffset: 0
+ Name: ''
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 0
+ NodeOffset: 5
+ Name: _
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ Children:
+ - TerminalSize: 4
+ NodeOffset: 33
+ Name: main
+ Flags: 0x0
+ Address: 0x4000
+ Other: 0x0
+ ImportName: ''
+ - TerminalSize: 2
+ NodeOffset: 39
+ Name: _mh_execute_header
+ Flags: 0x0
+ Address: 0x0
+ Other: 0x0
+ ImportName: ''
+ NameList:
+ - n_strx: 2
+ n_type: 0xF
+ n_sect: 1
+ n_desc: 0
+ n_value: 4294983680
+ - n_strx: 8
+ n_type: 0xF
+ n_sect: 1
+ n_desc: 16
+ n_value: 4294967296
+ StringTable:
+ - ' '
+ - _main
+ - __mh_execute_header
+ - ''
+ - ''
+ - ''
+ - ''
+ FunctionStarts: [ 0x4000 ]
+ ChainedFixups: [ 0x0, 0x0, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x30, 0x0,
+ 0x0, 0x0, 0x30, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x3, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 ]
+...
+
diff --git a/llvm/test/tools/llvm-profgen/context-depth.test b/llvm/test/tools/llvm-profgen/context-depth.test
new file mode 100644
index 0000000..4eaa5fa
--- /dev/null
+++ b/llvm/test/tools/llvm-profgen/context-depth.test
@@ -0,0 +1,125 @@
+; Test --csprof-max-context-depth and --csprof-max-unsymbolized-context-depth
+
+; RUN: llvm-profgen --format=text --perfscript=%S/Inputs/recursion-compression-pseudoprobe.perfscript --binary=%S/Inputs/recursion-compression-pseudoprobe.perfbin --output=%t --compress-recursion=0 --profile-summary-hot-count=0 --csprof-max-context-depth=0 --csspgo-preinliner=0 --gen-cs-nested-profile=0
+; RUN: FileCheck %s --input-file %t -check-prefix=CHECK-MAX-CTX-DEPTH
+; RUN: llvm-profgen --format=text --perfscript=%S/Inputs/recursion-compression-pseudoprobe.perfscript --binary=%S/Inputs/recursion-compression-pseudoprobe.perfbin --output=%t --compress-recursion=0 --profile-summary-hot-count=0 --csprof-max-unsymbolized-context-depth=2 --csspgo-preinliner=0 --gen-cs-nested-profile=0 --skip-symbolization
+; RUN: FileCheck %s --input-file %t -check-prefix=CHECK-MAX-UNSYM-CTX-DEPTH
+; RUN: llvm-profgen --format=text --perfscript=%S/Inputs/recursion-compression-pseudoprobe.perfscript --binary=%S/Inputs/recursion-compression-pseudoprobe.perfbin --output=%t --compress-recursion=0 --profile-summary-hot-count=0 --csprof-max-unsymbolized-context-depth=2 --csspgo-preinliner=0 --gen-cs-nested-profile=0
+; RUN: FileCheck %s --input-file %t -check-prefix=CHECK-MAX-UNSYM-CTX-DEPTH-PROF
+; RUN: llvm-profgen --format=text --perfscript=%S/Inputs/recursion-compression-pseudoprobe.perfscript --binary=%S/Inputs/recursion-compression-pseudoprobe.perfbin --output=%t --compress-recursion=0 --profile-summary-hot-count=0 --csprof-max-unsymbolized-context-depth=2 --csprof-max-context-depth=0 --csspgo-preinliner=0 --gen-cs-nested-profile=0
+; RUN: FileCheck %s --input-file %t -check-prefix=CHECK-MAX-CTX-DEPTH
+
+
+; CHECK-MAX-CTX-DEPTH: [fb]:19:6
+; CHECK-MAX-CTX-DEPTH: 1: 6
+; CHECK-MAX-CTX-DEPTH: 2: 3
+; CHECK-MAX-CTX-DEPTH: 3: 3
+; CHECK-MAX-CTX-DEPTH: 4: 0
+; CHECK-MAX-CTX-DEPTH: 5: 4 fb:4
+; CHECK-MAX-CTX-DEPTH: 6: 3 fa:3
+; CHECK-MAX-CTX-DEPTH: !CFGChecksum: 563022570642068
+; CHECK-MAX-CTX-DEPTH: [fa]:14:4
+; CHECK-MAX-CTX-DEPTH: 1: 4
+; CHECK-MAX-CTX-DEPTH: 3: 4
+; CHECK-MAX-CTX-DEPTH: 4: 2
+; CHECK-MAX-CTX-DEPTH: 5: 1
+; CHECK-MAX-CTX-DEPTH: 6: 0
+; CHECK-MAX-CTX-DEPTH: 7: 2 fb:2
+; CHECK-MAX-CTX-DEPTH: 8: 1 fa:1
+; CHECK-MAX-CTX-DEPTH: !CFGChecksum: 563070469352221
+
+
+; CHECK-MAX-UNSYM-CTX-DEPTH: [0x7ab @ 0x7ab]
+; CHECK-MAX-UNSYM-CTX-DEPTH: 3
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7a0-7a7:1
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7a0-7ab:3
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7b2-7b5:1
+; CHECK-MAX-UNSYM-CTX-DEPTH: 3
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7a7->7b2:1
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7ab->7a0:4
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7b5->7c0:1
+; CHECK-MAX-UNSYM-CTX-DEPTH: [0x7ab @ 0x7b5]
+; CHECK-MAX-UNSYM-CTX-DEPTH: 1
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7c0-7d4:1
+; CHECK-MAX-UNSYM-CTX-DEPTH: 1
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7d4->7c0:1
+; CHECK-MAX-UNSYM-CTX-DEPTH: [0x7b5 @ 0x7d4]
+; CHECK-MAX-UNSYM-CTX-DEPTH: 2
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7c0-7cd:1
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7db-7e0:1
+; CHECK-MAX-UNSYM-CTX-DEPTH: 2
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7cd->7db:1
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7e0->7a0:1
+; CHECK-MAX-UNSYM-CTX-DEPTH: [0x7b5 @ 0x7e0]
+; CHECK-MAX-UNSYM-CTX-DEPTH: 2
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7a0-7a7:1
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7b2-7b5:1
+; CHECK-MAX-UNSYM-CTX-DEPTH: 2
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7a7->7b2:1
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7b5->7c0:1
+; CHECK-MAX-UNSYM-CTX-DEPTH: [0x7d4 @ 0x7e0]
+; CHECK-MAX-UNSYM-CTX-DEPTH: 2
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7a0-7a7:1
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7b2-7b5:1
+; CHECK-MAX-UNSYM-CTX-DEPTH: 2
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7a7->7b2:1
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7b5->7c0:1
+; CHECK-MAX-UNSYM-CTX-DEPTH: [0x7e0 @ 0x7b5]
+; CHECK-MAX-UNSYM-CTX-DEPTH: 2
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7c0-7cd:2
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7db-7e0:1
+; CHECK-MAX-UNSYM-CTX-DEPTH: 2
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7cd->7db:2
+; CHECK-MAX-UNSYM-CTX-DEPTH: 7e0->7a0:1
+
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: [fb:5 @ fb:5 @ fb]:13:4
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 1: 4
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 2: 3
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 3: 1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 4: 0
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 5: 4 fb:4
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 6: 1 fa:1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: !CFGChecksum: 563022570642068
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: [fa:7 @ fb:6 @ fa]:6:2
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 1: 2
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 3: 2
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 4: 1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 5: 0
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 6: 0
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 7: 1 fb:1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 8: 0
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: !CFGChecksum: 563070469352221
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: [fb:5 @ fb:6 @ fa]:4:1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 1: 1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 3: 1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 4: 0
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 5: 1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 6: 0
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 7: 0
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 8: 1 fa:1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: !CFGChecksum: 563070469352221
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: [fb:6 @ fa:8 @ fa]:4:1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 1: 1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 3: 1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 4: 1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 5: 0
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 6: 0
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 7: 1 fb:1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 8: 0
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: !CFGChecksum: 563070469352221
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: [fa:8 @ fa:7 @ fb]:3:1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 1: 1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 2: 0
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 3: 1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 4: 0
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 5: 0
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 6: 1 fa:1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: !CFGChecksum: 563022570642068
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: [fb:6 @ fa:7 @ fb]:3:1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 1: 1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 2: 0
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 3: 1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 4: 0
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 5: 0
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: 6: 1 fa:1
+; CHECK-MAX-UNSYM-CTX-DEPTH-PROF: !CFGChecksum: 563022570642068
diff --git a/llvm/test/tools/llvm-profgen/recursion-compression-pseudoprobe.test b/llvm/test/tools/llvm-profgen/recursion-compression-pseudoprobe.test
index c673028..b8e3e24 100644
--- a/llvm/test/tools/llvm-profgen/recursion-compression-pseudoprobe.test
+++ b/llvm/test/tools/llvm-profgen/recursion-compression-pseudoprobe.test
@@ -9,9 +9,6 @@
; RUN: FileCheck %s --input-file %t --check-prefix=CHECK-UNWINDER
; RUN: llvm-profgen --format=text --perfscript=%S/Inputs/recursion-compression-pseudoprobe-nommap.perfscript --binary=%S/Inputs/recursion-compression-pseudoprobe.perfbin --output=%t --profile-summary-hot-count=0 --csspgo-preinliner=0 --gen-cs-nested-profile=0
; RUN: FileCheck %s --input-file %t
-; RUN: llvm-profgen --format=text --perfscript=%S/Inputs/recursion-compression-pseudoprobe.perfscript --binary=%S/Inputs/recursion-compression-pseudoprobe.perfbin --output=%t --compress-recursion=0 --profile-summary-hot-count=0 --csprof-max-context-depth=0 --csspgo-preinliner=0 --gen-cs-nested-profile=0
-; RUN: FileCheck %s --input-file %t -check-prefix=CHECK-MAX-CTX-DEPTH
-
; CHECK-UNCOMPRESS: [main:2 @ foo:5 @ fa:8 @ fa:7 @ fb:5 @ fb:5 @ fb:5 @ fb:5 @ fb:5 @ fb:5 @ fb:5 @ fb:5 @ fb:6 @ fa]:4:1
; CHECK-UNCOMPRESS: 1: 1
@@ -68,23 +65,6 @@
; CHECK-UNCOMPRESS: [main:2 @ foo:5 @ fa:8 @ fa:7 @ fb:5 @ fb:5 @ fb:5 @ fb:5 @ fb]:1:0
; CHECK-UNCOMPRESS: 5: 1 fb:1
; CHECK-UNCOMPRESS: !CFGChecksum: 563022570642068
-; CHECK-MAX-CTX-DEPTH: [fb]:19:6
-; CHECK-MAX-CTX-DEPTH: 1: 6
-; CHECK-MAX-CTX-DEPTH: 2: 3
-; CHECK-MAX-CTX-DEPTH: 3: 3
-; CHECK-MAX-CTX-DEPTH: 4: 0
-; CHECK-MAX-CTX-DEPTH: 5: 4 fb:4
-; CHECK-MAX-CTX-DEPTH: 6: 3 fa:3
-; CHECK-MAX-CTX-DEPTH: !CFGChecksum: 563022570642068
-; CHECK-MAX-CTX-DEPTH: [fa]:14:4
-; CHECK-MAX-CTX-DEPTH: 1: 4
-; CHECK-MAX-CTX-DEPTH: 3: 4
-; CHECK-MAX-CTX-DEPTH: 4: 2
-; CHECK-MAX-CTX-DEPTH: 5: 1
-; CHECK-MAX-CTX-DEPTH: 6: 0
-; CHECK-MAX-CTX-DEPTH: 7: 2 fb:2
-; CHECK-MAX-CTX-DEPTH: 8: 1 fa:1
-; CHECK-MAX-CTX-DEPTH: !CFGChecksum: 563070469352221
; CHECK: [main:2 @ foo:5 @ fa:8 @ fa:7 @ fb:5 @ fb]:13:4