Diffstat (limited to 'llvm')
-rw-r--r-- llvm/docs/GettingStarted.rst | 2
-rw-r--r-- llvm/lib/CodeGen/TargetLoweringBase.cpp | 34
-rw-r--r-- llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp | 3
-rw-r--r-- llvm/lib/Object/IRSymtab.cpp | 11
-rw-r--r-- llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 10
-rw-r--r-- llvm/lib/Target/AMDGPU/GCNSubtarget.cpp | 57
-rw-r--r-- llvm/lib/Target/AMDGPU/GCNSubtarget.h | 4
-rw-r--r-- llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp | 29
-rw-r--r-- llvm/lib/Target/AMDGPU/SIInstrInfo.cpp | 26
-rw-r--r-- llvm/lib/Target/AMDGPU/SIInstrInfo.h | 6
-rw-r--r-- llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp | 5
-rw-r--r-- llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp | 31
-rw-r--r-- llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp | 61
-rw-r--r-- llvm/lib/Target/AMDGPU/SIRegisterInfo.h | 5
-rw-r--r-- llvm/lib/Target/ARM/ARMISelLowering.cpp | 11
-rw-r--r-- llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp | 37
-rw-r--r-- llvm/lib/Target/Mips/MipsAsmPrinter.cpp | 8
-rw-r--r-- llvm/lib/Target/Mips/MipsISelLowering.cpp | 29
-rw-r--r-- llvm/lib/Target/Mips/MipsISelLowering.h | 1
-rw-r--r-- llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp | 96
-rw-r--r-- llvm/lib/Transforms/Scalar/LoopFuse.cpp | 25
-rw-r--r-- llvm/test/Analysis/CostModel/RISCV/arith-fp.ll | 3684
-rw-r--r-- llvm/test/CodeGen/AArch64/andcompare.ll | 3190
-rw-r--r-- llvm/test/CodeGen/AArch64/andorbrcompare.ll | 532
-rw-r--r-- llvm/test/CodeGen/AArch64/arm64-ccmp.ll | 852
-rw-r--r-- llvm/test/CodeGen/AArch64/cmp-chains.ll | 566
-rw-r--r-- llvm/test/CodeGen/AArch64/dag-combine-select.ll | 88
-rw-r--r-- llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_1op.ll | 130
-rw-r--r-- llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_2op.ll | 202
-rw-r--r-- llvm/test/CodeGen/AMDGPU/gfx1250-scratch-scope-se.ll | 95
-rw-r--r-- llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll | 88
-rw-r--r-- llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll | 38
-rw-r--r-- llvm/test/CodeGen/AMDGPU/wwm-regalloc-error.ll | 2
-rw-r--r-- llvm/test/CodeGen/LoongArch/lasx/build-vector.ll | 215
-rw-r--r-- llvm/test/CodeGen/LoongArch/lsx/build-vector.ll | 74
-rw-r--r-- llvm/test/CodeGen/Mips/abiflags-soft-float.ll | 12
-rw-r--r-- llvm/test/CodeGen/Mips/nan_lowering.ll | 25
-rw-r--r-- llvm/test/CodeGen/Mips/qnan.ll | 14
-rw-r--r-- llvm/test/CodeGen/PowerPC/froundeven-legalization.ll | 111
-rw-r--r-- llvm/test/CodeGen/X86/embed-bitcode.ll | 13
-rw-r--r-- llvm/test/CodeGen/X86/late-tail-dup-computed-goto.mir | 128
-rw-r--r-- llvm/test/ThinLTO/X86/memprof_func_assign_fix.ll | 145
-rw-r--r-- llvm/test/Transforms/LoopFusion/sunk-phi-nodes.ll | 65
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll | 4
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll | 4
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll | 4
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll | 4
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/preserve-dbg-loc.ll | 3
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll | 47
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll (renamed from llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-bin-unary-ops-args.ll) | 770
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll (renamed from llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-call-intrinsics.ll) | 509
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll (renamed from llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-cast-intrinsics.ll) | 584
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll (renamed from llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-cond-reduction.ll) | 12
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cost.ll | 2
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll (renamed from llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-div.ll) | 149
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll (renamed from llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-fixed-order-recurrence.ll) | 68
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll (renamed from llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-gather-scatter.ll) | 56
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll (renamed from llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-inloop-reduction.ll) | 6
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll (renamed from llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-interleave.ll) | 6
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll (renamed from llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-intermediate-store.ll) | 12
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll (renamed from llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-iv32.ll) | 6
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll (renamed from llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-known-no-overflow.ll) | 3
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll (renamed from llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-masked-loadstore.ll) | 51
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-no-masking.ll (renamed from llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-no-masking.ll) | 6
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll (renamed from llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-ordered-reduction.ll) | 49
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction-cost.ll (renamed from llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reduction-cost.ll) | 3
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll (renamed from llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reduction.ll) | 6
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll (renamed from llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reverse-load-store.ll) | 161
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll (renamed from llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-safe-dep-distance.ll) | 127
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll (renamed from llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-uniform-store.ll) | 2
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll | 2
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll | 2
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll | 67
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll | 6
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll | 3
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll | 3
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll | 3
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll | 12
-rw-r--r-- llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll | 6
-rw-r--r-- llvm/test/Transforms/MemProfContextDisambiguation/func_assign_fix.ll | 130
-rwxr-xr-x llvm/utils/gn/build/write_vcsrevision.py | 17
81 files changed, 8290 insertions, 5375 deletions
diff --git a/llvm/docs/GettingStarted.rst b/llvm/docs/GettingStarted.rst
index 3036dae..e4dbb64b 100644
--- a/llvm/docs/GettingStarted.rst
+++ b/llvm/docs/GettingStarted.rst
@@ -240,8 +240,10 @@ Linux x86\ :sup:`1` GCC, Clang
Linux amd64 GCC, Clang
Linux ARM GCC, Clang
Linux AArch64 GCC, Clang
+Linux LoongArch GCC, Clang
Linux Mips GCC, Clang
Linux PowerPC GCC, Clang
+Linux RISC-V GCC, Clang
Linux SystemZ GCC, Clang
Solaris V9 (Ultrasparc) GCC
DragonFlyBSD amd64 GCC, Clang
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index d4a3455..68b8a00 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -806,7 +806,17 @@ void TargetLoweringBase::initActions() {
ISD::SDIVFIX, ISD::SDIVFIXSAT,
ISD::UDIVFIX, ISD::UDIVFIXSAT,
ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT,
- ISD::IS_FPCLASS},
+ ISD::IS_FPCLASS, ISD::FCBRT,
+ ISD::FLOG, ISD::FLOG2,
+ ISD::FLOG10, ISD::FEXP,
+ ISD::FEXP2, ISD::FEXP10,
+ ISD::FFLOOR, ISD::FNEARBYINT,
+ ISD::FCEIL, ISD::FRINT,
+ ISD::FTRUNC, ISD::FROUNDEVEN,
+ ISD::FTAN, ISD::FACOS,
+ ISD::FASIN, ISD::FATAN,
+ ISD::FCOSH, ISD::FSINH,
+ ISD::FTANH, ISD::FATAN2},
VT, Expand);
// Overflow operations default to expand
@@ -852,13 +862,12 @@ void TargetLoweringBase::initActions() {
// These operations default to expand for vector types.
if (VT.isVector())
- setOperationAction(
- {ISD::FCOPYSIGN, ISD::SIGN_EXTEND_INREG, ISD::ANY_EXTEND_VECTOR_INREG,
- ISD::SIGN_EXTEND_VECTOR_INREG, ISD::ZERO_EXTEND_VECTOR_INREG,
- ISD::SPLAT_VECTOR, ISD::LRINT, ISD::LLRINT, ISD::LROUND,
- ISD::LLROUND, ISD::FTAN, ISD::FACOS, ISD::FASIN, ISD::FATAN,
- ISD::FCOSH, ISD::FSINH, ISD::FTANH, ISD::FATAN2},
- VT, Expand);
+ setOperationAction({ISD::FCOPYSIGN, ISD::SIGN_EXTEND_INREG,
+ ISD::ANY_EXTEND_VECTOR_INREG,
+ ISD::SIGN_EXTEND_VECTOR_INREG,
+ ISD::ZERO_EXTEND_VECTOR_INREG, ISD::SPLAT_VECTOR,
+ ISD::LRINT, ISD::LLRINT, ISD::LROUND, ISD::LLROUND},
+ VT, Expand);
// Constrained floating-point operations default to expand.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
@@ -914,15 +923,6 @@ void TargetLoweringBase::initActions() {
{MVT::bf16, MVT::f16, MVT::f32, MVT::f64, MVT::f80, MVT::f128},
Expand);
- // These library functions default to expand.
- setOperationAction({ISD::FCBRT, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
- ISD::FEXP, ISD::FEXP2, ISD::FEXP10, ISD::FFLOOR,
- ISD::FNEARBYINT, ISD::FCEIL, ISD::FRINT, ISD::FTRUNC,
- ISD::FROUNDEVEN, ISD::FTAN, ISD::FACOS, ISD::FASIN,
- ISD::FATAN, ISD::FCOSH, ISD::FSINH, ISD::FTANH,
- ISD::FATAN2},
- {MVT::f32, MVT::f64, MVT::f128}, Expand);
-
// Insert custom handling default for llvm.canonicalize.*.
setOperationAction(ISD::FCANONICALIZE,
{MVT::f16, MVT::f32, MVT::f64, MVT::f128}, Expand);
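
With the FP libcall opcodes folded into the generic per-type Expand loop above, a backend that has native instructions for any of them must opt back in explicitly. A minimal sketch of what that looks like in a hypothetical target's constructor (MyTargetLowering and the opcode/type choices are illustrative, not from this patch):

```cpp
// initActions() now defaults FCBRT, FLOG*, FEXP*, FFLOOR, FCEIL, etc. to
// Expand for every value type, so a target only overrides supported cases.
MyTargetLowering::MyTargetLowering(const TargetMachine &TM)
    : TargetLowering(TM) {
  setOperationAction({ISD::FEXP2, ISD::FLOG2}, MVT::f32, Legal);
  setOperationAction(ISD::FROUNDEVEN, {MVT::f32, MVT::f64}, Legal);
}
```

The AMDGPU and ARM hunks later in this diff do exactly this kind of opt-in for f16 and MVE vector types.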
diff --git a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
index 408d07b..725e951 100644
--- a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -1734,7 +1734,8 @@ MCSection *TargetLoweringObjectFileCOFF::getExplicitSectionGlobal(
Name == getInstrProfSectionName(IPSK_covdata, Triple::COFF,
/*AddSegmentInfo=*/false) ||
Name == getInstrProfSectionName(IPSK_covname, Triple::COFF,
- /*AddSegmentInfo=*/false))
+ /*AddSegmentInfo=*/false) ||
+ Name == ".llvmbc" || Name == ".llvmcmd")
Kind = SectionKind::getMetadata();
int Selection = 0;
unsigned Characteristics = getCOFFSectionFlags(Kind, TM);
diff --git a/llvm/lib/Object/IRSymtab.cpp b/llvm/lib/Object/IRSymtab.cpp
index 2579fa3..0f19495 100644
--- a/llvm/lib/Object/IRSymtab.cpp
+++ b/llvm/lib/Object/IRSymtab.cpp
@@ -8,11 +8,11 @@
#include "llvm/Object/IRSymtab.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Comdat.h"
@@ -213,9 +213,10 @@ Expected<int> Builder::getComdatIndex(const Comdat *C, const Module *M) {
return P.first->second;
}
-static DenseSet<StringRef> buildPreservedSymbolsSet(const Triple &TT) {
- DenseSet<StringRef> PreservedSymbolSet(std::begin(PreservedSymbols),
- std::end(PreservedSymbols));
+static StringSet<> buildPreservedSymbolsSet(const Triple &TT) {
+ StringSet<> PreservedSymbolSet;
+ PreservedSymbolSet.insert(std::begin(PreservedSymbols),
+ std::end(PreservedSymbols));
// FIXME: Do we need to pass in ABI fields from TargetOptions?
RTLIB::RuntimeLibcallsInfo Libcalls(TT);
for (RTLIB::LibcallImpl Impl : Libcalls.getLibcallImpls()) {
@@ -280,7 +281,7 @@ Error Builder::addSymbol(const ModuleSymbolTable &Msymtab,
setStr(Sym.IRName, GV->getName());
- static const DenseSet<StringRef> PreservedSymbolsSet =
+ static const StringSet<> PreservedSymbolsSet =
buildPreservedSymbolsSet(GV->getParent()->getTargetTriple());
bool IsPreservedSymbol = PreservedSymbolsSet.contains(GV->getName());
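
The switch from DenseSet&lt;StringRef&gt; to StringSet&lt;&gt; matters because StringSet copies key bytes into its own allocator, while DenseSet&lt;StringRef&gt; only stores pointers into storage it does not own (here, names produced by RuntimeLibcallsInfo). A self-contained sketch of the ownership difference (the symbol name is illustrative):

```cpp
#include "llvm/ADT/StringSet.h"
#include <string>

bool demo() {
  llvm::StringSet<> Set;
  {
    std::string Temp = "__stack_chk_guard";
    Set.insert(Temp); // key bytes are copied into the set's allocator
  }                   // Temp is destroyed here
  return Set.contains("__stack_chk_guard"); // still true; no dangling ref
}
```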
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index e3ca09e..f25ce87 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -391,8 +391,9 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
// Library functions. These default to Expand, but we have instructions
// for them.
setOperationAction({ISD::FCEIL, ISD::FPOW, ISD::FABS, ISD::FFLOOR,
- ISD::FROUNDEVEN, ISD::FTRUNC, ISD::FMINNUM, ISD::FMAXNUM},
- MVT::f32, Legal);
+ ISD::FROUNDEVEN, ISD::FTRUNC},
+ {MVT::f16, MVT::f32}, Legal);
+ setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, MVT::f32, Legal);
setOperationAction(ISD::FLOG2, MVT::f32, Custom);
setOperationAction(ISD::FROUND, {MVT::f32, MVT::f64}, Custom);
@@ -412,9 +413,10 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::FREM, {MVT::f16, MVT::f32, MVT::f64}, Custom);
- if (Subtarget->has16BitInsts())
+ if (Subtarget->has16BitInsts()) {
setOperationAction(ISD::IS_FPCLASS, {MVT::f16, MVT::f32, MVT::f64}, Legal);
- else {
+ setOperationAction({ISD::FLOG2, ISD::FEXP2}, MVT::f16, Legal);
+ } else {
setOperationAction(ISD::IS_FPCLASS, {MVT::f32, MVT::f64}, Legal);
setOperationAction({ISD::FLOG2, ISD::FEXP2}, MVT::f16, Custom);
}
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.cpp b/llvm/lib/Target/AMDGPU/GCNSubtarget.cpp
index 9a2bab1..0a0a107 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.cpp
@@ -537,6 +537,63 @@ unsigned GCNSubtarget::getMaxNumVGPRs(const MachineFunction &MF) const {
return getMaxNumVGPRs(MF.getFunction());
}
+std::pair<unsigned, unsigned>
+GCNSubtarget::getMaxNumVectorRegs(const Function &F) const {
+ const unsigned MaxVectorRegs = getMaxNumVGPRs(F);
+
+ unsigned MaxNumVGPRs = MaxVectorRegs;
+ unsigned MaxNumAGPRs = 0;
+
+ // On GFX90A, the number of VGPRs and AGPRs need not be equal. Theoretically,
+ // a wave may have up to 512 total vector registers combining together both
+ // VGPRs and AGPRs. Hence, in an entry function without calls and without
+ // AGPRs used within it, it is possible to use the whole vector register
+ // budget for VGPRs.
+ //
+ // TODO: it shall be possible to estimate maximum AGPR/VGPR pressure and split
+ // register file accordingly.
+ if (hasGFX90AInsts()) {
+ unsigned MinNumAGPRs = 0;
+ const unsigned TotalNumAGPRs = AMDGPU::AGPR_32RegClass.getNumRegs();
+ const unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs();
+
+ const std::pair<unsigned, unsigned> DefaultNumAGPR = {~0u, ~0u};
+
+ // TODO: The lower bound should probably force the number of required
+ // registers up, overriding amdgpu-waves-per-eu.
+ std::tie(MinNumAGPRs, MaxNumAGPRs) =
+ AMDGPU::getIntegerPairAttribute(F, "amdgpu-agpr-alloc", DefaultNumAGPR,
+ /*OnlyFirstRequired=*/true);
+
+ if (MinNumAGPRs == DefaultNumAGPR.first) {
+ // Default to splitting half the registers if AGPRs are required.
+ MinNumAGPRs = MaxNumAGPRs = MaxVectorRegs / 2;
+ } else {
+ // Align to accum_offset's allocation granularity.
+ MinNumAGPRs = alignTo(MinNumAGPRs, 4);
+
+ MinNumAGPRs = std::min(MinNumAGPRs, TotalNumAGPRs);
+ }
+
+ // Clamp values to be inbounds of our limits, and ensure min <= max.
+
+ MaxNumAGPRs = std::min(std::max(MinNumAGPRs, MaxNumAGPRs), MaxVectorRegs);
+ MinNumAGPRs = std::min(std::min(MinNumAGPRs, TotalNumAGPRs), MaxNumAGPRs);
+
+ MaxNumVGPRs = std::min(MaxVectorRegs - MinNumAGPRs, TotalNumVGPRs);
+ MaxNumAGPRs = std::min(MaxVectorRegs - MaxNumVGPRs, MaxNumAGPRs);
+
+ assert(MaxNumVGPRs + MaxNumAGPRs <= MaxVectorRegs &&
+ MaxNumAGPRs <= TotalNumAGPRs && MaxNumVGPRs <= TotalNumVGPRs &&
+ "invalid register counts");
+ } else if (hasMAIInsts()) {
+ // On gfx908 the number of AGPRs always equals the number of VGPRs.
+ MaxNumAGPRs = MaxNumVGPRs = MaxVectorRegs;
+ }
+
+ return std::pair(MaxNumVGPRs, MaxNumAGPRs);
+}
+
void GCNSubtarget::adjustSchedDependency(
SUnit *Def, int DefOpIdx, SUnit *Use, int UseOpIdx, SDep &Dep,
const TargetSchedModel *SchedModel) const {
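
The clamping above keeps min <= max and keeps both counts inside the register-file and budget limits. A standalone mirror of the arithmetic, runnable without LLVM (the limits and example attribute values are illustrative; GFX90A has 256 VGPRs, 256 AGPRs, and a 512-register combined budget):

```cpp
#include <algorithm>
#include <cstdio>
#include <utility>

std::pair<unsigned, unsigned> splitVectorRegs(unsigned MaxVectorRegs,
                                              unsigned MinAGPRs,
                                              unsigned MaxAGPRs) {
  const unsigned TotalAGPRs = 256, TotalVGPRs = 256;
  // Align the requested minimum to accum_offset granularity (4), then clamp.
  MinAGPRs = std::min((MinAGPRs + 3) / 4 * 4, TotalAGPRs);
  MaxAGPRs = std::min(std::max(MinAGPRs, MaxAGPRs), MaxVectorRegs);
  MinAGPRs = std::min(MinAGPRs, MaxAGPRs);
  unsigned MaxVGPRs = std::min(MaxVectorRegs - MinAGPRs, TotalVGPRs);
  MaxAGPRs = std::min(MaxVectorRegs - MaxVGPRs, MaxAGPRs);
  return {MaxVGPRs, MaxAGPRs};
}

int main() {
  // e.g. "amdgpu-agpr-alloc"="32,64" with the full 512-register budget
  auto [V, A] = splitVectorRegs(512, 32, 64);
  std::printf("VGPRs=%u AGPRs=%u\n", V, A); // prints VGPRs=256 AGPRs=64
}
```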
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index 88a269f..785ede3 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -1667,6 +1667,10 @@ public:
return getMaxNumVGPRs(F);
}
+ /// Return a pair of maximum numbers of VGPRs and AGPRs that meet the number
+ /// of waves per execution unit required for the function \p MF.
+ std::pair<unsigned, unsigned> getMaxNumVectorRegs(const Function &F) const;
+
/// \returns Maximum number of VGPRs that meets number of waves per execution
/// unit requirement for function \p MF, or number of VGPRs explicitly
/// requested using "amdgpu-num-vgpr" attribute attached to function \p MF.
diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index dd3f2fe..520c321 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -552,7 +552,7 @@ public:
(!Inst.mayLoad() || SIInstrInfo::isAtomicNoRet(Inst))) {
// FLAT and SCRATCH instructions may access scratch. Other VMEM
// instructions do not.
- if (SIInstrInfo::isFLAT(Inst) && mayAccessScratchThroughFlat(Inst))
+ if (TII->mayAccessScratchThroughFlat(Inst))
return SCRATCH_WRITE_ACCESS;
return VMEM_WRITE_ACCESS;
}
@@ -565,7 +565,6 @@ public:
bool mayAccessVMEMThroughFlat(const MachineInstr &MI) const;
bool mayAccessLDSThroughFlat(const MachineInstr &MI) const;
- bool mayAccessScratchThroughFlat(const MachineInstr &MI) const;
bool isVmemAccess(const MachineInstr &MI) const;
bool generateWaitcntInstBefore(MachineInstr &MI,
WaitcntBrackets &ScoreBrackets,
@@ -2160,32 +2159,6 @@ bool SIInsertWaitcnts::mayAccessLDSThroughFlat(const MachineInstr &MI) const {
return false;
}
-// This is a flat memory operation. Check to see if it has memory tokens for
-// either scratch or FLAT.
-bool SIInsertWaitcnts::mayAccessScratchThroughFlat(
- const MachineInstr &MI) const {
- assert(TII->isFLAT(MI));
-
- // SCRATCH instructions always access scratch.
- if (TII->isFLATScratch(MI))
- return true;
-
- // GLOBAL instructions never access scratch.
- if (TII->isFLATGlobal(MI))
- return false;
-
- // If there are no memory operands then conservatively assume the flat
- // operation may access scratch.
- if (MI.memoperands_empty())
- return true;
-
- // See if any memory operand specifies an address space that involves scratch.
- return any_of(MI.memoperands(), [](const MachineMemOperand *Memop) {
- unsigned AS = Memop->getAddrSpace();
- return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS;
- });
-}
-
bool SIInsertWaitcnts::isVmemAccess(const MachineInstr &MI) const {
return (TII->isFLAT(MI) && mayAccessVMEMThroughFlat(MI)) ||
(TII->isVMEM(MI) && !AMDGPU::getMUBUFIsBufferInv(MI.getOpcode()));
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 8d6c1d0..2aa6b4e 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -4249,6 +4249,32 @@ bool SIInstrInfo::isAlwaysGDS(uint16_t Opcode) const {
Opcode == AMDGPU::DS_SUB_GS_REG_RTN || isGWS(Opcode);
}
+bool SIInstrInfo::mayAccessScratchThroughFlat(const MachineInstr &MI) const {
+ if (!isFLAT(MI) || isFLATGlobal(MI))
+ return false;
+
+ // If scratch is not initialized, we can never access it.
+ if (MI.getMF()->getFunction().hasFnAttribute("amdgpu-no-flat-scratch-init"))
+ return false;
+
+ // SCRATCH instructions always access scratch.
+ if (isFLATScratch(MI))
+ return true;
+
+ // If there are no memory operands then conservatively assume the flat
+ // operation may access scratch.
+ if (MI.memoperands_empty())
+ return true;
+
+ // TODO (?): Does this need to be taught how to read noalias.addrspace ?
+
+ // See if any memory operand specifies an address space that involves scratch.
+ return any_of(MI.memoperands(), [](const MachineMemOperand *Memop) {
+ unsigned AS = Memop->getAddrSpace();
+ return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS;
+ });
+}
+
bool SIInstrInfo::modifiesModeRegister(const MachineInstr &MI) {
// Skip the full operand and register alias search modifiesRegister
// does. There's only a handful of instructions that touch this, it's only an
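
The relocated predicate is deliberately conservative: with no memory operands it assumes scratch, and FLAT_ADDRESS counts because a flat pointer may resolve to private memory. A standalone mirror of the memoperand check (address-space numbers follow AMDGPUAS: FLAT_ADDRESS = 0, PRIVATE_ADDRESS = 5):

```cpp
#include <algorithm>
#include <vector>

bool mayTouchScratch(const std::vector<unsigned> &MemOperandAS) {
  if (MemOperandAS.empty())
    return true; // no memoperands: conservatively assume scratch access
  return std::any_of(MemOperandAS.begin(), MemOperandAS.end(),
                     [](unsigned AS) { return AS == 5 || AS == 0; });
}
```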
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 2ffb783..e042b59 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -678,6 +678,12 @@ public:
return get(Opcode).TSFlags & SIInstrFlags::FLAT;
}
+ /// \returns true for SCRATCH_ instructions, or FLAT_ instructions with
+ /// SCRATCH_ memory operands.
+ /// Conservatively correct; will return true if \p MI cannot be proven
+ /// to not hit scratch.
+ bool mayAccessScratchThroughFlat(const MachineInstr &MI) const;
+
static bool isBlockLoadStore(uint16_t Opcode) {
switch (Opcode) {
case AMDGPU::SI_BLOCK_SPILL_V1024_SAVE:
diff --git a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
index 9f61bf8..9509199 100644
--- a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
@@ -351,6 +351,7 @@ void SILowerSGPRSpills::determineRegsForWWMAllocation(MachineFunction &MF,
MachineRegisterInfo &MRI = MF.getRegInfo();
BitVector ReservedRegs = TRI->getReservedRegs(MF);
BitVector NonWwmAllocMask(TRI->getNumRegs());
+ const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
// FIXME: MaxNumVGPRsForWwmAllocation might need to be adjusted in the future
// to have a balanced allocation between WWM values and per-thread vector
@@ -359,7 +360,7 @@ void SILowerSGPRSpills::determineRegsForWWMAllocation(MachineFunction &MF,
NumRegs =
std::min(static_cast<unsigned>(MFI->getSGPRSpillVGPRs().size()), NumRegs);
- auto [MaxNumVGPRs, MaxNumAGPRs] = TRI->getMaxNumVectorRegs(MF);
+ auto [MaxNumVGPRs, MaxNumAGPRs] = ST.getMaxNumVectorRegs(MF.getFunction());
// Try to use the highest available registers for now. Later after
// vgpr-regalloc, they can be shifted to the lowest range.
unsigned I = 0;
@@ -376,7 +377,7 @@ void SILowerSGPRSpills::determineRegsForWWMAllocation(MachineFunction &MF,
// Reserve an arbitrary register and report the error.
TRI->markSuperRegs(RegMask, AMDGPU::VGPR0);
MF.getFunction().getContext().emitError(
- "can't find enough VGPRs for wwm-regalloc");
+ "cannot find enough VGPRs for wwm-regalloc");
}
}
diff --git a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
index 0e8a420..607825e 100644
--- a/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
@@ -321,7 +321,8 @@ public:
bool IsNonTemporal,
bool IsLastUse = false) const = 0;
- virtual bool expandSystemScopeStore(MachineBasicBlock::iterator &MI) const {
+ virtual bool finalizeStore(MachineBasicBlock::iterator &MI,
+ bool Atomic) const {
return false;
};
@@ -602,7 +603,8 @@ public:
bool IsVolatile, bool IsNonTemporal,
bool IsLastUse) const override;
- bool expandSystemScopeStore(MachineBasicBlock::iterator &MI) const override;
+ bool finalizeStore(MachineBasicBlock::iterator &MI,
+ bool Atomic) const override;
bool insertRelease(MachineBasicBlock::iterator &MI, SIAtomicScope Scope,
SIAtomicAddrSpace AddrSpace, bool IsCrossAddrSpaceOrdering,
@@ -2551,11 +2553,25 @@ bool SIGfx12CacheControl::enableVolatileAndOrNonTemporal(
return Changed;
}
-bool SIGfx12CacheControl::expandSystemScopeStore(
- MachineBasicBlock::iterator &MI) const {
+bool SIGfx12CacheControl::finalizeStore(MachineBasicBlock::iterator &MI,
+ bool Atomic) const {
MachineOperand *CPol = TII->getNamedOperand(*MI, OpName::cpol);
- if (CPol && ((CPol->getImm() & CPol::SCOPE) == CPol::SCOPE_SYS))
- return insertWaitsBeforeSystemScopeStore(MI);
+ if (!CPol)
+ return false;
+
+ const unsigned Scope = CPol->getImm() & CPol::SCOPE;
+
+ // GFX12.0 only: Extra waits needed before system scope stores.
+ if (!ST.hasGFX1250Insts()) {
+ if (!Atomic && Scope == CPol::SCOPE_SYS)
+ return insertWaitsBeforeSystemScopeStore(MI);
+ return false;
+ }
+
+ // GFX12.5 only: Require SCOPE_SE on stores that may hit the scratch address
+ // space.
+ if (TII->mayAccessScratchThroughFlat(*MI) && Scope == CPol::SCOPE_CU)
+ return setScope(MI, CPol::SCOPE_SE);
return false;
}
@@ -2674,6 +2690,7 @@ bool SIMemoryLegalizer::expandStore(const SIMemOpInfo &MOI,
MOI.getIsCrossAddressSpaceOrdering(),
Position::BEFORE);
+ Changed |= CC->finalizeStore(MI, /*Atomic=*/true);
return Changed;
}
@@ -2686,7 +2703,7 @@ bool SIMemoryLegalizer::expandStore(const SIMemOpInfo &MOI,
// GFX12 specific, scope(desired coherence domain in cache hierarchy) is
// instruction field, do not confuse it with atomic scope.
- Changed |= CC->expandSystemScopeStore(MI);
+ Changed |= CC->finalizeStore(MI, /*Atomic=*/false);
return Changed;
}
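
finalizeStore now folds two per-generation fixups into one hook. A standalone mirror of the decision it makes (the enum names are illustrative stand-ins for the CPol scope encoding):

```cpp
#include <cstdint>

enum Scope : uint8_t { SCOPE_CU, SCOPE_SE, SCOPE_DEV, SCOPE_SYS };
enum class StoreFix { None, WaitBeforeStore, PromoteToScopeSE };

StoreFix finalizeStore(bool HasGFX1250Insts, bool Atomic, bool MayHitScratch,
                       Scope S) {
  if (!HasGFX1250Insts) {
    // GFX12.0: plain (non-atomic) system-scope stores need extra waits.
    return (!Atomic && S == SCOPE_SYS) ? StoreFix::WaitBeforeStore
                                       : StoreFix::None;
  }
  // GFX12.5: stores that may hit scratch must use at least SCOPE_SE.
  return (MayHitScratch && S == SCOPE_CU) ? StoreFix::PromoteToScopeSE
                                          : StoreFix::None;
}
```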
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 84cfa87..f3acc5c 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -572,65 +572,6 @@ MCRegister SIRegisterInfo::reservedPrivateSegmentBufferReg(
return getAlignedHighSGPRForRC(MF, /*Align=*/4, &AMDGPU::SGPR_128RegClass);
}
-std::pair<unsigned, unsigned>
-SIRegisterInfo::getMaxNumVectorRegs(const MachineFunction &MF) const {
- const unsigned MaxVectorRegs = ST.getMaxNumVGPRs(MF);
-
- unsigned MaxNumVGPRs = MaxVectorRegs;
- unsigned MaxNumAGPRs = 0;
-
- // On GFX90A, the number of VGPRs and AGPRs need not be equal. Theoretically,
- // a wave may have up to 512 total vector registers combining together both
- // VGPRs and AGPRs. Hence, in an entry function without calls and without
- // AGPRs used within it, it is possible to use the whole vector register
- // budget for VGPRs.
- //
- // TODO: it shall be possible to estimate maximum AGPR/VGPR pressure and split
- // register file accordingly.
- if (ST.hasGFX90AInsts()) {
- unsigned MinNumAGPRs = 0;
- const unsigned TotalNumAGPRs = AMDGPU::AGPR_32RegClass.getNumRegs();
- const unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs();
-
- const std::pair<unsigned, unsigned> DefaultNumAGPR = {~0u, ~0u};
-
- // TODO: Move this logic into subtarget on IR function
- //
- // TODO: The lower bound should probably force the number of required
- // registers up, overriding amdgpu-waves-per-eu.
- std::tie(MinNumAGPRs, MaxNumAGPRs) = AMDGPU::getIntegerPairAttribute(
- MF.getFunction(), "amdgpu-agpr-alloc", DefaultNumAGPR,
- /*OnlyFirstRequired=*/true);
-
- if (MinNumAGPRs == DefaultNumAGPR.first) {
- // Default to splitting half the registers if AGPRs are required.
- MinNumAGPRs = MaxNumAGPRs = MaxVectorRegs / 2;
- } else {
- // Align to accum_offset's allocation granularity.
- MinNumAGPRs = alignTo(MinNumAGPRs, 4);
-
- MinNumAGPRs = std::min(MinNumAGPRs, TotalNumAGPRs);
- }
-
- // Clamp values to be inbounds of our limits, and ensure min <= max.
-
- MaxNumAGPRs = std::min(std::max(MinNumAGPRs, MaxNumAGPRs), MaxVectorRegs);
- MinNumAGPRs = std::min(std::min(MinNumAGPRs, TotalNumAGPRs), MaxNumAGPRs);
-
- MaxNumVGPRs = std::min(MaxVectorRegs - MinNumAGPRs, TotalNumVGPRs);
- MaxNumAGPRs = std::min(MaxVectorRegs - MaxNumVGPRs, MaxNumAGPRs);
-
- assert(MaxNumVGPRs + MaxNumAGPRs <= MaxVectorRegs &&
- MaxNumAGPRs <= TotalNumAGPRs && MaxNumVGPRs <= TotalNumVGPRs &&
- "invalid register counts");
- } else if (ST.hasMAIInsts()) {
- // On gfx908 the number of AGPRs always equals the number of VGPRs.
- MaxNumAGPRs = MaxNumVGPRs = MaxVectorRegs;
- }
-
- return std::pair(MaxNumVGPRs, MaxNumAGPRs);
-}
-
BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
BitVector Reserved(getNumRegs());
Reserved.set(AMDGPU::MODE);
@@ -742,7 +683,7 @@ BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
// Reserve VGPRs/AGPRs.
//
- auto [MaxNumVGPRs, MaxNumAGPRs] = getMaxNumVectorRegs(MF);
+ auto [MaxNumVGPRs, MaxNumAGPRs] = ST.getMaxNumVectorRegs(MF.getFunction());
for (const TargetRegisterClass *RC : regclasses()) {
if (RC->isBaseClass() && isVGPRClass(RC)) {
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
index 0008e5f..5508f07 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.h
@@ -90,11 +90,6 @@ public:
/// spilling is needed.
MCRegister reservedPrivateSegmentBufferReg(const MachineFunction &MF) const;
- /// Return a pair of maximum numbers of VGPRs and AGPRs that meet the number
- /// of waves per execution unit required for the function \p MF.
- std::pair<unsigned, unsigned>
- getMaxNumVectorRegs(const MachineFunction &MF) const;
-
BitVector getReservedRegs(const MachineFunction &MF) const override;
bool isAsmClobberable(const MachineFunction &MF,
MCRegister PhysReg) const override;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index fca5dff..066b392 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -370,6 +370,11 @@ void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) {
setOperationAction(ISD::FMINNUM, VT, Legal);
setOperationAction(ISD::FMAXNUM, VT, Legal);
setOperationAction(ISD::FROUND, VT, Legal);
+ setOperationAction(ISD::FROUNDEVEN, VT, Legal);
+ setOperationAction(ISD::FRINT, VT, Legal);
+ setOperationAction(ISD::FTRUNC, VT, Legal);
+ setOperationAction(ISD::FFLOOR, VT, Legal);
+ setOperationAction(ISD::FCEIL, VT, Legal);
setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
setOperationAction(ISD::VECREDUCE_FMUL, VT, Custom);
setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
@@ -1507,6 +1512,12 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM_,
setOperationAction(ISD::FLOG2, MVT::f16, Promote);
setOperationAction(ISD::FROUND, MVT::f16, Legal);
+ setOperationAction(ISD::FROUNDEVEN, MVT::f16, Legal);
+ setOperationAction(ISD::FTRUNC, MVT::f16, Legal);
+ setOperationAction(ISD::FNEARBYINT, MVT::f16, Legal);
+ setOperationAction(ISD::FRINT, MVT::f16, Legal);
+ setOperationAction(ISD::FFLOOR, MVT::f16, Legal);
+ setOperationAction(ISD::FCEIL, MVT::f16, Legal);
}
if (Subtarget->hasNEON()) {
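
The repeated single-opcode calls above could equally use the ArrayRef overload of setOperationAction seen elsewhere in this diff; a sketch, assuming the same ARMTargetLowering context:

```cpp
setOperationAction({ISD::FROUNDEVEN, ISD::FTRUNC, ISD::FNEARBYINT,
                    ISD::FRINT, ISD::FFLOOR, ISD::FCEIL},
                   MVT::f16, Legal);
```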
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 613cfb5..d96136c 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -2385,13 +2385,6 @@ SDValue LoongArchTargetLowering::lowerBF16_TO_FP(SDValue Op,
return Res;
}
-static bool isConstantBUILD_VECTOR(const BuildVectorSDNode *Op) {
- for (unsigned i = 0; i < Op->getNumOperands(); ++i)
- if (isIntOrFPConstant(Op->getOperand(i)))
- return true;
- return false;
-}
-
// Lower BUILD_VECTOR as broadcast load (if possible).
// For example:
// %a = load i8, ptr %ptr
@@ -2441,10 +2434,14 @@ SDValue LoongArchTargetLowering::lowerBUILD_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
BuildVectorSDNode *Node = cast<BuildVectorSDNode>(Op);
EVT ResTy = Op->getValueType(0);
+ unsigned NumElts = ResTy.getVectorNumElements();
SDLoc DL(Op);
APInt SplatValue, SplatUndef;
unsigned SplatBitSize;
bool HasAnyUndefs;
+ bool IsConstant = false;
+ bool UseSameConstant = true;
+ SDValue ConstantValue;
bool Is128Vec = ResTy.is128BitVector();
bool Is256Vec = ResTy.is256BitVector();
@@ -2495,13 +2492,35 @@ SDValue LoongArchTargetLowering::lowerBUILD_VECTOR(SDValue Op,
if (DAG.isSplatValue(Op, /*AllowUndefs=*/false))
return Op;
- if (!isConstantBUILD_VECTOR(Node)) {
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue Opi = Node->getOperand(i);
+ if (isIntOrFPConstant(Opi)) {
+ IsConstant = true;
+ if (!ConstantValue.getNode())
+ ConstantValue = Opi;
+ else if (ConstantValue != Opi)
+ UseSameConstant = false;
+ }
+ }
+
+ // If the type of BUILD_VECTOR is v2f64, custom legalizing it has no benefits.
+ if (IsConstant && UseSameConstant && ResTy != MVT::v2f64) {
+ SDValue Result = DAG.getSplatBuildVector(ResTy, DL, ConstantValue);
+ for (unsigned i = 0; i < NumElts; ++i) {
+ SDValue Opi = Node->getOperand(i);
+ if (!isIntOrFPConstant(Opi))
+ Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ResTy, Result, Opi,
+ DAG.getConstant(i, DL, Subtarget.getGRLenVT()));
+ }
+ return Result;
+ }
+
+ if (!IsConstant) {
// Use INSERT_VECTOR_ELT operations rather than expand to stores.
// The resulting code is the same length as the expansion, but it doesn't
// use memory operations.
assert(ResTy.isVector());
- unsigned NumElts = ResTy.getVectorNumElements();
SDValue Op0 = Node->getOperand(0);
SDValue Vector = DAG.getUNDEF(ResTy);
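
The new path handles BUILD_VECTORs that are constant except for a few lanes: if every constant element is the same value, emit one splat and patch in the non-constant lanes with INSERT_VECTOR_ELT. A standalone mirror of the scan, modeling elements as optional<int> (nullopt = non-constant operand; illustrative types only, the real code works on SDValues):

```cpp
#include <optional>
#include <vector>

struct ScanResult {
  bool IsConstant = false;     // at least one constant element
  bool UseSameConstant = true; // all constant elements share one value
};

ScanResult scan(const std::vector<std::optional<int>> &Elts) {
  ScanResult R;
  std::optional<int> First;
  for (const auto &E : Elts) {
    if (!E)
      continue; // non-constant lane; patched with INSERT_VECTOR_ELT later
    R.IsConstant = true;
    if (!First)
      First = E;
    else if (*First != *E)
      R.UseSameConstant = false;
  }
  return R;
}
// For {1, 1, 1, x}: {IsConstant=true, UseSameConstant=true}, so lower as
// splat(1) plus a single insert for x instead of four element inserts.
```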
diff --git a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
index ca03310..a2e48ab 100644
--- a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -737,14 +737,18 @@ void MipsAsmPrinter::emitStartOfAsmFile(Module &M) {
if (FS.empty() && M.size() && F->hasFnAttribute("target-features"))
FS = F->getFnAttribute("target-features").getValueAsString();
+ std::string strFS = FS.str();
+ if (M.size() && F->getFnAttribute("use-soft-float").getValueAsBool())
+ strFS += strFS.empty() ? "+soft-float" : ",+soft-float";
+
// Compute MIPS architecture attributes based on the default subtarget
// that we'd have constructed.
// FIXME: For ifunc related functions we could iterate over and look
// for a feature string that doesn't match the default one.
StringRef CPU = MIPS_MC::selectMipsCPU(TT, TM.getTargetCPU());
const MipsTargetMachine &MTM = static_cast<const MipsTargetMachine &>(TM);
- const MipsSubtarget STI(TT, CPU, FS, MTM.isLittleEndian(), MTM,
- std::nullopt);
+ const MipsSubtarget STI(TT, CPU, StringRef(strFS), MTM.isLittleEndian(),
+ MTM, std::nullopt);
bool IsABICalls = STI.isABICalls();
const MipsABIInfo &ABI = MTM.getABI();
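
The feature-string change appends "+soft-float" to the comma-separated subtarget feature list whenever the function carries use-soft-float, so the constructed MipsSubtarget (and hence the emitted .MIPS.abiflags) reflects the soft-float ABI. A trivial mirror of the append idiom:

```cpp
#include <string>

std::string appendSoftFloat(std::string FS) {
  FS += FS.empty() ? "+soft-float" : ",+soft-float";
  return FS;
}
// appendSoftFloat("")        == "+soft-float"
// appendSoftFloat("+mips32") == "+mips32,+soft-float"
```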
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 0e581a7..ec6b382 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -522,9 +522,6 @@ MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
setOperationAction(ISD::TRAP, MVT::Other, Legal);
- setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
- setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
-
setTargetDAGCombine({ISD::SDIVREM, ISD::UDIVREM, ISD::SELECT, ISD::AND,
ISD::OR, ISD::ADD, ISD::SUB, ISD::AssertZext, ISD::SHL,
ISD::SIGN_EXTEND});
@@ -1360,8 +1357,6 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const
case ISD::FP_TO_SINT: return lowerFP_TO_SINT(Op, DAG);
case ISD::READCYCLECOUNTER:
return lowerREADCYCLECOUNTER(Op, DAG);
- case ISD::ConstantFP:
- return lowerConstantFP(Op, DAG);
}
return SDValue();
}
@@ -3019,30 +3014,6 @@ SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
return DAG.getNode(ISD::BITCAST, SDLoc(Op), Op.getValueType(), Trunc);
}
-SDValue MipsTargetLowering::lowerConstantFP(SDValue Op,
- SelectionDAG &DAG) const {
- SDLoc DL(Op);
- EVT VT = Op.getSimpleValueType();
- SDNode *N = Op.getNode();
- ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(N);
-
- if (!CFP->isNaN() || Subtarget.isNaN2008()) {
- return SDValue();
- }
-
- APFloat NaNValue = CFP->getValueAPF();
- auto &Sem = NaNValue.getSemantics();
-
- // The MSB of the mantissa should be zero for QNaNs in the MIPS legacy NaN
- // encodings, and one for sNaNs. Check every NaN constants and make sure
- // they are correctly encoded for legacy encodings.
- if (!NaNValue.isSignaling()) {
- APFloat RealQNaN = NaNValue.getSNaN(Sem);
- return DAG.getConstantFP(RealQNaN, DL, VT);
- }
- return SDValue();
-}
-
//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.h b/llvm/lib/Target/Mips/MipsISelLowering.h
index 31ac5d4..c65c76c 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.h
+++ b/llvm/lib/Target/Mips/MipsISelLowering.h
@@ -592,7 +592,6 @@ class TargetRegisterClass;
SDValue lowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
- SDValue lowerConstantFP(SDValue Op, SelectionDAG &DAG) const;
/// isEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization.
diff --git a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
index 81de0a5..0164fcd 100644
--- a/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
+++ b/llvm/lib/Transforms/IPO/MemProfContextDisambiguation.cpp
@@ -97,8 +97,6 @@ STATISTIC(MissingAllocForContextId,
"Number of missing alloc nodes for context ids");
STATISTIC(SkippedCallsCloning,
"Number of calls skipped during cloning due to unexpected operand");
-STATISTIC(MismatchedCloneAssignments,
- "Number of callsites assigned to call multiple non-matching clones");
static cl::opt<std::string> DotFilePathPrefix(
"memprof-dot-file-path-prefix", cl::init(""), cl::Hidden,
@@ -2062,20 +2060,6 @@ static bool isMemProfClone(const Function &F) {
return F.getName().contains(MemProfCloneSuffix);
}
-// Return the clone number of the given function by extracting it from the
-// memprof suffix. Assumes the caller has already confirmed it is a memprof
-// clone.
-static unsigned getMemProfCloneNum(const Function &F) {
- assert(isMemProfClone(F));
- auto Pos = F.getName().find_last_of('.');
- assert(Pos > 0);
- unsigned CloneNo;
- bool Err = F.getName().drop_front(Pos + 1).getAsInteger(10, CloneNo);
- assert(!Err);
- (void)Err;
- return CloneNo;
-}
-
std::string ModuleCallsiteContextGraph::getLabel(const Function *Func,
const Instruction *Call,
unsigned CloneNo) const {
@@ -3995,22 +3979,7 @@ IndexCallsiteContextGraph::getAllocationCallType(const CallInfo &Call) const {
void ModuleCallsiteContextGraph::updateCall(CallInfo &CallerCall,
FuncInfo CalleeFunc) {
- auto *CurF = cast<CallBase>(CallerCall.call())->getCalledFunction();
- auto NewCalleeCloneNo = CalleeFunc.cloneNo();
- if (isMemProfClone(*CurF)) {
- // If we already assigned this callsite to call a specific non-default
- // clone (i.e. not the original function which is clone 0), ensure that we
- // aren't trying to now update it to call a different clone, which is
- // indicative of a bug in the graph or function assignment.
- auto CurCalleeCloneNo = getMemProfCloneNum(*CurF);
- if (CurCalleeCloneNo != NewCalleeCloneNo) {
- LLVM_DEBUG(dbgs() << "Mismatch in call clone assignment: was "
- << CurCalleeCloneNo << " now " << NewCalleeCloneNo
- << "\n");
- MismatchedCloneAssignments++;
- }
- }
- if (NewCalleeCloneNo > 0)
+ if (CalleeFunc.cloneNo() > 0)
cast<CallBase>(CallerCall.call())->setCalledFunction(CalleeFunc.func());
OREGetter(CallerCall.call()->getFunction())
.emit(OptimizationRemark(DEBUG_TYPE, "MemprofCall", CallerCall.call())
@@ -4026,19 +3995,7 @@ void IndexCallsiteContextGraph::updateCall(CallInfo &CallerCall,
assert(CI &&
"Caller cannot be an allocation which should not have profiled calls");
assert(CI->Clones.size() > CallerCall.cloneNo());
- auto NewCalleeCloneNo = CalleeFunc.cloneNo();
- auto &CurCalleeCloneNo = CI->Clones[CallerCall.cloneNo()];
- // If we already assigned this callsite to call a specific non-default
- // clone (i.e. not the original function which is clone 0), ensure that we
- // aren't trying to now update it to call a different clone, which is
- // indicative of a bug in the graph or function assignment.
- if (CurCalleeCloneNo != 0 && CurCalleeCloneNo != NewCalleeCloneNo) {
- LLVM_DEBUG(dbgs() << "Mismatch in call clone assignment: was "
- << CurCalleeCloneNo << " now " << NewCalleeCloneNo
- << "\n");
- MismatchedCloneAssignments++;
- }
- CurCalleeCloneNo = NewCalleeCloneNo;
+ CI->Clones[CallerCall.cloneNo()] = CalleeFunc.cloneNo();
}
// Update the debug information attached to NewFunc to use the clone Name. Note
@@ -4746,19 +4703,6 @@ bool CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::assignFunctions() {
// where the callers were assigned to different clones of a function.
}
- auto FindFirstAvailFuncClone = [&]() {
- // Find first function in FuncClonesToCallMap without an assigned
- // clone of this callsite Node. We should always have one
- // available at this point due to the earlier cloning when the
- // FuncClonesToCallMap size was smaller than the clone number.
- for (auto &CF : FuncClonesToCallMap) {
- if (!FuncCloneToCurNodeCloneMap.count(CF.first))
- return CF.first;
- }
- assert(false &&
- "Expected an available func clone for this callsite clone");
- };
-
// See if we can use existing function clone. Walk through
// all caller edges to see if any have already been assigned to
// a clone of this callsite's function. If we can use it, do so. If not,
@@ -4875,7 +4819,16 @@ bool CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::assignFunctions() {
// clone of OrigFunc for another caller during this iteration over
// its caller edges.
if (!FuncCloneAssignedToCurCallsiteClone) {
- FuncCloneAssignedToCurCallsiteClone = FindFirstAvailFuncClone();
+ // Find first function in FuncClonesToCallMap without an assigned
+ // clone of this callsite Node. We should always have one
+ // available at this point due to the earlier cloning when the
+ // FuncClonesToCallMap size was smaller than the clone number.
+ for (auto &CF : FuncClonesToCallMap) {
+ if (!FuncCloneToCurNodeCloneMap.count(CF.first)) {
+ FuncCloneAssignedToCurCallsiteClone = CF.first;
+ break;
+ }
+ }
assert(FuncCloneAssignedToCurCallsiteClone);
// Assign Clone to FuncCloneAssignedToCurCallsiteClone
AssignCallsiteCloneToFuncClone(
@@ -4889,31 +4842,6 @@ bool CallsiteContextGraph<DerivedCCG, FuncTy, CallTy>::assignFunctions() {
FuncCloneAssignedToCurCallsiteClone);
}
}
- // If we didn't assign a function clone to this callsite clone yet, e.g.
- // none of its callers has a non-null call, do the assignment here.
- // We want to ensure that every callsite clone is assigned to some
- // function clone, so that the call updates below work as expected.
- // In particular if this is the original callsite, we want to ensure it
- // is assigned to the original function, otherwise the original function
- // will appear available for assignment to other callsite clones,
- // leading to unintended effects. For one, the unknown and not updated
- // callers will call into cloned paths leading to the wrong hints,
- // because they still call the original function (clone 0). Also,
- // because all callsites start out as being clone 0 by default, we can't
- // easily distinguish between callsites explicitly assigned to clone 0
- // vs those never assigned, which can lead to multiple updates of the
- // calls when invoking updateCall below, with mismatched clone values.
- // TODO: Add a flag to the callsite nodes or some other mechanism to
- // better distinguish and identify callsite clones that are not getting
- // assigned to function clones as expected.
- if (!FuncCloneAssignedToCurCallsiteClone) {
- FuncCloneAssignedToCurCallsiteClone = FindFirstAvailFuncClone();
- assert(FuncCloneAssignedToCurCallsiteClone &&
- "No available func clone for this callsite clone");
- AssignCallsiteCloneToFuncClone(
- FuncCloneAssignedToCurCallsiteClone, Call, Clone,
- /*IsAlloc=*/AllocationCallToContextNodeMap.contains(Call));
- }
}
if (VerifyCCG) {
checkNode<DerivedCCG, FuncTy, CallTy>(Node);
diff --git a/llvm/lib/Transforms/Scalar/LoopFuse.cpp b/llvm/lib/Transforms/Scalar/LoopFuse.cpp
index d6bd92d..b5eb647 100644
--- a/llvm/lib/Transforms/Scalar/LoopFuse.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopFuse.cpp
@@ -1176,6 +1176,28 @@ private:
return true;
}
+ /// This function fixes PHI nodes after fusion in \p SafeToSink.
+ /// \p SafeToSink instructions are the instructions that are to be moved past
+ /// the fused loop. Thus, the PHI nodes in \p SafeToSink should be updated to
+ /// receive values from the fused loop if they are currently taking values
+ /// from the first loop (i.e. FC0)'s latch.
+ void fixPHINodes(ArrayRef<Instruction *> SafeToSink,
+ const FusionCandidate &FC0,
+ const FusionCandidate &FC1) const {
+ for (Instruction *Inst : SafeToSink) {
+ // No update needed for non-PHI nodes.
+ PHINode *Phi = dyn_cast<PHINode>(Inst);
+ if (!Phi)
+ continue;
+ for (unsigned I = 0; I < Phi->getNumIncomingValues(); I++) {
+ if (Phi->getIncomingBlock(I) != FC0.Latch)
+ continue;
+ assert(FC1.Latch && "FC1 latch is not set");
+ Phi->setIncomingBlock(I, FC1.Latch);
+ }
+ }
+ }
+
/// Collect instructions in the \p FC1 Preheader that can be hoisted
/// to the \p FC0 Preheader or sunk into the \p FC1 Body
bool collectMovablePreheaderInsts(
@@ -1481,6 +1503,9 @@ private:
assert(I->getParent() == FC1.Preheader);
I->moveBefore(*FC1.ExitBlock, FC1.ExitBlock->getFirstInsertionPt());
}
+ // PHI nodes in SinkInsts need to be updated to receive values from the
+ // fused loop.
+ fixPHINodes(SinkInsts, FC0, FC1);
}
/// Determine if two fusion candidates have identical guards
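
fixPHINodes retargets PHI edges in the sunk instructions: any incoming block that was FC0's latch becomes FC1's latch, since after fusion control reaches the sunk code through the fused loop's (FC1's) latch. A toy mirror with a PHI modeled as (incoming block, value) pairs (names are illustrative, not LLVM's API):

```cpp
#include <string>
#include <utility>
#include <vector>

using Phi = std::vector<std::pair<std::string, std::string>>; // (block, value)

void fixPhi(Phi &P, const std::string &FC0Latch, const std::string &FC1Latch) {
  for (auto &[Block, Value] : P)
    if (Block == FC0Latch)
      Block = FC1Latch; // only the edge moves; the incoming value is kept
}
```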
diff --git a/llvm/test/Analysis/CostModel/RISCV/arith-fp.ll b/llvm/test/Analysis/CostModel/RISCV/arith-fp.ll
index 6c974af..673bf38 100644
--- a/llvm/test/Analysis/CostModel/RISCV/arith-fp.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/arith-fp.ll
@@ -1,1579 +1,2413 @@
; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfh,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d,+zfh,+zvfhmin,+zvfbfmin < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
+; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64 -mattr=+v,+f,+d < %s | FileCheck %s --check-prefixes=CHECK,NO-ZFHMIN
; Check that we don't crash querying costs when vectors are not enabled.
; RUN: opt -passes="print<cost-model>" 2>&1 -disable-output -mtriple=riscv64
define void @fadd() {
; CHECK-LABEL: 'fadd'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fadd bfloat undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = fadd float undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = fadd double undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V1BF16 = fadd <1 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V2BF16 = fadd <2 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V4BF16 = fadd <4 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8BF16 = fadd <8 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16BF16 = fadd <16 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV1BF16 = fadd <vscale x 1 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV2BF16 = fadd <vscale x 2 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %NXV4BF16 = fadd <vscale x 4 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %NXV8BF16 = fadd <vscale x 8 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %NXV16BF16 = fadd <vscale x 16 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = fadd <1 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = fadd <2 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fadd <4 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F32 = fadd <8 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16F32 = fadd <16 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F32 = fadd <vscale x 1 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F32 = fadd <vscale x 2 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4F32 = fadd <vscale x 4 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8F32 = fadd <vscale x 8 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV16F32 = fadd <vscale x 16 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64 = fadd <1 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64 = fadd <2 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4F64 = fadd <4 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8F64 = fadd <8 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F64 = fadd <vscale x 1 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64 = fadd <vscale x 2 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64 = fadd <vscale x 4 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64 = fadd <vscale x 8 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F16_VP = call <1 x bfloat> @llvm.vp.fadd.v1bf16(<1 x bfloat> undef, <1 x bfloat> undef, <1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2F16_VP = call <2 x bfloat> @llvm.vp.fadd.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4F16_VP = call <4 x bfloat> @llvm.vp.fadd.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8F16_VP = call <8 x bfloat> @llvm.vp.fadd.v8bf16(<8 x bfloat> undef, <8 x bfloat> undef, <8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16F16_VP = call <16 x bfloat> @llvm.vp.fadd.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32_VP = call <1 x float> @llvm.vp.fadd.v1f32(<1 x float> undef, <1 x float> undef, <1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32_VP = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> undef, <2 x float> undef, <2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32_VP = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> undef, <4 x float> undef, <4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F32_VP = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> undef, <8 x float> undef, <8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16F32_VP = call <16 x float> @llvm.vp.fadd.v16f32(<16 x float> undef, <16 x float> undef, <16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64_VP = call <1 x double> @llvm.vp.fadd.v1f64(<1 x double> undef, <1 x double> undef, <1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64_VP = call <2 x double> @llvm.vp.fadd.v2f64(<2 x double> undef, <2 x double> undef, <2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4F64_VP = call <4 x double> @llvm.vp.fadd.v4f64(<4 x double> undef, <4 x double> undef, <4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8F64_VP = call <8 x double> @llvm.vp.fadd.v8f64(<8 x double> undef, <8 x double> undef, <8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV1F16_VP = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef, <vscale x 1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV2F16_VP = call <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef, <vscale x 2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV4F16_VP = call <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef, <vscale x 4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %NXV8F16_VP = call <vscale x 8 x bfloat> @llvm.vp.fadd.nxv8bf16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef, <vscale x 8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %NXV16F16_VP = call <vscale x 16 x bfloat> @llvm.vp.fadd.nxv16bf16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef, <vscale x 16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.fadd.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.fadd.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.fadd.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.fadd.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef, <vscale x 16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.fadd.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.fadd.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.fadd.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = fadd float poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = fadd double poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = fadd <1 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = fadd <2 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fadd <4 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F32 = fadd <8 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16F32 = fadd <16 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F32 = fadd <vscale x 1 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F32 = fadd <vscale x 2 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4F32 = fadd <vscale x 4 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8F32 = fadd <vscale x 8 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV16F32 = fadd <vscale x 16 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64 = fadd <1 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64 = fadd <2 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4F64 = fadd <4 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8F64 = fadd <8 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F64 = fadd <vscale x 1 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64 = fadd <vscale x 2 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64 = fadd <vscale x 4 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64 = fadd <vscale x 8 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32_VP = call <1 x float> @llvm.vp.fadd.v1f32(<1 x float> poison, <1 x float> poison, <1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32_VP = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> poison, <2 x float> poison, <2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32_VP = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> poison, <4 x float> poison, <4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F32_VP = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> poison, <8 x float> poison, <8 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16F32_VP = call <16 x float> @llvm.vp.fadd.v16f32(<16 x float> poison, <16 x float> poison, <16 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64_VP = call <1 x double> @llvm.vp.fadd.v1f64(<1 x double> poison, <1 x double> poison, <1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64_VP = call <2 x double> @llvm.vp.fadd.v2f64(<2 x double> poison, <2 x double> poison, <2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4F64_VP = call <4 x double> @llvm.vp.fadd.v4f64(<4 x double> poison, <4 x double> poison, <4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8F64_VP = call <8 x double> @llvm.vp.fadd.v8f64(<8 x double> poison, <8 x double> poison, <8 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.fadd.nxv1f32(<vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.fadd.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> poison, <vscale x 4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.fadd.nxv8f32(<vscale x 8 x float> poison, <vscale x 8 x float> poison, <vscale x 8 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.fadd.nxv16f32(<vscale x 16 x float> poison, <vscale x 16 x float> poison, <vscale x 16 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.fadd.nxv2f64(<vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.fadd.nxv4f64(<vscale x 4 x double> poison, <vscale x 4 x double> poison, <vscale x 4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.fadd.nxv8f64(<vscale x 8 x double> poison, <vscale x 8 x double> poison, <vscale x 8 x i1> poison, i32 poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %BF16 = fadd bfloat undef, undef
- %F32 = fadd float undef, undef
- %F64 = fadd double undef, undef
-
- %V1BF16 = fadd <1 x bfloat> undef, undef
- %V2BF16 = fadd <2 x bfloat> undef, undef
- %V4BF16 = fadd <4 x bfloat> undef, undef
- %V8BF16 = fadd <8 x bfloat> undef, undef
- %V16BF16 = fadd <16 x bfloat> undef, undef
-
- %NXV1BF16 = fadd <vscale x 1 x bfloat> undef, undef
- %NXV2BF16 = fadd <vscale x 2 x bfloat> undef, undef
- %NXV4BF16 = fadd <vscale x 4 x bfloat> undef, undef
- %NXV8BF16 = fadd <vscale x 8 x bfloat> undef, undef
- %NXV16BF16 = fadd <vscale x 16 x bfloat> undef, undef
-
- %V1F32 = fadd <1 x float> undef, undef
- %V2F32 = fadd <2 x float> undef, undef
- %V4F32 = fadd <4 x float> undef, undef
- %V8F32 = fadd <8 x float> undef, undef
- %V16F32 = fadd <16 x float> undef, undef
-
- %NXV1F32 = fadd <vscale x 1 x float> undef, undef
- %NXV2F32 = fadd <vscale x 2 x float> undef, undef
- %NXV4F32 = fadd <vscale x 4 x float> undef, undef
- %NXV8F32 = fadd <vscale x 8 x float> undef, undef
- %NXV16F32 = fadd <vscale x 16 x float> undef, undef
-
- %V1F64 = fadd <1 x double> undef, undef
- %V2F64 = fadd <2 x double> undef, undef
- %V4F64 = fadd <4 x double> undef, undef
- %V8F64 = fadd <8 x double> undef, undef
-
- %NXV1F64 = fadd <vscale x 1 x double> undef, undef
- %NXV2F64 = fadd <vscale x 2 x double> undef, undef
- %NXV4F64 = fadd <vscale x 4 x double> undef, undef
- %NXV8F64 = fadd <vscale x 8 x double> undef, undef
-
- %V1F16_VP = call <1 x bfloat> @llvm.vp.fadd.v1f16(<1 x bfloat> undef, <1 x bfloat> undef, <1 x i1> undef, i32 undef)
- %V2F16_VP = call <2 x bfloat> @llvm.vp.fadd.v2f16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x i1> undef, i32 undef)
- %V4F16_VP = call <4 x bfloat> @llvm.vp.fadd.v4f16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x i1> undef, i32 undef)
- %V8F16_VP = call <8 x bfloat> @llvm.vp.fadd.v8f16(<8 x bfloat> undef, <8 x bfloat> undef, <8 x i1> undef, i32 undef)
- %V16F16_VP = call <16 x bfloat> @llvm.vp.fadd.v16f16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x i1> undef, i32 undef)
-
- %V1F32_VP = call <1 x float> @llvm.vp.fadd.v1f32(<1 x float> undef, <1 x float> undef, <1 x i1> undef, i32 undef)
- %V2F32_VP = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> undef, <2 x float> undef, <2 x i1> undef, i32 undef)
- %V4F32_VP = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> undef, <4 x float> undef, <4 x i1> undef, i32 undef)
- %V8F32_VP = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> undef, <8 x float> undef, <8 x i1> undef, i32 undef)
- %V16F32_VP = call <16 x float> @llvm.vp.fadd.v16f32(<16 x float> undef, <16 x float> undef, <16 x i1> undef, i32 undef)
-
- %V1F64_VP = call <1 x double> @llvm.vp.fadd.v1f64(<1 x double> undef, <1 x double> undef, <1 x i1> undef, i32 undef)
- %V2F64_VP = call <2 x double> @llvm.vp.fadd.v2f64(<2 x double> undef, <2 x double> undef, <2 x i1> undef, i32 undef)
- %V4F64_VP = call <4 x double> @llvm.vp.fadd.v4f64(<4 x double> undef, <4 x double> undef, <4 x i1> undef, i32 undef)
- %V8F64_VP = call <8 x double> @llvm.vp.fadd.v8f64(<8 x double> undef, <8 x double> undef, <8 x i1> undef, i32 undef)
-
- %NXV1F16_VP = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1f16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2F16_VP = call <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2f16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4F16_VP = call <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4f16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8F16_VP = call <vscale x 8 x bfloat> @llvm.vp.fadd.nxv8f16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef, <vscale x 8 x i1> undef, i32 undef)
- %NXV16F16_VP = call <vscale x 16 x bfloat> @llvm.vp.fadd.nxv16f16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef, <vscale x 16 x i1> undef, i32 undef)
-
- %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.fadd.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.fadd.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.fadd.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x i1> undef, i32 undef)
- %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.fadd.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef, <vscale x 16 x i1> undef, i32 undef)
-
- %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.fadd.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.fadd.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.fadd.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
+ %F32 = fadd float poison, poison
+ %F64 = fadd double poison, poison
+
+ %V1F32 = fadd <1 x float> poison, poison
+ %V2F32 = fadd <2 x float> poison, poison
+ %V4F32 = fadd <4 x float> poison, poison
+ %V8F32 = fadd <8 x float> poison, poison
+ %V16F32 = fadd <16 x float> poison, poison
+
+ %NXV1F32 = fadd <vscale x 1 x float> poison, poison
+ %NXV2F32 = fadd <vscale x 2 x float> poison, poison
+ %NXV4F32 = fadd <vscale x 4 x float> poison, poison
+ %NXV8F32 = fadd <vscale x 8 x float> poison, poison
+ %NXV16F32 = fadd <vscale x 16 x float> poison, poison
+
+ %V1F64 = fadd <1 x double> poison, poison
+ %V2F64 = fadd <2 x double> poison, poison
+ %V4F64 = fadd <4 x double> poison, poison
+ %V8F64 = fadd <8 x double> poison, poison
+
+ %NXV1F64 = fadd <vscale x 1 x double> poison, poison
+ %NXV2F64 = fadd <vscale x 2 x double> poison, poison
+ %NXV4F64 = fadd <vscale x 4 x double> poison, poison
+ %NXV8F64 = fadd <vscale x 8 x double> poison, poison
+
+ %V1F32_VP = call <1 x float> @llvm.vp.fadd(<1 x float> poison, <1 x float> poison, <1 x i1> poison, i32 poison)
+ %V2F32_VP = call <2 x float> @llvm.vp.fadd(<2 x float> poison, <2 x float> poison, <2 x i1> poison, i32 poison)
+ %V4F32_VP = call <4 x float> @llvm.vp.fadd(<4 x float> poison, <4 x float> poison, <4 x i1> poison, i32 poison)
+ %V8F32_VP = call <8 x float> @llvm.vp.fadd(<8 x float> poison, <8 x float> poison, <8 x i1> poison, i32 poison)
+ %V16F32_VP = call <16 x float> @llvm.vp.fadd(<16 x float> poison, <16 x float> poison, <16 x i1> poison, i32 poison)
+
+ %V1F64_VP = call <1 x double> @llvm.vp.fadd(<1 x double> poison, <1 x double> poison, <1 x i1> poison, i32 poison)
+ %V2F64_VP = call <2 x double> @llvm.vp.fadd(<2 x double> poison, <2 x double> poison, <2 x i1> poison, i32 poison)
+ %V4F64_VP = call <4 x double> @llvm.vp.fadd(<4 x double> poison, <4 x double> poison, <4 x i1> poison, i32 poison)
+ %V8F64_VP = call <8 x double> @llvm.vp.fadd(<8 x double> poison, <8 x double> poison, <8 x i1> poison, i32 poison)
+
+ %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.fadd(<vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.fadd(<vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.fadd(<vscale x 4 x float> poison, <vscale x 4 x float> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.fadd(<vscale x 8 x float> poison, <vscale x 8 x float> poison, <vscale x 8 x i1> poison, i32 poison)
+ %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.fadd(<vscale x 16 x float> poison, <vscale x 16 x float> poison, <vscale x 16 x i1> poison, i32 poison)
+
+ %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.fadd(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.fadd(<vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.fadd(<vscale x 4 x double> poison, <vscale x 4 x double> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.fadd(<vscale x 8 x double> poison, <vscale x 8 x double> poison, <vscale x 8 x i1> poison, i32 poison)
+
+ ret void
+}
+
+define void @fadd_bf16() {
+; ZVFH-LABEL: 'fadd_bf16'
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fadd bfloat poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V1BF16 = fadd <1 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V2BF16 = fadd <2 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V4BF16 = fadd <4 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8BF16 = fadd <8 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16BF16 = fadd <16 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV1BF16 = fadd <vscale x 1 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV2BF16 = fadd <vscale x 2 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %NXV4BF16 = fadd <vscale x 4 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %NXV8BF16 = fadd <vscale x 8 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %NXV16BF16 = fadd <vscale x 16 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1BF16_VP = call <1 x bfloat> @llvm.vp.fadd.v1bf16(<1 x bfloat> poison, <1 x bfloat> poison, <1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2BF16_VP = call <2 x bfloat> @llvm.vp.fadd.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4BF16_VP = call <4 x bfloat> @llvm.vp.fadd.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8BF16_VP = call <8 x bfloat> @llvm.vp.fadd.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison, <8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16BF16_VP = call <16 x bfloat> @llvm.vp.fadd.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fadd.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fadd.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; ZVFHMIN-LABEL: 'fadd_bf16'
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fadd bfloat poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V1BF16 = fadd <1 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V2BF16 = fadd <2 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V4BF16 = fadd <4 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8BF16 = fadd <8 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16BF16 = fadd <16 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV1BF16 = fadd <vscale x 1 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV2BF16 = fadd <vscale x 2 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %NXV4BF16 = fadd <vscale x 4 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %NXV8BF16 = fadd <vscale x 8 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %NXV16BF16 = fadd <vscale x 16 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1BF16_VP = call <1 x bfloat> @llvm.vp.fadd.v1bf16(<1 x bfloat> poison, <1 x bfloat> poison, <1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2BF16_VP = call <2 x bfloat> @llvm.vp.fadd.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4BF16_VP = call <4 x bfloat> @llvm.vp.fadd.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8BF16_VP = call <8 x bfloat> @llvm.vp.fadd.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison, <8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16BF16_VP = call <16 x bfloat> @llvm.vp.fadd.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fadd.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fadd.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; NO-ZFHMIN-LABEL: 'fadd_bf16'
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fadd bfloat poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16 = fadd <1 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2BF16 = fadd <2 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4BF16 = fadd <4 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8BF16 = fadd <8 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16BF16 = fadd <16 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16 = fadd <vscale x 1 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16 = fadd <vscale x 2 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16 = fadd <vscale x 4 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16 = fadd <vscale x 8 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16 = fadd <vscale x 16 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16_VP = call <1 x bfloat> @llvm.vp.fadd.v1bf16(<1 x bfloat> poison, <1 x bfloat> poison, <1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2BF16_VP = call <2 x bfloat> @llvm.vp.fadd.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4BF16_VP = call <4 x bfloat> @llvm.vp.fadd.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8BF16_VP = call <8 x bfloat> @llvm.vp.fadd.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison, <8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16BF16_VP = call <16 x bfloat> @llvm.vp.fadd.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fadd.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fadd.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %BF16 = fadd bfloat poison, poison
+
+ %V1BF16 = fadd <1 x bfloat> poison, poison
+ %V2BF16 = fadd <2 x bfloat> poison, poison
+ %V4BF16 = fadd <4 x bfloat> poison, poison
+ %V8BF16 = fadd <8 x bfloat> poison, poison
+ %V16BF16 = fadd <16 x bfloat> poison, poison
+
+ %NXV1BF16 = fadd <vscale x 1 x bfloat> poison, poison
+ %NXV2BF16 = fadd <vscale x 2 x bfloat> poison, poison
+ %NXV4BF16 = fadd <vscale x 4 x bfloat> poison, poison
+ %NXV8BF16 = fadd <vscale x 8 x bfloat> poison, poison
+ %NXV16BF16 = fadd <vscale x 16 x bfloat> poison, poison
+
+ %V1BF16_VP = call <1 x bfloat> @llvm.vp.fadd(<1 x bfloat> poison, <1 x bfloat> poison, <1 x i1> poison, i32 poison)
+ %V2BF16_VP = call <2 x bfloat> @llvm.vp.fadd(<2 x bfloat> poison, <2 x bfloat> poison, <2 x i1> poison, i32 poison)
+ %V4BF16_VP = call <4 x bfloat> @llvm.vp.fadd(<4 x bfloat> poison, <4 x bfloat> poison, <4 x i1> poison, i32 poison)
+ %V8BF16_VP = call <8 x bfloat> @llvm.vp.fadd(<8 x bfloat> poison, <8 x bfloat> poison, <8 x i1> poison, i32 poison)
+ %V16BF16_VP = call <16 x bfloat> @llvm.vp.fadd(<16 x bfloat> poison, <16 x bfloat> poison, <16 x i1> poison, i32 poison)
+
+ %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fadd(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fadd(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fadd(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fadd(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+ %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fadd(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
ret void
}
define void @fadd_f16() {
; ZVFH-LABEL: 'fadd_f16'
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fadd half undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fadd <1 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = fadd <2 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = fadd <4 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = fadd <8 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16 = fadd <16 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32F16 = fadd <32 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 = fadd <vscale x 1 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16 = fadd <vscale x 2 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = fadd <vscale x 4 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16 = fadd <vscale x 8 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16 = fadd <vscale x 16 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fadd <vscale x 32 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fadd.v1f16(<1 x half> undef, <1 x half> undef, <1 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> undef, <2 x half> undef, <2 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fadd.v4f16(<4 x half> undef, <4 x half> undef, <4 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fadd.v8f16(<8 x half> undef, <8 x half> undef, <8 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fadd.v16f16(<16 x half> undef, <16 x half> undef, <16 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fadd half poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fadd <1 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = fadd <2 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = fadd <4 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = fadd <8 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16 = fadd <16 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32F16 = fadd <32 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 = fadd <vscale x 1 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16 = fadd <vscale x 2 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = fadd <vscale x 4 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16 = fadd <vscale x 8 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16 = fadd <vscale x 16 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fadd <vscale x 32 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fadd.v1f16(<1 x half> poison, <1 x half> poison, <1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> poison, <2 x half> poison, <2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fadd.v4f16(<4 x half> poison, <4 x half> poison, <4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fadd.v8f16(<8 x half> poison, <8 x half> poison, <8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fadd.v16f16(<16 x half> poison, <16 x half> poison, <16 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
; ZVFHMIN-LABEL: 'fadd_f16'
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fadd half undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V1F16 = fadd <1 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V2F16 = fadd <2 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V4F16 = fadd <4 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8F16 = fadd <8 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16F16 = fadd <16 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %V32F16 = fadd <32 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV1F16 = fadd <vscale x 1 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV2F16 = fadd <vscale x 2 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %NXV4F16 = fadd <vscale x 4 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %NXV8F16 = fadd <vscale x 8 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %NXV16F16 = fadd <vscale x 16 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fadd <vscale x 32 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fadd.v1f16(<1 x half> undef, <1 x half> undef, <1 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> undef, <2 x half> undef, <2 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fadd.v4f16(<4 x half> undef, <4 x half> undef, <4 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fadd.v8f16(<8 x half> undef, <8 x half> undef, <8 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fadd.v16f16(<16 x half> undef, <16 x half> undef, <16 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fadd half poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V1F16 = fadd <1 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V2F16 = fadd <2 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V4F16 = fadd <4 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8F16 = fadd <8 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16F16 = fadd <16 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %V32F16 = fadd <32 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV1F16 = fadd <vscale x 1 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV2F16 = fadd <vscale x 2 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %NXV4F16 = fadd <vscale x 4 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %NXV8F16 = fadd <vscale x 8 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %NXV16F16 = fadd <vscale x 16 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fadd <vscale x 32 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fadd.v1f16(<1 x half> poison, <1 x half> poison, <1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> poison, <2 x half> poison, <2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fadd.v4f16(<4 x half> poison, <4 x half> poison, <4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fadd.v8f16(<8 x half> poison, <8 x half> poison, <8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fadd.v16f16(<16 x half> poison, <16 x half> poison, <16 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %F16 = fadd half undef, undef
-
- %V1F16 = fadd <1 x half> undef, undef
- %V2F16 = fadd <2 x half> undef, undef
- %V4F16 = fadd <4 x half> undef, undef
- %V8F16 = fadd <8 x half> undef, undef
- %V16F16 = fadd <16 x half> undef, undef
- %V32F16 = fadd <32 x half> undef, undef
-
- %NXV1F16 = fadd <vscale x 1 x half> undef, undef
- %NXV2F16 = fadd <vscale x 2 x half> undef, undef
- %NXV4F16 = fadd <vscale x 4 x half> undef, undef
- %NXV8F16 = fadd <vscale x 8 x half> undef, undef
- %NXV16F16 = fadd <vscale x 16 x half> undef, undef
- %NXV32F16 = fadd <vscale x 32 x half> undef, undef
-
- %V1F16_VP = call <1 x half> @llvm.vp.fadd.v1f16(<1 x half> undef, <1 x half> undef, <1 x i1> undef, i32 undef)
- %V2F16_VP = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> undef, <2 x half> undef, <2 x i1> undef, i32 undef)
- %V4F16_VP = call <4 x half> @llvm.vp.fadd.v4f16(<4 x half> undef, <4 x half> undef, <4 x i1> undef, i32 undef)
- %V8F16_VP = call <8 x half> @llvm.vp.fadd.v8f16(<8 x half> undef, <8 x half> undef, <8 x i1> undef, i32 undef)
- %V16F16_VP = call <16 x half> @llvm.vp.fadd.v16f16(<16 x half> undef, <16 x half> undef, <16 x i1> undef, i32 undef)
-
- %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef)
- %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef)
+; NO-ZFHMIN-LABEL: 'fadd_f16'
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fadd half poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fadd <1 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F16 = fadd <2 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F16 = fadd <4 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8F16 = fadd <8 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16F16 = fadd <16 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32F16 = fadd <32 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16 = fadd <vscale x 1 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16 = fadd <vscale x 2 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16 = fadd <vscale x 4 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16 = fadd <vscale x 8 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16 = fadd <vscale x 16 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV32F16 = fadd <vscale x 32 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fadd.v1f16(<1 x half> poison, <1 x half> poison, <1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> poison, <2 x half> poison, <2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fadd.v4f16(<4 x half> poison, <4 x half> poison, <4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fadd.v8f16(<8 x half> poison, <8 x half> poison, <8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fadd.v16f16(<16 x half> poison, <16 x half> poison, <16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %F16 = fadd half poison, poison
+
+ %V1F16 = fadd <1 x half> poison, poison
+ %V2F16 = fadd <2 x half> poison, poison
+ %V4F16 = fadd <4 x half> poison, poison
+ %V8F16 = fadd <8 x half> poison, poison
+ %V16F16 = fadd <16 x half> poison, poison
+ %V32F16 = fadd <32 x half> poison, poison
+
+ %NXV1F16 = fadd <vscale x 1 x half> poison, poison
+ %NXV2F16 = fadd <vscale x 2 x half> poison, poison
+ %NXV4F16 = fadd <vscale x 4 x half> poison, poison
+ %NXV8F16 = fadd <vscale x 8 x half> poison, poison
+ %NXV16F16 = fadd <vscale x 16 x half> poison, poison
+ %NXV32F16 = fadd <vscale x 32 x half> poison, poison
+
+ %V1F16_VP = call <1 x half> @llvm.vp.fadd(<1 x half> poison, <1 x half> poison, <1 x i1> poison, i32 poison)
+ %V2F16_VP = call <2 x half> @llvm.vp.fadd(<2 x half> poison, <2 x half> poison, <2 x i1> poison, i32 poison)
+ %V4F16_VP = call <4 x half> @llvm.vp.fadd(<4 x half> poison, <4 x half> poison, <4 x i1> poison, i32 poison)
+ %V8F16_VP = call <8 x half> @llvm.vp.fadd(<8 x half> poison, <8 x half> poison, <8 x i1> poison, i32 poison)
+ %V16F16_VP = call <16 x half> @llvm.vp.fadd(<16 x half> poison, <16 x half> poison, <16 x i1> poison, i32 poison)
+
+ %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fadd(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fadd(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fadd(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fadd(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+ %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fadd(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
ret void
}
define void @fsub() {
; CHECK-LABEL: 'fsub'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fsub half undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = fsub float undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = fsub double undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V1BF16 = fsub <1 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V2BF16 = fsub <2 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V4BF16 = fsub <4 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8BF16 = fsub <8 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16BF16 = fsub <16 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV1BF16 = fsub <vscale x 1 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV2BF16 = fsub <vscale x 2 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %NXV4BF16 = fsub <vscale x 4 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %NXV8BF16 = fsub <vscale x 8 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %NXV16BF16 = fsub <vscale x 16 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = fsub <1 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = fsub <2 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fsub <4 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F32 = fsub <8 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16F32 = fsub <16 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F32 = fsub <vscale x 1 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F32 = fsub <vscale x 2 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4F32 = fsub <vscale x 4 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8F32 = fsub <vscale x 8 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV16F32 = fsub <vscale x 16 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64 = fsub <1 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64 = fsub <2 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4F64 = fsub <4 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8F64 = fsub <8 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F64 = fsub <vscale x 1 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64 = fsub <vscale x 2 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64 = fsub <vscale x 4 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64 = fsub <vscale x 8 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F16_VP = call <1 x bfloat> @llvm.vp.fsub.v1bf16(<1 x bfloat> undef, <1 x bfloat> undef, <1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2F16_VP = call <2 x bfloat> @llvm.vp.fsub.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4F16_VP = call <4 x bfloat> @llvm.vp.fsub.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8F16_VP = call <8 x bfloat> @llvm.vp.fsub.v8bf16(<8 x bfloat> undef, <8 x bfloat> undef, <8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16F16_VP = call <16 x bfloat> @llvm.vp.fsub.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32_VP = call <1 x float> @llvm.vp.fsub.v1f32(<1 x float> undef, <1 x float> undef, <1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32_VP = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> undef, <2 x float> undef, <2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32_VP = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> undef, <4 x float> undef, <4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F32_VP = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> undef, <8 x float> undef, <8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16F32_VP = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> undef, <16 x float> undef, <16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64_VP = call <1 x double> @llvm.vp.fsub.v1f64(<1 x double> undef, <1 x double> undef, <1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64_VP = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> undef, <2 x double> undef, <2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4F64_VP = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> undef, <4 x double> undef, <4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8F64_VP = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> undef, <8 x double> undef, <8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV1F16_VP = call <vscale x 1 x bfloat> @llvm.vp.fsub.nxv1bf16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef, <vscale x 1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV2F16_VP = call <vscale x 2 x bfloat> @llvm.vp.fsub.nxv2bf16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef, <vscale x 2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV4F16_VP = call <vscale x 4 x bfloat> @llvm.vp.fsub.nxv4bf16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef, <vscale x 4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %NXV8F16_VP = call <vscale x 8 x bfloat> @llvm.vp.fsub.nxv8bf16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef, <vscale x 8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %NXV16F16_VP = call <vscale x 16 x bfloat> @llvm.vp.fsub.nxv16bf16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef, <vscale x 16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef, <vscale x 16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = fsub float poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = fsub double poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = fsub <1 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = fsub <2 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fsub <4 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F32 = fsub <8 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16F32 = fsub <16 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F32 = fsub <vscale x 1 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F32 = fsub <vscale x 2 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4F32 = fsub <vscale x 4 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8F32 = fsub <vscale x 8 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV16F32 = fsub <vscale x 16 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64 = fsub <1 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64 = fsub <2 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4F64 = fsub <4 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8F64 = fsub <8 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F64 = fsub <vscale x 1 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64 = fsub <vscale x 2 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64 = fsub <vscale x 4 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64 = fsub <vscale x 8 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32_VP = call <1 x float> @llvm.vp.fsub.v1f32(<1 x float> poison, <1 x float> poison, <1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32_VP = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> poison, <2 x float> poison, <2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32_VP = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> poison, <4 x float> poison, <4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F32_VP = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> poison, <8 x float> poison, <8 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16F32_VP = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> poison, <16 x float> poison, <16 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64_VP = call <1 x double> @llvm.vp.fsub.v1f64(<1 x double> poison, <1 x double> poison, <1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64_VP = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> poison, <2 x double> poison, <2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4F64_VP = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> poison, <4 x double> poison, <4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8F64_VP = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> poison, <8 x double> poison, <8 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> poison, <vscale x 4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float> poison, <vscale x 8 x float> poison, <vscale x 8 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float> poison, <vscale x 16 x float> poison, <vscale x 16 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double> poison, <vscale x 4 x double> poison, <vscale x 4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double> poison, <vscale x 8 x double> poison, <vscale x 8 x i1> poison, i32 poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %F16 = fsub half undef, undef
- %F32 = fsub float undef, undef
- %F64 = fsub double undef, undef
-
- %V1BF16 = fsub <1 x bfloat> undef, undef
- %V2BF16 = fsub <2 x bfloat> undef, undef
- %V4BF16 = fsub <4 x bfloat> undef, undef
- %V8BF16 = fsub <8 x bfloat> undef, undef
- %V16BF16 = fsub <16 x bfloat> undef, undef
-
- %NXV1BF16 = fsub <vscale x 1 x bfloat> undef, undef
- %NXV2BF16 = fsub <vscale x 2 x bfloat> undef, undef
- %NXV4BF16 = fsub <vscale x 4 x bfloat> undef, undef
- %NXV8BF16 = fsub <vscale x 8 x bfloat> undef, undef
- %NXV16BF16 = fsub <vscale x 16 x bfloat> undef, undef
-
- %V1F32 = fsub <1 x float> undef, undef
- %V2F32 = fsub <2 x float> undef, undef
- %V4F32 = fsub <4 x float> undef, undef
- %V8F32 = fsub <8 x float> undef, undef
- %V16F32 = fsub <16 x float> undef, undef
-
- %NXV1F32 = fsub <vscale x 1 x float> undef, undef
- %NXV2F32 = fsub <vscale x 2 x float> undef, undef
- %NXV4F32 = fsub <vscale x 4 x float> undef, undef
- %NXV8F32 = fsub <vscale x 8 x float> undef, undef
- %NXV16F32 = fsub <vscale x 16 x float> undef, undef
-
- %V1F64 = fsub <1 x double> undef, undef
- %V2F64 = fsub <2 x double> undef, undef
- %V4F64 = fsub <4 x double> undef, undef
- %V8F64 = fsub <8 x double> undef, undef
-
- %NXV1F64 = fsub <vscale x 1 x double> undef, undef
- %NXV2F64 = fsub <vscale x 2 x double> undef, undef
- %NXV4F64 = fsub <vscale x 4 x double> undef, undef
- %NXV8F64 = fsub <vscale x 8 x double> undef, undef
-
- %V1F16_VP = call <1 x bfloat> @llvm.vp.fsub.v1f16(<1 x bfloat> undef, <1 x bfloat> undef, <1 x i1> undef, i32 undef)
- %V2F16_VP = call <2 x bfloat> @llvm.vp.fsub.v2f16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x i1> undef, i32 undef)
- %V4F16_VP = call <4 x bfloat> @llvm.vp.fsub.v4f16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x i1> undef, i32 undef)
- %V8F16_VP = call <8 x bfloat> @llvm.vp.fsub.v8f16(<8 x bfloat> undef, <8 x bfloat> undef, <8 x i1> undef, i32 undef)
- %V16F16_VP = call <16 x bfloat> @llvm.vp.fsub.v16f16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x i1> undef, i32 undef)
-
- %V1F32_VP = call <1 x float> @llvm.vp.fsub.v1f32(<1 x float> undef, <1 x float> undef, <1 x i1> undef, i32 undef)
- %V2F32_VP = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> undef, <2 x float> undef, <2 x i1> undef, i32 undef)
- %V4F32_VP = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> undef, <4 x float> undef, <4 x i1> undef, i32 undef)
- %V8F32_VP = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> undef, <8 x float> undef, <8 x i1> undef, i32 undef)
- %V16F32_VP = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> undef, <16 x float> undef, <16 x i1> undef, i32 undef)
-
- %V1F64_VP = call <1 x double> @llvm.vp.fsub.v1f64(<1 x double> undef, <1 x double> undef, <1 x i1> undef, i32 undef)
- %V2F64_VP = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> undef, <2 x double> undef, <2 x i1> undef, i32 undef)
- %V4F64_VP = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> undef, <4 x double> undef, <4 x i1> undef, i32 undef)
- %V8F64_VP = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> undef, <8 x double> undef, <8 x i1> undef, i32 undef)
-
- %NXV1F16_VP = call <vscale x 1 x bfloat> @llvm.vp.fsub.nxv1f16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2F16_VP = call <vscale x 2 x bfloat> @llvm.vp.fsub.nxv2f16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4F16_VP = call <vscale x 4 x bfloat> @llvm.vp.fsub.nxv4f16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8F16_VP = call <vscale x 8 x bfloat> @llvm.vp.fsub.nxv8f16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef, <vscale x 8 x i1> undef, i32 undef)
- %NXV16F16_VP = call <vscale x 16 x bfloat> @llvm.vp.fsub.nxv16f16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef, <vscale x 16 x i1> undef, i32 undef)
-
- %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x i1> undef, i32 undef)
- %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef, <vscale x 16 x i1> undef, i32 undef)
-
- %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
+ %F32 = fsub float poison, poison
+ %F64 = fsub double poison, poison
+
+ %V1F32 = fsub <1 x float> poison, poison
+ %V2F32 = fsub <2 x float> poison, poison
+ %V4F32 = fsub <4 x float> poison, poison
+ %V8F32 = fsub <8 x float> poison, poison
+ %V16F32 = fsub <16 x float> poison, poison
+
+ %NXV1F32 = fsub <vscale x 1 x float> poison, poison
+ %NXV2F32 = fsub <vscale x 2 x float> poison, poison
+ %NXV4F32 = fsub <vscale x 4 x float> poison, poison
+ %NXV8F32 = fsub <vscale x 8 x float> poison, poison
+ %NXV16F32 = fsub <vscale x 16 x float> poison, poison
+
+ %V1F64 = fsub <1 x double> poison, poison
+ %V2F64 = fsub <2 x double> poison, poison
+ %V4F64 = fsub <4 x double> poison, poison
+ %V8F64 = fsub <8 x double> poison, poison
+
+ %NXV1F64 = fsub <vscale x 1 x double> poison, poison
+ %NXV2F64 = fsub <vscale x 2 x double> poison, poison
+ %NXV4F64 = fsub <vscale x 4 x double> poison, poison
+ %NXV8F64 = fsub <vscale x 8 x double> poison, poison
+
+ %V1F32_VP = call <1 x float> @llvm.vp.fsub(<1 x float> poison, <1 x float> poison, <1 x i1> poison, i32 poison)
+ %V2F32_VP = call <2 x float> @llvm.vp.fsub(<2 x float> poison, <2 x float> poison, <2 x i1> poison, i32 poison)
+ %V4F32_VP = call <4 x float> @llvm.vp.fsub(<4 x float> poison, <4 x float> poison, <4 x i1> poison, i32 poison)
+ %V8F32_VP = call <8 x float> @llvm.vp.fsub(<8 x float> poison, <8 x float> poison, <8 x i1> poison, i32 poison)
+ %V16F32_VP = call <16 x float> @llvm.vp.fsub(<16 x float> poison, <16 x float> poison, <16 x i1> poison, i32 poison)
+
+ %V1F64_VP = call <1 x double> @llvm.vp.fsub(<1 x double> poison, <1 x double> poison, <1 x i1> poison, i32 poison)
+ %V2F64_VP = call <2 x double> @llvm.vp.fsub(<2 x double> poison, <2 x double> poison, <2 x i1> poison, i32 poison)
+ %V4F64_VP = call <4 x double> @llvm.vp.fsub(<4 x double> poison, <4 x double> poison, <4 x i1> poison, i32 poison)
+ %V8F64_VP = call <8 x double> @llvm.vp.fsub(<8 x double> poison, <8 x double> poison, <8 x i1> poison, i32 poison)
+
+ %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.fsub(<vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.fsub(<vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.fsub(<vscale x 4 x float> poison, <vscale x 4 x float> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.fsub(<vscale x 8 x float> poison, <vscale x 8 x float> poison, <vscale x 8 x i1> poison, i32 poison)
+ %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.fsub(<vscale x 16 x float> poison, <vscale x 16 x float> poison, <vscale x 16 x i1> poison, i32 poison)
+
+ %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.fsub(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.fsub(<vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.fsub(<vscale x 4 x double> poison, <vscale x 4 x double> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.fsub(<vscale x 8 x double> poison, <vscale x 8 x double> poison, <vscale x 8 x i1> poison, i32 poison)
+
+ ret void
+}
+
+define void @fsub_bf16() {
+; ZVFH-LABEL: 'fsub_bf16'
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fsub bfloat poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V1BF16 = fsub <1 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V2BF16 = fsub <2 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V4BF16 = fsub <4 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8BF16 = fsub <8 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16BF16 = fsub <16 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %V32BF16 = fsub <32 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV1BF16 = fsub <vscale x 1 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV2BF16 = fsub <vscale x 2 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %NXV4BF16 = fsub <vscale x 4 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %NXV8BF16 = fsub <vscale x 8 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %NXV16BF16 = fsub <vscale x 16 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32BF16 = fsub <vscale x 32 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1BF16_VP = call <1 x bfloat> @llvm.vp.fsub.v1bf16(<1 x bfloat> poison, <1 x bfloat> poison, <1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2BF16_VP = call <2 x bfloat> @llvm.vp.fsub.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4BF16_VP = call <4 x bfloat> @llvm.vp.fsub.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8BF16_VP = call <8 x bfloat> @llvm.vp.fsub.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison, <8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16BF16_VP = call <16 x bfloat> @llvm.vp.fsub.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fsub.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fsub.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fsub.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fsub.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fsub.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; ZVFHMIN-LABEL: 'fsub_bf16'
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fsub bfloat poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V1BF16 = fsub <1 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V2BF16 = fsub <2 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V4BF16 = fsub <4 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8BF16 = fsub <8 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16BF16 = fsub <16 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %V32BF16 = fsub <32 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV1BF16 = fsub <vscale x 1 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV2BF16 = fsub <vscale x 2 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %NXV4BF16 = fsub <vscale x 4 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %NXV8BF16 = fsub <vscale x 8 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %NXV16BF16 = fsub <vscale x 16 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32BF16 = fsub <vscale x 32 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1BF16_VP = call <1 x bfloat> @llvm.vp.fsub.v1bf16(<1 x bfloat> poison, <1 x bfloat> poison, <1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2BF16_VP = call <2 x bfloat> @llvm.vp.fsub.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4BF16_VP = call <4 x bfloat> @llvm.vp.fsub.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8BF16_VP = call <8 x bfloat> @llvm.vp.fsub.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison, <8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16BF16_VP = call <16 x bfloat> @llvm.vp.fsub.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fsub.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fsub.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fsub.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fsub.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fsub.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; NO-ZFHMIN-LABEL: 'fsub_bf16'
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fsub bfloat poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16 = fsub <1 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2BF16 = fsub <2 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4BF16 = fsub <4 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8BF16 = fsub <8 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16BF16 = fsub <16 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32BF16 = fsub <32 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16 = fsub <vscale x 1 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16 = fsub <vscale x 2 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16 = fsub <vscale x 4 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16 = fsub <vscale x 8 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16 = fsub <vscale x 16 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV32BF16 = fsub <vscale x 32 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16_VP = call <1 x bfloat> @llvm.vp.fsub.v1bf16(<1 x bfloat> poison, <1 x bfloat> poison, <1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2BF16_VP = call <2 x bfloat> @llvm.vp.fsub.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4BF16_VP = call <4 x bfloat> @llvm.vp.fsub.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8BF16_VP = call <8 x bfloat> @llvm.vp.fsub.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison, <8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16BF16_VP = call <16 x bfloat> @llvm.vp.fsub.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fsub.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fsub.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fsub.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fsub.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fsub.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %BF16 = fsub bfloat poison, poison
+
+ %V1BF16 = fsub <1 x bfloat> poison, poison
+ %V2BF16 = fsub <2 x bfloat> poison, poison
+ %V4BF16 = fsub <4 x bfloat> poison, poison
+ %V8BF16 = fsub <8 x bfloat> poison, poison
+ %V16BF16 = fsub <16 x bfloat> poison, poison
+ %V32BF16 = fsub <32 x bfloat> poison, poison
+
+ %NXV1BF16 = fsub <vscale x 1 x bfloat> poison, poison
+ %NXV2BF16 = fsub <vscale x 2 x bfloat> poison, poison
+ %NXV4BF16 = fsub <vscale x 4 x bfloat> poison, poison
+ %NXV8BF16 = fsub <vscale x 8 x bfloat> poison, poison
+ %NXV16BF16 = fsub <vscale x 16 x bfloat> poison, poison
+ %NXV32BF16 = fsub <vscale x 32 x bfloat> poison, poison
+
+ %V1BF16_VP = call <1 x bfloat> @llvm.vp.fsub(<1 x bfloat> poison, <1 x bfloat> poison, <1 x i1> poison, i32 poison)
+ %V2BF16_VP = call <2 x bfloat> @llvm.vp.fsub(<2 x bfloat> poison, <2 x bfloat> poison, <2 x i1> poison, i32 poison)
+ %V4BF16_VP = call <4 x bfloat> @llvm.vp.fsub(<4 x bfloat> poison, <4 x bfloat> poison, <4 x i1> poison, i32 poison)
+ %V8BF16_VP = call <8 x bfloat> @llvm.vp.fsub(<8 x bfloat> poison, <8 x bfloat> poison, <8 x i1> poison, i32 poison)
+ %V16BF16_VP = call <16 x bfloat> @llvm.vp.fsub(<16 x bfloat> poison, <16 x bfloat> poison, <16 x i1> poison, i32 poison)
+
+ %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fsub(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fsub(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fsub(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fsub(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+ %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fsub(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
ret void
}
define void @fsub_f16() {
; ZVFH-LABEL: 'fsub_f16'
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fsub half undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fsub <1 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = fsub <2 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = fsub <4 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = fsub <8 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16 = fsub <16 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32F16 = fsub <32 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 = fsub <vscale x 1 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16 = fsub <vscale x 2 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = fsub <vscale x 4 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16 = fsub <vscale x 8 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16 = fsub <vscale x 16 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fsub <vscale x 32 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fsub.v1f16(<1 x half> undef, <1 x half> undef, <1 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> undef, <2 x half> undef, <2 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> undef, <4 x half> undef, <4 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> undef, <8 x half> undef, <8 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> undef, <16 x half> undef, <16 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fsub half poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fsub <1 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = fsub <2 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = fsub <4 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = fsub <8 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16 = fsub <16 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32F16 = fsub <32 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 = fsub <vscale x 1 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16 = fsub <vscale x 2 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = fsub <vscale x 4 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16 = fsub <vscale x 8 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16 = fsub <vscale x 16 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fsub <vscale x 32 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fsub.v1f16(<1 x half> poison, <1 x half> poison, <1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> poison, <2 x half> poison, <2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> poison, <4 x half> poison, <4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> poison, <8 x half> poison, <8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> poison, <16 x half> poison, <16 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
; ZVFHMIN-LABEL: 'fsub_f16'
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fsub half undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V1F16 = fsub <1 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V2F16 = fsub <2 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V4F16 = fsub <4 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8F16 = fsub <8 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16F16 = fsub <16 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %V32F16 = fsub <32 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV1F16 = fsub <vscale x 1 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV2F16 = fsub <vscale x 2 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %NXV4F16 = fsub <vscale x 4 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %NXV8F16 = fsub <vscale x 8 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %NXV16F16 = fsub <vscale x 16 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fsub <vscale x 32 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fsub.v1f16(<1 x half> undef, <1 x half> undef, <1 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> undef, <2 x half> undef, <2 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> undef, <4 x half> undef, <4 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> undef, <8 x half> undef, <8 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> undef, <16 x half> undef, <16 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fsub half poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V1F16 = fsub <1 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V2F16 = fsub <2 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V4F16 = fsub <4 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8F16 = fsub <8 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16F16 = fsub <16 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %V32F16 = fsub <32 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV1F16 = fsub <vscale x 1 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV2F16 = fsub <vscale x 2 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %NXV4F16 = fsub <vscale x 4 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %NXV8F16 = fsub <vscale x 8 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %NXV16F16 = fsub <vscale x 16 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fsub <vscale x 32 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fsub.v1f16(<1 x half> poison, <1 x half> poison, <1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> poison, <2 x half> poison, <2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> poison, <4 x half> poison, <4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> poison, <8 x half> poison, <8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> poison, <16 x half> poison, <16 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %F16 = fsub half undef, undef
-
- %V1F16 = fsub <1 x half> undef, undef
- %V2F16 = fsub <2 x half> undef, undef
- %V4F16 = fsub <4 x half> undef, undef
- %V8F16 = fsub <8 x half> undef, undef
- %V16F16 = fsub <16 x half> undef, undef
- %V32F16 = fsub <32 x half> undef, undef
-
- %NXV1F16 = fsub <vscale x 1 x half> undef, undef
- %NXV2F16 = fsub <vscale x 2 x half> undef, undef
- %NXV4F16 = fsub <vscale x 4 x half> undef, undef
- %NXV8F16 = fsub <vscale x 8 x half> undef, undef
- %NXV16F16 = fsub <vscale x 16 x half> undef, undef
- %NXV32F16 = fsub <vscale x 32 x half> undef, undef
-
- %V1F16_VP = call <1 x half> @llvm.vp.fsub.v1f16(<1 x half> undef, <1 x half> undef, <1 x i1> undef, i32 undef)
- %V2F16_VP = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> undef, <2 x half> undef, <2 x i1> undef, i32 undef)
- %V4F16_VP = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> undef, <4 x half> undef, <4 x i1> undef, i32 undef)
- %V8F16_VP = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> undef, <8 x half> undef, <8 x i1> undef, i32 undef)
- %V16F16_VP = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> undef, <16 x half> undef, <16 x i1> undef, i32 undef)
-
- %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef)
- %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef)
+; NO-ZFHMIN-LABEL: 'fsub_f16'
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fsub half poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fsub <1 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F16 = fsub <2 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F16 = fsub <4 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8F16 = fsub <8 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16F16 = fsub <16 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32F16 = fsub <32 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16 = fsub <vscale x 1 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16 = fsub <vscale x 2 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16 = fsub <vscale x 4 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16 = fsub <vscale x 8 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16 = fsub <vscale x 16 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV32F16 = fsub <vscale x 32 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fsub.v1f16(<1 x half> poison, <1 x half> poison, <1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> poison, <2 x half> poison, <2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> poison, <4 x half> poison, <4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> poison, <8 x half> poison, <8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> poison, <16 x half> poison, <16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %F16 = fsub half poison, poison
+
+ %V1F16 = fsub <1 x half> poison, poison
+ %V2F16 = fsub <2 x half> poison, poison
+ %V4F16 = fsub <4 x half> poison, poison
+ %V8F16 = fsub <8 x half> poison, poison
+ %V16F16 = fsub <16 x half> poison, poison
+ %V32F16 = fsub <32 x half> poison, poison
+
+ %NXV1F16 = fsub <vscale x 1 x half> poison, poison
+ %NXV2F16 = fsub <vscale x 2 x half> poison, poison
+ %NXV4F16 = fsub <vscale x 4 x half> poison, poison
+ %NXV8F16 = fsub <vscale x 8 x half> poison, poison
+ %NXV16F16 = fsub <vscale x 16 x half> poison, poison
+ %NXV32F16 = fsub <vscale x 32 x half> poison, poison
+
+ %V1F16_VP = call <1 x half> @llvm.vp.fsub(<1 x half> poison, <1 x half> poison, <1 x i1> poison, i32 poison)
+ %V2F16_VP = call <2 x half> @llvm.vp.fsub(<2 x half> poison, <2 x half> poison, <2 x i1> poison, i32 poison)
+ %V4F16_VP = call <4 x half> @llvm.vp.fsub(<4 x half> poison, <4 x half> poison, <4 x i1> poison, i32 poison)
+ %V8F16_VP = call <8 x half> @llvm.vp.fsub(<8 x half> poison, <8 x half> poison, <8 x i1> poison, i32 poison)
+ %V16F16_VP = call <16 x half> @llvm.vp.fsub(<16 x half> poison, <16 x half> poison, <16 x i1> poison, i32 poison)
+
+ %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fsub(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fsub(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fsub(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fsub(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+ %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fsub(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
ret void
}
define void @fmul() {
; CHECK-LABEL: 'fmul'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fmul bfloat undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = fmul float undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = fmul double undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V1BF16 = fmul <1 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V2BF16 = fmul <2 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V4BF16 = fmul <4 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8BF16 = fmul <8 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16BF16 = fmul <16 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV1BF16 = fmul <vscale x 1 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV2BF16 = fmul <vscale x 2 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %NXV4BF16 = fmul <vscale x 4 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %NXV8BF16 = fmul <vscale x 8 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %NXV16BF16 = fmul <vscale x 16 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = fmul <1 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = fmul <2 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fmul <4 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F32 = fmul <8 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16F32 = fmul <16 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F32 = fmul <vscale x 1 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F32 = fmul <vscale x 2 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4F32 = fmul <vscale x 4 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8F32 = fmul <vscale x 8 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV16F32 = fmul <vscale x 16 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64 = fmul <1 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64 = fmul <2 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4F64 = fmul <4 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8F64 = fmul <8 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F64 = fmul <vscale x 1 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64 = fmul <vscale x 2 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64 = fmul <vscale x 4 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64 = fmul <vscale x 8 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F16_VP = call <1 x bfloat> @llvm.vp.fmul.v1bf16(<1 x bfloat> undef, <1 x bfloat> undef, <1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2F16_VP = call <2 x bfloat> @llvm.vp.fmul.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4F16_VP = call <4 x bfloat> @llvm.vp.fmul.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8F16_VP = call <8 x bfloat> @llvm.vp.fmul.v8bf16(<8 x bfloat> undef, <8 x bfloat> undef, <8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16F16_VP = call <16 x bfloat> @llvm.vp.fmul.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32_VP = call <1 x float> @llvm.vp.fmul.v1f32(<1 x float> undef, <1 x float> undef, <1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32_VP = call <2 x float> @llvm.vp.fmul.v2f32(<2 x float> undef, <2 x float> undef, <2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32_VP = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> undef, <4 x float> undef, <4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F32_VP = call <8 x float> @llvm.vp.fmul.v8f32(<8 x float> undef, <8 x float> undef, <8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16F32_VP = call <16 x float> @llvm.vp.fmul.v16f32(<16 x float> undef, <16 x float> undef, <16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64_VP = call <1 x double> @llvm.vp.fmul.v1f64(<1 x double> undef, <1 x double> undef, <1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64_VP = call <2 x double> @llvm.vp.fmul.v2f64(<2 x double> undef, <2 x double> undef, <2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4F64_VP = call <4 x double> @llvm.vp.fmul.v4f64(<4 x double> undef, <4 x double> undef, <4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8F64_VP = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> undef, <8 x double> undef, <8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV1F16_VP = call <vscale x 1 x bfloat> @llvm.vp.fmul.nxv1bf16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef, <vscale x 1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV2F16_VP = call <vscale x 2 x bfloat> @llvm.vp.fmul.nxv2bf16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef, <vscale x 2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV4F16_VP = call <vscale x 4 x bfloat> @llvm.vp.fmul.nxv4bf16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef, <vscale x 4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %NXV8F16_VP = call <vscale x 8 x bfloat> @llvm.vp.fmul.nxv8bf16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef, <vscale x 8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %NXV16F16_VP = call <vscale x 16 x bfloat> @llvm.vp.fmul.nxv16bf16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef, <vscale x 16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.fmul.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.fmul.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.fmul.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.fmul.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.fmul.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef, <vscale x 16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.fmul.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.fmul.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.fmul.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = fmul float poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = fmul double poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = fmul <1 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = fmul <2 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fmul <4 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F32 = fmul <8 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16F32 = fmul <16 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F32 = fmul <vscale x 1 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F32 = fmul <vscale x 2 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4F32 = fmul <vscale x 4 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8F32 = fmul <vscale x 8 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV16F32 = fmul <vscale x 16 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64 = fmul <1 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64 = fmul <2 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4F64 = fmul <4 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8F64 = fmul <8 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F64 = fmul <vscale x 1 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64 = fmul <vscale x 2 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64 = fmul <vscale x 4 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64 = fmul <vscale x 8 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32_VP = call <1 x float> @llvm.vp.fmul.v1f32(<1 x float> poison, <1 x float> poison, <1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32_VP = call <2 x float> @llvm.vp.fmul.v2f32(<2 x float> poison, <2 x float> poison, <2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32_VP = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> poison, <4 x float> poison, <4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F32_VP = call <8 x float> @llvm.vp.fmul.v8f32(<8 x float> poison, <8 x float> poison, <8 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16F32_VP = call <16 x float> @llvm.vp.fmul.v16f32(<16 x float> poison, <16 x float> poison, <16 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64_VP = call <1 x double> @llvm.vp.fmul.v1f64(<1 x double> poison, <1 x double> poison, <1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64_VP = call <2 x double> @llvm.vp.fmul.v2f64(<2 x double> poison, <2 x double> poison, <2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4F64_VP = call <4 x double> @llvm.vp.fmul.v4f64(<4 x double> poison, <4 x double> poison, <4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8F64_VP = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> poison, <8 x double> poison, <8 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.fmul.nxv1f32(<vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.fmul.nxv2f32(<vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.fmul.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> poison, <vscale x 4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.fmul.nxv8f32(<vscale x 8 x float> poison, <vscale x 8 x float> poison, <vscale x 8 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.fmul.nxv16f32(<vscale x 16 x float> poison, <vscale x 16 x float> poison, <vscale x 16 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.fmul.nxv2f64(<vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.fmul.nxv4f64(<vscale x 4 x double> poison, <vscale x 4 x double> poison, <vscale x 4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.fmul.nxv8f64(<vscale x 8 x double> poison, <vscale x 8 x double> poison, <vscale x 8 x i1> poison, i32 poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %BF16 = fmul bfloat undef, undef
- %F32 = fmul float undef, undef
- %F64 = fmul double undef, undef
-
- %V1BF16 = fmul <1 x bfloat> undef, undef
- %V2BF16 = fmul <2 x bfloat> undef, undef
- %V4BF16 = fmul <4 x bfloat> undef, undef
- %V8BF16 = fmul <8 x bfloat> undef, undef
- %V16BF16 = fmul <16 x bfloat> undef, undef
-
- %NXV1BF16 = fmul <vscale x 1 x bfloat> undef, undef
- %NXV2BF16 = fmul <vscale x 2 x bfloat> undef, undef
- %NXV4BF16 = fmul <vscale x 4 x bfloat> undef, undef
- %NXV8BF16 = fmul <vscale x 8 x bfloat> undef, undef
- %NXV16BF16 = fmul <vscale x 16 x bfloat> undef, undef
-
- %V1F32 = fmul <1 x float> undef, undef
- %V2F32 = fmul <2 x float> undef, undef
- %V4F32 = fmul <4 x float> undef, undef
- %V8F32 = fmul <8 x float> undef, undef
- %V16F32 = fmul <16 x float> undef, undef
-
- %NXV1F32 = fmul <vscale x 1 x float> undef, undef
- %NXV2F32 = fmul <vscale x 2 x float> undef, undef
- %NXV4F32 = fmul <vscale x 4 x float> undef, undef
- %NXV8F32 = fmul <vscale x 8 x float> undef, undef
- %NXV16F32 = fmul <vscale x 16 x float> undef, undef
-
- %V1F64 = fmul <1 x double> undef, undef
- %V2F64 = fmul <2 x double> undef, undef
- %V4F64 = fmul <4 x double> undef, undef
- %V8F64 = fmul <8 x double> undef, undef
-
- %NXV1F64 = fmul <vscale x 1 x double> undef, undef
- %NXV2F64 = fmul <vscale x 2 x double> undef, undef
- %NXV4F64 = fmul <vscale x 4 x double> undef, undef
- %NXV8F64 = fmul <vscale x 8 x double> undef, undef
-
- %V1F16_VP = call <1 x bfloat> @llvm.vp.fmul.v1f16(<1 x bfloat> undef, <1 x bfloat> undef, <1 x i1> undef, i32 undef)
- %V2F16_VP = call <2 x bfloat> @llvm.vp.fmul.v2f16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x i1> undef, i32 undef)
- %V4F16_VP = call <4 x bfloat> @llvm.vp.fmul.v4f16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x i1> undef, i32 undef)
- %V8F16_VP = call <8 x bfloat> @llvm.vp.fmul.v8f16(<8 x bfloat> undef, <8 x bfloat> undef, <8 x i1> undef, i32 undef)
- %V16F16_VP = call <16 x bfloat> @llvm.vp.fmul.v16f16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x i1> undef, i32 undef)
-
- %V1F32_VP = call <1 x float> @llvm.vp.fmul.v1f32(<1 x float> undef, <1 x float> undef, <1 x i1> undef, i32 undef)
- %V2F32_VP = call <2 x float> @llvm.vp.fmul.v2f32(<2 x float> undef, <2 x float> undef, <2 x i1> undef, i32 undef)
- %V4F32_VP = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> undef, <4 x float> undef, <4 x i1> undef, i32 undef)
- %V8F32_VP = call <8 x float> @llvm.vp.fmul.v8f32(<8 x float> undef, <8 x float> undef, <8 x i1> undef, i32 undef)
- %V16F32_VP = call <16 x float> @llvm.vp.fmul.v16f32(<16 x float> undef, <16 x float> undef, <16 x i1> undef, i32 undef)
-
- %V1F64_VP = call <1 x double> @llvm.vp.fmul.v1f64(<1 x double> undef, <1 x double> undef, <1 x i1> undef, i32 undef)
- %V2F64_VP = call <2 x double> @llvm.vp.fmul.v2f64(<2 x double> undef, <2 x double> undef, <2 x i1> undef, i32 undef)
- %V4F64_VP = call <4 x double> @llvm.vp.fmul.v4f64(<4 x double> undef, <4 x double> undef, <4 x i1> undef, i32 undef)
- %V8F64_VP = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> undef, <8 x double> undef, <8 x i1> undef, i32 undef)
-
- %NXV1F16_VP = call <vscale x 1 x bfloat> @llvm.vp.fmul.nxv1f16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2F16_VP = call <vscale x 2 x bfloat> @llvm.vp.fmul.nxv2f16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4F16_VP = call <vscale x 4 x bfloat> @llvm.vp.fmul.nxv4f16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8F16_VP = call <vscale x 8 x bfloat> @llvm.vp.fmul.nxv8f16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef, <vscale x 8 x i1> undef, i32 undef)
- %NXV16F16_VP = call <vscale x 16 x bfloat> @llvm.vp.fmul.nxv16f16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef, <vscale x 16 x i1> undef, i32 undef)
-
- %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.fmul.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.fmul.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.fmul.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.fmul.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x i1> undef, i32 undef)
- %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.fmul.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef, <vscale x 16 x i1> undef, i32 undef)
-
- %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.fmul.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.fmul.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.fmul.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
+ %F32 = fmul float poison, poison
+ %F64 = fmul double poison, poison
+
+ %V1F32 = fmul <1 x float> poison, poison
+ %V2F32 = fmul <2 x float> poison, poison
+ %V4F32 = fmul <4 x float> poison, poison
+ %V8F32 = fmul <8 x float> poison, poison
+ %V16F32 = fmul <16 x float> poison, poison
+
+ %NXV1F32 = fmul <vscale x 1 x float> poison, poison
+ %NXV2F32 = fmul <vscale x 2 x float> poison, poison
+ %NXV4F32 = fmul <vscale x 4 x float> poison, poison
+ %NXV8F32 = fmul <vscale x 8 x float> poison, poison
+ %NXV16F32 = fmul <vscale x 16 x float> poison, poison
+
+ %V1F64 = fmul <1 x double> poison, poison
+ %V2F64 = fmul <2 x double> poison, poison
+ %V4F64 = fmul <4 x double> poison, poison
+ %V8F64 = fmul <8 x double> poison, poison
+
+ %NXV1F64 = fmul <vscale x 1 x double> poison, poison
+ %NXV2F64 = fmul <vscale x 2 x double> poison, poison
+ %NXV4F64 = fmul <vscale x 4 x double> poison, poison
+ %NXV8F64 = fmul <vscale x 8 x double> poison, poison
+
+ %V1F32_VP = call <1 x float> @llvm.vp.fmul(<1 x float> poison, <1 x float> poison, <1 x i1> poison, i32 poison)
+ %V2F32_VP = call <2 x float> @llvm.vp.fmul(<2 x float> poison, <2 x float> poison, <2 x i1> poison, i32 poison)
+ %V4F32_VP = call <4 x float> @llvm.vp.fmul(<4 x float> poison, <4 x float> poison, <4 x i1> poison, i32 poison)
+ %V8F32_VP = call <8 x float> @llvm.vp.fmul(<8 x float> poison, <8 x float> poison, <8 x i1> poison, i32 poison)
+ %V16F32_VP = call <16 x float> @llvm.vp.fmul(<16 x float> poison, <16 x float> poison, <16 x i1> poison, i32 poison)
+
+ %V1F64_VP = call <1 x double> @llvm.vp.fmul(<1 x double> poison, <1 x double> poison, <1 x i1> poison, i32 poison)
+ %V2F64_VP = call <2 x double> @llvm.vp.fmul(<2 x double> poison, <2 x double> poison, <2 x i1> poison, i32 poison)
+ %V4F64_VP = call <4 x double> @llvm.vp.fmul(<4 x double> poison, <4 x double> poison, <4 x i1> poison, i32 poison)
+ %V8F64_VP = call <8 x double> @llvm.vp.fmul(<8 x double> poison, <8 x double> poison, <8 x i1> poison, i32 poison)
+
+ %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.fmul(<vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.fmul(<vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.fmul(<vscale x 4 x float> poison, <vscale x 4 x float> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.fmul(<vscale x 8 x float> poison, <vscale x 8 x float> poison, <vscale x 8 x i1> poison, i32 poison)
+ %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.fmul(<vscale x 16 x float> poison, <vscale x 16 x float> poison, <vscale x 16 x i1> poison, i32 poison)
+
+ %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.fmul(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.fmul(<vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.fmul(<vscale x 4 x double> poison, <vscale x 4 x double> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.fmul(<vscale x 8 x double> poison, <vscale x 8 x double> poison, <vscale x 8 x i1> poison, i32 poison)
+
+ ret void
+}
+
+define void @fmul_bf16() {
+; ZVFH-LABEL: 'fmul_bf16'
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fmul bfloat poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V1BF16 = fmul <1 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V2BF16 = fmul <2 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V4BF16 = fmul <4 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8BF16 = fmul <8 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16BF16 = fmul <16 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %V32BF16 = fmul <32 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV1BF16 = fmul <vscale x 1 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV2BF16 = fmul <vscale x 2 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %NXV4BF16 = fmul <vscale x 4 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %NXV8BF16 = fmul <vscale x 8 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %NXV16BF16 = fmul <vscale x 16 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32BF16 = fmul <vscale x 32 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1BF16_VP = call <1 x bfloat> @llvm.vp.fmul.v1bf16(<1 x bfloat> poison, <1 x bfloat> poison, <1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2BF16_VP = call <2 x bfloat> @llvm.vp.fmul.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4BF16_VP = call <4 x bfloat> @llvm.vp.fmul.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8BF16_VP = call <8 x bfloat> @llvm.vp.fmul.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison, <8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16BF16_VP = call <16 x bfloat> @llvm.vp.fmul.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fmul.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fmul.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fmul.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fmul.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fmul.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; ZVFHMIN-LABEL: 'fmul_bf16'
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fmul bfloat poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V1BF16 = fmul <1 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V2BF16 = fmul <2 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V4BF16 = fmul <4 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8BF16 = fmul <8 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16BF16 = fmul <16 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %V32BF16 = fmul <32 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV1BF16 = fmul <vscale x 1 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV2BF16 = fmul <vscale x 2 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %NXV4BF16 = fmul <vscale x 4 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %NXV8BF16 = fmul <vscale x 8 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %NXV16BF16 = fmul <vscale x 16 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32BF16 = fmul <vscale x 32 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1BF16_VP = call <1 x bfloat> @llvm.vp.fmul.v1bf16(<1 x bfloat> poison, <1 x bfloat> poison, <1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2BF16_VP = call <2 x bfloat> @llvm.vp.fmul.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4BF16_VP = call <4 x bfloat> @llvm.vp.fmul.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8BF16_VP = call <8 x bfloat> @llvm.vp.fmul.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison, <8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16BF16_VP = call <16 x bfloat> @llvm.vp.fmul.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fmul.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fmul.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fmul.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fmul.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fmul.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; NO-ZFHMIN-LABEL: 'fmul_bf16'
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fmul bfloat poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16 = fmul <1 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2BF16 = fmul <2 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4BF16 = fmul <4 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8BF16 = fmul <8 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16BF16 = fmul <16 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32BF16 = fmul <32 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16 = fmul <vscale x 1 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16 = fmul <vscale x 2 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16 = fmul <vscale x 4 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16 = fmul <vscale x 8 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16 = fmul <vscale x 16 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV32BF16 = fmul <vscale x 32 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16_VP = call <1 x bfloat> @llvm.vp.fmul.v1bf16(<1 x bfloat> poison, <1 x bfloat> poison, <1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2BF16_VP = call <2 x bfloat> @llvm.vp.fmul.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4BF16_VP = call <4 x bfloat> @llvm.vp.fmul.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8BF16_VP = call <8 x bfloat> @llvm.vp.fmul.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison, <8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16BF16_VP = call <16 x bfloat> @llvm.vp.fmul.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fmul.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fmul.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fmul.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fmul.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fmul.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %BF16 = fmul bfloat poison, poison
+
+ %V1BF16 = fmul <1 x bfloat> poison, poison
+ %V2BF16 = fmul <2 x bfloat> poison, poison
+ %V4BF16 = fmul <4 x bfloat> poison, poison
+ %V8BF16 = fmul <8 x bfloat> poison, poison
+ %V16BF16 = fmul <16 x bfloat> poison, poison
+ %V32BF16 = fmul <32 x bfloat> poison, poison
+
+ %NXV1BF16 = fmul <vscale x 1 x bfloat> poison, poison
+ %NXV2BF16 = fmul <vscale x 2 x bfloat> poison, poison
+ %NXV4BF16 = fmul <vscale x 4 x bfloat> poison, poison
+ %NXV8BF16 = fmul <vscale x 8 x bfloat> poison, poison
+ %NXV16BF16 = fmul <vscale x 16 x bfloat> poison, poison
+ %NXV32BF16 = fmul <vscale x 32 x bfloat> poison, poison
+
+ %V1BF16_VP = call <1 x bfloat> @llvm.vp.fmul(<1 x bfloat> poison, <1 x bfloat> poison, <1 x i1> poison, i32 poison)
+ %V2BF16_VP = call <2 x bfloat> @llvm.vp.fmul(<2 x bfloat> poison, <2 x bfloat> poison, <2 x i1> poison, i32 poison)
+ %V4BF16_VP = call <4 x bfloat> @llvm.vp.fmul(<4 x bfloat> poison, <4 x bfloat> poison, <4 x i1> poison, i32 poison)
+ %V8BF16_VP = call <8 x bfloat> @llvm.vp.fmul(<8 x bfloat> poison, <8 x bfloat> poison, <8 x i1> poison, i32 poison)
+ %V16BF16_VP = call <16 x bfloat> @llvm.vp.fmul(<16 x bfloat> poison, <16 x bfloat> poison, <16 x i1> poison, i32 poison)
+
+ %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fmul(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fmul(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fmul(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fmul(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+ %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fmul(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
ret void
}
define void @fmul_f16() {
; ZVFH-LABEL: 'fmul_f16'
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fmul half undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fmul <1 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = fmul <2 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = fmul <4 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = fmul <8 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16 = fmul <16 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32F16 = fmul <32 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 = fmul <vscale x 1 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16 = fmul <vscale x 2 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = fmul <vscale x 4 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16 = fmul <vscale x 8 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16 = fmul <vscale x 16 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fmul <vscale x 32 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fmul.v1f16(<1 x half> undef, <1 x half> undef, <1 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fmul.v2f16(<2 x half> undef, <2 x half> undef, <2 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fmul.v4f16(<4 x half> undef, <4 x half> undef, <4 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fmul.v8f16(<8 x half> undef, <8 x half> undef, <8 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fmul.v16f16(<16 x half> undef, <16 x half> undef, <16 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fmul.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fmul.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fmul.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fmul.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fmul.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fmul half poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fmul <1 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = fmul <2 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = fmul <4 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = fmul <8 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16 = fmul <16 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32F16 = fmul <32 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 = fmul <vscale x 1 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16 = fmul <vscale x 2 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = fmul <vscale x 4 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16 = fmul <vscale x 8 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16 = fmul <vscale x 16 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fmul <vscale x 32 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fmul.v1f16(<1 x half> poison, <1 x half> poison, <1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fmul.v2f16(<2 x half> poison, <2 x half> poison, <2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fmul.v4f16(<4 x half> poison, <4 x half> poison, <4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fmul.v8f16(<8 x half> poison, <8 x half> poison, <8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fmul.v16f16(<16 x half> poison, <16 x half> poison, <16 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fmul.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fmul.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fmul.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fmul.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fmul.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
; ZVFHMIN-LABEL: 'fmul_f16'
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fmul half undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V1F16 = fmul <1 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V2F16 = fmul <2 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V4F16 = fmul <4 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8F16 = fmul <8 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16F16 = fmul <16 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %V32F16 = fmul <32 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV1F16 = fmul <vscale x 1 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV2F16 = fmul <vscale x 2 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %NXV4F16 = fmul <vscale x 4 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %NXV8F16 = fmul <vscale x 8 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %NXV16F16 = fmul <vscale x 16 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fmul <vscale x 32 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fmul.v1f16(<1 x half> undef, <1 x half> undef, <1 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fmul.v2f16(<2 x half> undef, <2 x half> undef, <2 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fmul.v4f16(<4 x half> undef, <4 x half> undef, <4 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fmul.v8f16(<8 x half> undef, <8 x half> undef, <8 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fmul.v16f16(<16 x half> undef, <16 x half> undef, <16 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fmul.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fmul.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fmul.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fmul.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fmul.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fmul half poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V1F16 = fmul <1 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V2F16 = fmul <2 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V4F16 = fmul <4 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8F16 = fmul <8 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16F16 = fmul <16 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %V32F16 = fmul <32 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV1F16 = fmul <vscale x 1 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV2F16 = fmul <vscale x 2 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %NXV4F16 = fmul <vscale x 4 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %NXV8F16 = fmul <vscale x 8 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %NXV16F16 = fmul <vscale x 16 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fmul <vscale x 32 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fmul.v1f16(<1 x half> poison, <1 x half> poison, <1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fmul.v2f16(<2 x half> poison, <2 x half> poison, <2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fmul.v4f16(<4 x half> poison, <4 x half> poison, <4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fmul.v8f16(<8 x half> poison, <8 x half> poison, <8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fmul.v16f16(<16 x half> poison, <16 x half> poison, <16 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fmul.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fmul.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fmul.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fmul.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fmul.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %F16 = fmul half undef, undef
-
- %V1F16 = fmul <1 x half> undef, undef
- %V2F16 = fmul <2 x half> undef, undef
- %V4F16 = fmul <4 x half> undef, undef
- %V8F16 = fmul <8 x half> undef, undef
- %V16F16 = fmul <16 x half> undef, undef
- %V32F16 = fmul <32 x half> undef, undef
-
- %NXV1F16 = fmul <vscale x 1 x half> undef, undef
- %NXV2F16 = fmul <vscale x 2 x half> undef, undef
- %NXV4F16 = fmul <vscale x 4 x half> undef, undef
- %NXV8F16 = fmul <vscale x 8 x half> undef, undef
- %NXV16F16 = fmul <vscale x 16 x half> undef, undef
- %NXV32F16 = fmul <vscale x 32 x half> undef, undef
-
- %V1F16_VP = call <1 x half> @llvm.vp.fmul.v1f16(<1 x half> undef, <1 x half> undef, <1 x i1> undef, i32 undef)
- %V2F16_VP = call <2 x half> @llvm.vp.fmul.v2f16(<2 x half> undef, <2 x half> undef, <2 x i1> undef, i32 undef)
- %V4F16_VP = call <4 x half> @llvm.vp.fmul.v4f16(<4 x half> undef, <4 x half> undef, <4 x i1> undef, i32 undef)
- %V8F16_VP = call <8 x half> @llvm.vp.fmul.v8f16(<8 x half> undef, <8 x half> undef, <8 x i1> undef, i32 undef)
- %V16F16_VP = call <16 x half> @llvm.vp.fmul.v16f16(<16 x half> undef, <16 x half> undef, <16 x i1> undef, i32 undef)
-
- %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fmul.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fmul.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fmul.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fmul.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef)
- %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fmul.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef)
+; NO-ZFHMIN-LABEL: 'fmul_f16'
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fmul half poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fmul <1 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F16 = fmul <2 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F16 = fmul <4 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8F16 = fmul <8 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16F16 = fmul <16 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32F16 = fmul <32 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16 = fmul <vscale x 1 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16 = fmul <vscale x 2 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16 = fmul <vscale x 4 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16 = fmul <vscale x 8 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16 = fmul <vscale x 16 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV32F16 = fmul <vscale x 32 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fmul.v1f16(<1 x half> poison, <1 x half> poison, <1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fmul.v2f16(<2 x half> poison, <2 x half> poison, <2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fmul.v4f16(<4 x half> poison, <4 x half> poison, <4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fmul.v8f16(<8 x half> poison, <8 x half> poison, <8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fmul.v16f16(<16 x half> poison, <16 x half> poison, <16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fmul.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fmul.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fmul.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fmul.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fmul.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %F16 = fmul half poison, poison
+
+ %V1F16 = fmul <1 x half> poison, poison
+ %V2F16 = fmul <2 x half> poison, poison
+ %V4F16 = fmul <4 x half> poison, poison
+ %V8F16 = fmul <8 x half> poison, poison
+ %V16F16 = fmul <16 x half> poison, poison
+ %V32F16 = fmul <32 x half> poison, poison
+
+ %NXV1F16 = fmul <vscale x 1 x half> poison, poison
+ %NXV2F16 = fmul <vscale x 2 x half> poison, poison
+ %NXV4F16 = fmul <vscale x 4 x half> poison, poison
+ %NXV8F16 = fmul <vscale x 8 x half> poison, poison
+ %NXV16F16 = fmul <vscale x 16 x half> poison, poison
+ %NXV32F16 = fmul <vscale x 32 x half> poison, poison
+
+ %V1F16_VP = call <1 x half> @llvm.vp.fmul(<1 x half> poison, <1 x half> poison, <1 x i1> poison, i32 poison)
+ %V2F16_VP = call <2 x half> @llvm.vp.fmul(<2 x half> poison, <2 x half> poison, <2 x i1> poison, i32 poison)
+ %V4F16_VP = call <4 x half> @llvm.vp.fmul(<4 x half> poison, <4 x half> poison, <4 x i1> poison, i32 poison)
+ %V8F16_VP = call <8 x half> @llvm.vp.fmul(<8 x half> poison, <8 x half> poison, <8 x i1> poison, i32 poison)
+ %V16F16_VP = call <16 x half> @llvm.vp.fmul(<16 x half> poison, <16 x half> poison, <16 x i1> poison, i32 poison)
+
+ %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fmul(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fmul(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fmul(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fmul(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+ %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fmul(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
ret void
}
define void @fdiv() {
; CHECK-LABEL: 'fdiv'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fdiv bfloat undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = fdiv float undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = fdiv double undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V1BF16 = fdiv <1 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V2BF16 = fdiv <2 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V4BF16 = fdiv <4 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8BF16 = fdiv <8 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16BF16 = fdiv <16 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV1BF16 = fdiv <vscale x 1 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV2BF16 = fdiv <vscale x 2 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %NXV4BF16 = fdiv <vscale x 4 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %NXV8BF16 = fdiv <vscale x 8 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %NXV16BF16 = fdiv <vscale x 16 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = fdiv <1 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = fdiv <2 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fdiv <4 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F32 = fdiv <8 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16F32 = fdiv <16 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F32 = fdiv <vscale x 1 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F32 = fdiv <vscale x 2 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4F32 = fdiv <vscale x 4 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8F32 = fdiv <vscale x 8 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV16F32 = fdiv <vscale x 16 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64 = fdiv <1 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64 = fdiv <2 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4F64 = fdiv <4 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8F64 = fdiv <8 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F64 = fdiv <vscale x 1 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64 = fdiv <vscale x 2 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64 = fdiv <vscale x 4 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64 = fdiv <vscale x 8 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F16_VP = call <1 x bfloat> @llvm.vp.fdiv.v1bf16(<1 x bfloat> undef, <1 x bfloat> undef, <1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2F16_VP = call <2 x bfloat> @llvm.vp.fdiv.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4F16_VP = call <4 x bfloat> @llvm.vp.fdiv.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8F16_VP = call <8 x bfloat> @llvm.vp.fdiv.v8bf16(<8 x bfloat> undef, <8 x bfloat> undef, <8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16F16_VP = call <16 x bfloat> @llvm.vp.fdiv.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32_VP = call <1 x float> @llvm.vp.fdiv.v1f32(<1 x float> undef, <1 x float> undef, <1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32_VP = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> undef, <2 x float> undef, <2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32_VP = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> undef, <4 x float> undef, <4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F32_VP = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> undef, <8 x float> undef, <8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16F32_VP = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> undef, <16 x float> undef, <16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64_VP = call <1 x double> @llvm.vp.fdiv.v1f64(<1 x double> undef, <1 x double> undef, <1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64_VP = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> undef, <2 x double> undef, <2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4F64_VP = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> undef, <4 x double> undef, <4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8F64_VP = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> undef, <8 x double> undef, <8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV1F16_VP = call <vscale x 1 x bfloat> @llvm.vp.fdiv.nxv1bf16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef, <vscale x 1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV2F16_VP = call <vscale x 2 x bfloat> @llvm.vp.fdiv.nxv2bf16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef, <vscale x 2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV4F16_VP = call <vscale x 4 x bfloat> @llvm.vp.fdiv.nxv4bf16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef, <vscale x 4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %NXV8F16_VP = call <vscale x 8 x bfloat> @llvm.vp.fdiv.nxv8bf16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef, <vscale x 8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %NXV16F16_VP = call <vscale x 16 x bfloat> @llvm.vp.fdiv.nxv16bf16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef, <vscale x 16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef, <vscale x 16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = fdiv float poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = fdiv double poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = fdiv <1 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = fdiv <2 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fdiv <4 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F32 = fdiv <8 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16F32 = fdiv <16 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F32 = fdiv <vscale x 1 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F32 = fdiv <vscale x 2 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4F32 = fdiv <vscale x 4 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8F32 = fdiv <vscale x 8 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV16F32 = fdiv <vscale x 16 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64 = fdiv <1 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64 = fdiv <2 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4F64 = fdiv <4 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8F64 = fdiv <8 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F64 = fdiv <vscale x 1 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64 = fdiv <vscale x 2 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64 = fdiv <vscale x 4 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64 = fdiv <vscale x 8 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32_VP = call <1 x float> @llvm.vp.fdiv.v1f32(<1 x float> poison, <1 x float> poison, <1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32_VP = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> poison, <2 x float> poison, <2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32_VP = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> poison, <4 x float> poison, <4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F32_VP = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> poison, <8 x float> poison, <8 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16F32_VP = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> poison, <16 x float> poison, <16 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64_VP = call <1 x double> @llvm.vp.fdiv.v1f64(<1 x double> poison, <1 x double> poison, <1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64_VP = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> poison, <2 x double> poison, <2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4F64_VP = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> poison, <4 x double> poison, <4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8F64_VP = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> poison, <8 x double> poison, <8 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> poison, <vscale x 4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float> poison, <vscale x 8 x float> poison, <vscale x 8 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float> poison, <vscale x 16 x float> poison, <vscale x 16 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double> poison, <vscale x 4 x double> poison, <vscale x 4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double> poison, <vscale x 8 x double> poison, <vscale x 8 x i1> poison, i32 poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %BF16 = fdiv bfloat undef, undef
- %F32 = fdiv float undef, undef
- %F64 = fdiv double undef, undef
-
- %V1BF16 = fdiv <1 x bfloat> undef, undef
- %V2BF16 = fdiv <2 x bfloat> undef, undef
- %V4BF16 = fdiv <4 x bfloat> undef, undef
- %V8BF16 = fdiv <8 x bfloat> undef, undef
- %V16BF16 = fdiv <16 x bfloat> undef, undef
-
- %NXV1BF16 = fdiv <vscale x 1 x bfloat> undef, undef
- %NXV2BF16 = fdiv <vscale x 2 x bfloat> undef, undef
- %NXV4BF16 = fdiv <vscale x 4 x bfloat> undef, undef
- %NXV8BF16 = fdiv <vscale x 8 x bfloat> undef, undef
- %NXV16BF16 = fdiv <vscale x 16 x bfloat> undef, undef
-
- %V1F32 = fdiv <1 x float> undef, undef
- %V2F32 = fdiv <2 x float> undef, undef
- %V4F32 = fdiv <4 x float> undef, undef
- %V8F32 = fdiv <8 x float> undef, undef
- %V16F32 = fdiv <16 x float> undef, undef
-
- %NXV1F32 = fdiv <vscale x 1 x float> undef, undef
- %NXV2F32 = fdiv <vscale x 2 x float> undef, undef
- %NXV4F32 = fdiv <vscale x 4 x float> undef, undef
- %NXV8F32 = fdiv <vscale x 8 x float> undef, undef
- %NXV16F32 = fdiv <vscale x 16 x float> undef, undef
-
- %V1F64 = fdiv <1 x double> undef, undef
- %V2F64 = fdiv <2 x double> undef, undef
- %V4F64 = fdiv <4 x double> undef, undef
- %V8F64 = fdiv <8 x double> undef, undef
-
- %NXV1F64 = fdiv <vscale x 1 x double> undef, undef
- %NXV2F64 = fdiv <vscale x 2 x double> undef, undef
- %NXV4F64 = fdiv <vscale x 4 x double> undef, undef
- %NXV8F64 = fdiv <vscale x 8 x double> undef, undef
-
- %V1F16_VP = call <1 x bfloat> @llvm.vp.fdiv.v1f16(<1 x bfloat> undef, <1 x bfloat> undef, <1 x i1> undef, i32 undef)
- %V2F16_VP = call <2 x bfloat> @llvm.vp.fdiv.v2f16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x i1> undef, i32 undef)
- %V4F16_VP = call <4 x bfloat> @llvm.vp.fdiv.v4f16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x i1> undef, i32 undef)
- %V8F16_VP = call <8 x bfloat> @llvm.vp.fdiv.v8f16(<8 x bfloat> undef, <8 x bfloat> undef, <8 x i1> undef, i32 undef)
- %V16F16_VP = call <16 x bfloat> @llvm.vp.fdiv.v16f16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x i1> undef, i32 undef)
-
- %V1F32_VP = call <1 x float> @llvm.vp.fdiv.v1f32(<1 x float> undef, <1 x float> undef, <1 x i1> undef, i32 undef)
- %V2F32_VP = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> undef, <2 x float> undef, <2 x i1> undef, i32 undef)
- %V4F32_VP = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> undef, <4 x float> undef, <4 x i1> undef, i32 undef)
- %V8F32_VP = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> undef, <8 x float> undef, <8 x i1> undef, i32 undef)
- %V16F32_VP = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> undef, <16 x float> undef, <16 x i1> undef, i32 undef)
-
- %V1F64_VP = call <1 x double> @llvm.vp.fdiv.v1f64(<1 x double> undef, <1 x double> undef, <1 x i1> undef, i32 undef)
- %V2F64_VP = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> undef, <2 x double> undef, <2 x i1> undef, i32 undef)
- %V4F64_VP = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> undef, <4 x double> undef, <4 x i1> undef, i32 undef)
- %V8F64_VP = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> undef, <8 x double> undef, <8 x i1> undef, i32 undef)
-
- %NXV1F16_VP = call <vscale x 1 x bfloat> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2F16_VP = call <vscale x 2 x bfloat> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4F16_VP = call <vscale x 4 x bfloat> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8F16_VP = call <vscale x 8 x bfloat> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef, <vscale x 8 x i1> undef, i32 undef)
- %NXV16F16_VP = call <vscale x 16 x bfloat> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef, <vscale x 16 x i1> undef, i32 undef)
-
- %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x i1> undef, i32 undef)
- %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef, <vscale x 16 x i1> undef, i32 undef)
-
- %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
+ %F32 = fdiv float poison, poison
+ %F64 = fdiv double poison, poison
+
+ %V1F32 = fdiv <1 x float> poison, poison
+ %V2F32 = fdiv <2 x float> poison, poison
+ %V4F32 = fdiv <4 x float> poison, poison
+ %V8F32 = fdiv <8 x float> poison, poison
+ %V16F32 = fdiv <16 x float> poison, poison
+
+ %NXV1F32 = fdiv <vscale x 1 x float> poison, poison
+ %NXV2F32 = fdiv <vscale x 2 x float> poison, poison
+ %NXV4F32 = fdiv <vscale x 4 x float> poison, poison
+ %NXV8F32 = fdiv <vscale x 8 x float> poison, poison
+ %NXV16F32 = fdiv <vscale x 16 x float> poison, poison
+
+ %V1F64 = fdiv <1 x double> poison, poison
+ %V2F64 = fdiv <2 x double> poison, poison
+ %V4F64 = fdiv <4 x double> poison, poison
+ %V8F64 = fdiv <8 x double> poison, poison
+
+ %NXV1F64 = fdiv <vscale x 1 x double> poison, poison
+ %NXV2F64 = fdiv <vscale x 2 x double> poison, poison
+ %NXV4F64 = fdiv <vscale x 4 x double> poison, poison
+ %NXV8F64 = fdiv <vscale x 8 x double> poison, poison
+
+ %V1F32_VP = call <1 x float> @llvm.vp.fdiv(<1 x float> poison, <1 x float> poison, <1 x i1> poison, i32 poison)
+ %V2F32_VP = call <2 x float> @llvm.vp.fdiv(<2 x float> poison, <2 x float> poison, <2 x i1> poison, i32 poison)
+ %V4F32_VP = call <4 x float> @llvm.vp.fdiv(<4 x float> poison, <4 x float> poison, <4 x i1> poison, i32 poison)
+ %V8F32_VP = call <8 x float> @llvm.vp.fdiv(<8 x float> poison, <8 x float> poison, <8 x i1> poison, i32 poison)
+ %V16F32_VP = call <16 x float> @llvm.vp.fdiv(<16 x float> poison, <16 x float> poison, <16 x i1> poison, i32 poison)
+
+ %V1F64_VP = call <1 x double> @llvm.vp.fdiv(<1 x double> poison, <1 x double> poison, <1 x i1> poison, i32 poison)
+ %V2F64_VP = call <2 x double> @llvm.vp.fdiv(<2 x double> poison, <2 x double> poison, <2 x i1> poison, i32 poison)
+ %V4F64_VP = call <4 x double> @llvm.vp.fdiv(<4 x double> poison, <4 x double> poison, <4 x i1> poison, i32 poison)
+ %V8F64_VP = call <8 x double> @llvm.vp.fdiv(<8 x double> poison, <8 x double> poison, <8 x i1> poison, i32 poison)
+
+ %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.fdiv(<vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.fdiv(<vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.fdiv(<vscale x 4 x float> poison, <vscale x 4 x float> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.fdiv(<vscale x 8 x float> poison, <vscale x 8 x float> poison, <vscale x 8 x i1> poison, i32 poison)
+ %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.fdiv(<vscale x 16 x float> poison, <vscale x 16 x float> poison, <vscale x 16 x i1> poison, i32 poison)
+
+ %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.fdiv(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.fdiv(<vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.fdiv(<vscale x 4 x double> poison, <vscale x 4 x double> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.fdiv(<vscale x 8 x double> poison, <vscale x 8 x double> poison, <vscale x 8 x i1> poison, i32 poison)
+
+ ret void
+}
+
+define void @fdiv_bf16() {
+; ZVFH-LABEL: 'fdiv_bf16'
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fdiv bfloat poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V1BF16 = fdiv <1 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V2BF16 = fdiv <2 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V4BF16 = fdiv <4 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8BF16 = fdiv <8 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16BF16 = fdiv <16 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %V32BF16 = fdiv <32 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV1BF16 = fdiv <vscale x 1 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV2BF16 = fdiv <vscale x 2 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %NXV4BF16 = fdiv <vscale x 4 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %NXV8BF16 = fdiv <vscale x 8 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %NXV16BF16 = fdiv <vscale x 16 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32BF16 = fdiv <vscale x 32 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1BF16_VP = call <1 x bfloat> @llvm.vp.fdiv.v1bf16(<1 x bfloat> poison, <1 x bfloat> poison, <1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2BF16_VP = call <2 x bfloat> @llvm.vp.fdiv.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4BF16_VP = call <4 x bfloat> @llvm.vp.fdiv.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8BF16_VP = call <8 x bfloat> @llvm.vp.fdiv.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison, <8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16BF16_VP = call <16 x bfloat> @llvm.vp.fdiv.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fdiv.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fdiv.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fdiv.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fdiv.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fdiv.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; ZVFHMIN-LABEL: 'fdiv_bf16'
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fdiv bfloat poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V1BF16 = fdiv <1 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V2BF16 = fdiv <2 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V4BF16 = fdiv <4 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8BF16 = fdiv <8 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16BF16 = fdiv <16 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %V32BF16 = fdiv <32 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV1BF16 = fdiv <vscale x 1 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV2BF16 = fdiv <vscale x 2 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %NXV4BF16 = fdiv <vscale x 4 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %NXV8BF16 = fdiv <vscale x 8 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %NXV16BF16 = fdiv <vscale x 16 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32BF16 = fdiv <vscale x 32 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1BF16_VP = call <1 x bfloat> @llvm.vp.fdiv.v1bf16(<1 x bfloat> poison, <1 x bfloat> poison, <1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2BF16_VP = call <2 x bfloat> @llvm.vp.fdiv.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4BF16_VP = call <4 x bfloat> @llvm.vp.fdiv.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8BF16_VP = call <8 x bfloat> @llvm.vp.fdiv.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison, <8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16BF16_VP = call <16 x bfloat> @llvm.vp.fdiv.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fdiv.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fdiv.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fdiv.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fdiv.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fdiv.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; NO-ZFHMIN-LABEL: 'fdiv_bf16'
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fdiv bfloat poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16 = fdiv <1 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2BF16 = fdiv <2 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4BF16 = fdiv <4 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8BF16 = fdiv <8 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16BF16 = fdiv <16 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32BF16 = fdiv <32 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16 = fdiv <vscale x 1 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16 = fdiv <vscale x 2 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16 = fdiv <vscale x 4 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16 = fdiv <vscale x 8 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16 = fdiv <vscale x 16 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV32BF16 = fdiv <vscale x 32 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16_VP = call <1 x bfloat> @llvm.vp.fdiv.v1bf16(<1 x bfloat> poison, <1 x bfloat> poison, <1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2BF16_VP = call <2 x bfloat> @llvm.vp.fdiv.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4BF16_VP = call <4 x bfloat> @llvm.vp.fdiv.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8BF16_VP = call <8 x bfloat> @llvm.vp.fdiv.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison, <8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16BF16_VP = call <16 x bfloat> @llvm.vp.fdiv.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fdiv.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fdiv.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fdiv.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fdiv.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fdiv.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %BF16 = fdiv bfloat poison, poison
+
+ %V1BF16 = fdiv <1 x bfloat> poison, poison
+ %V2BF16 = fdiv <2 x bfloat> poison, poison
+ %V4BF16 = fdiv <4 x bfloat> poison, poison
+ %V8BF16 = fdiv <8 x bfloat> poison, poison
+ %V16BF16 = fdiv <16 x bfloat> poison, poison
+ %V32BF16 = fdiv <32 x bfloat> poison, poison
+
+ %NXV1BF16 = fdiv <vscale x 1 x bfloat> poison, poison
+ %NXV2BF16 = fdiv <vscale x 2 x bfloat> poison, poison
+ %NXV4BF16 = fdiv <vscale x 4 x bfloat> poison, poison
+ %NXV8BF16 = fdiv <vscale x 8 x bfloat> poison, poison
+ %NXV16BF16 = fdiv <vscale x 16 x bfloat> poison, poison
+ %NXV32BF16 = fdiv <vscale x 32 x bfloat> poison, poison
+
+ %V1BF16_VP = call <1 x bfloat> @llvm.vp.fdiv(<1 x bfloat> poison, <1 x bfloat> poison, <1 x i1> poison, i32 poison)
+ %V2BF16_VP = call <2 x bfloat> @llvm.vp.fdiv(<2 x bfloat> poison, <2 x bfloat> poison, <2 x i1> poison, i32 poison)
+ %V4BF16_VP = call <4 x bfloat> @llvm.vp.fdiv(<4 x bfloat> poison, <4 x bfloat> poison, <4 x i1> poison, i32 poison)
+ %V8BF16_VP = call <8 x bfloat> @llvm.vp.fdiv(<8 x bfloat> poison, <8 x bfloat> poison, <8 x i1> poison, i32 poison)
+ %V16BF16_VP = call <16 x bfloat> @llvm.vp.fdiv(<16 x bfloat> poison, <16 x bfloat> poison, <16 x i1> poison, i32 poison)
+
+ %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fdiv(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fdiv(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fdiv(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fdiv(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+ %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fdiv(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
ret void
}
define void @fdiv_f16() {
; ZVFH-LABEL: 'fdiv_f16'
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fdiv half undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fdiv <1 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = fdiv <2 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = fdiv <4 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = fdiv <8 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16 = fdiv <16 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32F16 = fdiv <32 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 = fdiv <vscale x 1 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16 = fdiv <vscale x 2 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = fdiv <vscale x 4 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16 = fdiv <vscale x 8 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16 = fdiv <vscale x 16 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fdiv <vscale x 32 x half> undef, undef
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fdiv.v1f16(<1 x half> undef, <1 x half> undef, <1 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> undef, <2 x half> undef, <2 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> undef, <4 x half> undef, <4 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> undef, <8 x half> undef, <8 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> undef, <16 x half> undef, <16 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fdiv half poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fdiv <1 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = fdiv <2 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = fdiv <4 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = fdiv <8 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16 = fdiv <16 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32F16 = fdiv <32 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 = fdiv <vscale x 1 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16 = fdiv <vscale x 2 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = fdiv <vscale x 4 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16 = fdiv <vscale x 8 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16 = fdiv <vscale x 16 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fdiv <vscale x 32 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fdiv.v1f16(<1 x half> poison, <1 x half> poison, <1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> poison, <2 x half> poison, <2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> poison, <4 x half> poison, <4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> poison, <8 x half> poison, <8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> poison, <16 x half> poison, <16 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
; ZVFHMIN-LABEL: 'fdiv_f16'
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fdiv half undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V1F16 = fdiv <1 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V2F16 = fdiv <2 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V4F16 = fdiv <4 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8F16 = fdiv <8 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16F16 = fdiv <16 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %V32F16 = fdiv <32 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV1F16 = fdiv <vscale x 1 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV2F16 = fdiv <vscale x 2 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %NXV4F16 = fdiv <vscale x 4 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %NXV8F16 = fdiv <vscale x 8 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %NXV16F16 = fdiv <vscale x 16 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fdiv <vscale x 32 x half> undef, undef
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fdiv.v1f16(<1 x half> undef, <1 x half> undef, <1 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> undef, <2 x half> undef, <2 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> undef, <4 x half> undef, <4 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> undef, <8 x half> undef, <8 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> undef, <16 x half> undef, <16 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fdiv half poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V1F16 = fdiv <1 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V2F16 = fdiv <2 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V4F16 = fdiv <4 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V8F16 = fdiv <8 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %V16F16 = fdiv <16 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %V32F16 = fdiv <32 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV1F16 = fdiv <vscale x 1 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV2F16 = fdiv <vscale x 2 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %NXV4F16 = fdiv <vscale x 4 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %NXV8F16 = fdiv <vscale x 8 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 36 for instruction: %NXV16F16 = fdiv <vscale x 16 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fdiv <vscale x 32 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fdiv.v1f16(<1 x half> poison, <1 x half> poison, <1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> poison, <2 x half> poison, <2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> poison, <4 x half> poison, <4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> poison, <8 x half> poison, <8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> poison, <16 x half> poison, <16 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %F16 = fdiv half undef, undef
-
- %V1F16 = fdiv <1 x half> undef, undef
- %V2F16 = fdiv <2 x half> undef, undef
- %V4F16 = fdiv <4 x half> undef, undef
- %V8F16 = fdiv <8 x half> undef, undef
- %V16F16 = fdiv <16 x half> undef, undef
- %V32F16 = fdiv <32 x half> undef, undef
-
- %NXV1F16 = fdiv <vscale x 1 x half> undef, undef
- %NXV2F16 = fdiv <vscale x 2 x half> undef, undef
- %NXV4F16 = fdiv <vscale x 4 x half> undef, undef
- %NXV8F16 = fdiv <vscale x 8 x half> undef, undef
- %NXV16F16 = fdiv <vscale x 16 x half> undef, undef
- %NXV32F16 = fdiv <vscale x 32 x half> undef, undef
-
- %V1F16_VP = call <1 x half> @llvm.vp.fdiv.v1f16(<1 x half> undef, <1 x half> undef, <1 x i1> undef, i32 undef)
- %V2F16_VP = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> undef, <2 x half> undef, <2 x i1> undef, i32 undef)
- %V4F16_VP = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> undef, <4 x half> undef, <4 x i1> undef, i32 undef)
- %V8F16_VP = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> undef, <8 x half> undef, <8 x i1> undef, i32 undef)
- %V16F16_VP = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> undef, <16 x half> undef, <16 x i1> undef, i32 undef)
-
- %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x i1> undef, i32 undef)
- %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x i1> undef, i32 undef)
+; NO-ZFHMIN-LABEL: 'fdiv_f16'
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fdiv half poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fdiv <1 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F16 = fdiv <2 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F16 = fdiv <4 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8F16 = fdiv <8 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16F16 = fdiv <16 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32F16 = fdiv <32 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16 = fdiv <vscale x 1 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16 = fdiv <vscale x 2 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16 = fdiv <vscale x 4 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16 = fdiv <vscale x 8 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16 = fdiv <vscale x 16 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV32F16 = fdiv <vscale x 32 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fdiv.v1f16(<1 x half> poison, <1 x half> poison, <1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> poison, <2 x half> poison, <2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> poison, <4 x half> poison, <4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> poison, <8 x half> poison, <8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> poison, <16 x half> poison, <16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %F16 = fdiv half poison, poison
+
+ %V1F16 = fdiv <1 x half> poison, poison
+ %V2F16 = fdiv <2 x half> poison, poison
+ %V4F16 = fdiv <4 x half> poison, poison
+ %V8F16 = fdiv <8 x half> poison, poison
+ %V16F16 = fdiv <16 x half> poison, poison
+ %V32F16 = fdiv <32 x half> poison, poison
+
+ %NXV1F16 = fdiv <vscale x 1 x half> poison, poison
+ %NXV2F16 = fdiv <vscale x 2 x half> poison, poison
+ %NXV4F16 = fdiv <vscale x 4 x half> poison, poison
+ %NXV8F16 = fdiv <vscale x 8 x half> poison, poison
+ %NXV16F16 = fdiv <vscale x 16 x half> poison, poison
+ %NXV32F16 = fdiv <vscale x 32 x half> poison, poison
+
+ %V1F16_VP = call <1 x half> @llvm.vp.fdiv(<1 x half> poison, <1 x half> poison, <1 x i1> poison, i32 poison)
+ %V2F16_VP = call <2 x half> @llvm.vp.fdiv(<2 x half> poison, <2 x half> poison, <2 x i1> poison, i32 poison)
+ %V4F16_VP = call <4 x half> @llvm.vp.fdiv(<4 x half> poison, <4 x half> poison, <4 x i1> poison, i32 poison)
+ %V8F16_VP = call <8 x half> @llvm.vp.fdiv(<8 x half> poison, <8 x half> poison, <8 x i1> poison, i32 poison)
+ %V16F16_VP = call <16 x half> @llvm.vp.fdiv(<16 x half> poison, <16 x half> poison, <16 x i1> poison, i32 poison)
+
+ %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fdiv(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fdiv(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fdiv(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fdiv(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+ %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fdiv(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
ret void
}
define void @frem() {
; CHECK-LABEL: 'frem'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = frem bfloat undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = frem float undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = frem double undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1BF16 = frem <1 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2BF16 = frem <2 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V4BF16 = frem <4 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %V8BF16 = frem <8 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 63 for instruction: %V16BF16 = frem <16 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16 = frem <vscale x 1 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16 = frem <vscale x 2 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16 = frem <vscale x 4 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16 = frem <vscale x 8 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16 = frem <vscale x 16 x bfloat> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F32 = frem <1 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2F32 = frem <2 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V4F32 = frem <4 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %V8F32 = frem <8 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 63 for instruction: %V16F32 = frem <16 x float> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV1F32 = frem <vscale x 1 x float> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV2F32 = frem <vscale x 2 x float> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV4F32 = frem <vscale x 4 x float> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV8F32 = frem <vscale x 8 x float> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV16F32 = frem <vscale x 16 x float> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F64 = frem <1 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2F64 = frem <2 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %V4F64 = frem <4 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %V8F64 = frem <8 x double> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV1F64 = frem <vscale x 1 x double> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV2F64 = frem <vscale x 2 x double> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV4F64 = frem <vscale x 4 x double> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV8F64 = frem <vscale x 8 x double> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V1F16_VP = call <1 x bfloat> @llvm.vp.frem.v1bf16(<1 x bfloat> undef, <1 x bfloat> undef, <1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V2F16_VP = call <2 x bfloat> @llvm.vp.frem.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %V4F16_VP = call <4 x bfloat> @llvm.vp.frem.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 39 for instruction: %V8F16_VP = call <8 x bfloat> @llvm.vp.frem.v8bf16(<8 x bfloat> undef, <8 x bfloat> undef, <8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 94 for instruction: %V16F16_VP = call <16 x bfloat> @llvm.vp.frem.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V1F32_VP = call <1 x float> @llvm.vp.frem.v1f32(<1 x float> undef, <1 x float> undef, <1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V2F32_VP = call <2 x float> @llvm.vp.frem.v2f32(<2 x float> undef, <2 x float> undef, <2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %V4F32_VP = call <4 x float> @llvm.vp.frem.v4f32(<4 x float> undef, <4 x float> undef, <4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 46 for instruction: %V8F32_VP = call <8 x float> @llvm.vp.frem.v8f32(<8 x float> undef, <8 x float> undef, <8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 94 for instruction: %V16F32_VP = call <16 x float> @llvm.vp.frem.v16f32(<16 x float> undef, <16 x float> undef, <16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V1F64_VP = call <1 x double> @llvm.vp.frem.v1f64(<1 x double> undef, <1 x double> undef, <1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V2F64_VP = call <2 x double> @llvm.vp.frem.v2f64(<2 x double> undef, <2 x double> undef, <2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %V4F64_VP = call <4 x double> @llvm.vp.frem.v4f64(<4 x double> undef, <4 x double> undef, <4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 46 for instruction: %V8F64_VP = call <8 x double> @llvm.vp.frem.v8f64(<8 x double> undef, <8 x double> undef, <8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16_VP = call <vscale x 1 x bfloat> @llvm.vp.frem.nxv1bf16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef, <vscale x 1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16_VP = call <vscale x 2 x bfloat> @llvm.vp.frem.nxv2bf16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef, <vscale x 2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16_VP = call <vscale x 4 x bfloat> @llvm.vp.frem.nxv4bf16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef, <vscale x 4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16_VP = call <vscale x 8 x bfloat> @llvm.vp.frem.nxv8bf16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef, <vscale x 8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16_VP = call <vscale x 16 x bfloat> @llvm.vp.frem.nxv16bf16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef, <vscale x 16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.frem.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.frem.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.frem.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.frem.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.frem.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef, <vscale x 16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.frem.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.frem.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.frem.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.frem.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = frem float poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = frem double poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F32 = frem <1 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2F32 = frem <2 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V4F32 = frem <4 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %V8F32 = frem <8 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 63 for instruction: %V16F32 = frem <16 x float> poison, poison
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV1F32 = frem <vscale x 1 x float> poison, poison
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV2F32 = frem <vscale x 2 x float> poison, poison
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV4F32 = frem <vscale x 4 x float> poison, poison
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV8F32 = frem <vscale x 8 x float> poison, poison
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV16F32 = frem <vscale x 16 x float> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F64 = frem <1 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2F64 = frem <2 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %V4F64 = frem <4 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 31 for instruction: %V8F64 = frem <8 x double> poison, poison
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV1F64 = frem <vscale x 1 x double> poison, poison
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV2F64 = frem <vscale x 2 x double> poison, poison
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV4F64 = frem <vscale x 4 x double> poison, poison
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV8F64 = frem <vscale x 8 x double> poison, poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V1F32_VP = call <1 x float> @llvm.vp.frem.v1f32(<1 x float> poison, <1 x float> poison, <1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V2F32_VP = call <2 x float> @llvm.vp.frem.v2f32(<2 x float> poison, <2 x float> poison, <2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %V4F32_VP = call <4 x float> @llvm.vp.frem.v4f32(<4 x float> poison, <4 x float> poison, <4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 46 for instruction: %V8F32_VP = call <8 x float> @llvm.vp.frem.v8f32(<8 x float> poison, <8 x float> poison, <8 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 94 for instruction: %V16F32_VP = call <16 x float> @llvm.vp.frem.v16f32(<16 x float> poison, <16 x float> poison, <16 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V1F64_VP = call <1 x double> @llvm.vp.frem.v1f64(<1 x double> poison, <1 x double> poison, <1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V2F64_VP = call <2 x double> @llvm.vp.frem.v2f64(<2 x double> poison, <2 x double> poison, <2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 22 for instruction: %V4F64_VP = call <4 x double> @llvm.vp.frem.v4f64(<4 x double> poison, <4 x double> poison, <4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 46 for instruction: %V8F64_VP = call <8 x double> @llvm.vp.frem.v8f64(<8 x double> poison, <8 x double> poison, <8 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.frem.nxv1f32(<vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.frem.nxv2f32(<vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.frem.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> poison, <vscale x 4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.frem.nxv8f32(<vscale x 8 x float> poison, <vscale x 8 x float> poison, <vscale x 8 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.frem.nxv16f32(<vscale x 16 x float> poison, <vscale x 16 x float> poison, <vscale x 16 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.frem.nxv1f64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.frem.nxv2f64(<vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.frem.nxv4f64(<vscale x 4 x double> poison, <vscale x 4 x double> poison, <vscale x 4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.frem.nxv8f64(<vscale x 8 x double> poison, <vscale x 8 x double> poison, <vscale x 8 x i1> poison, i32 poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %BF16 = frem bfloat undef, undef
- %F32 = frem float undef, undef
- %F64 = frem double undef, undef
-
- %V1BF16 = frem <1 x bfloat> undef, undef
- %V2BF16 = frem <2 x bfloat> undef, undef
- %V4BF16 = frem <4 x bfloat> undef, undef
- %V8BF16 = frem <8 x bfloat> undef, undef
- %V16BF16 = frem <16 x bfloat> undef, undef
-
- %NXV1BF16 = frem <vscale x 1 x bfloat> undef, undef
- %NXV2BF16 = frem <vscale x 2 x bfloat> undef, undef
- %NXV4BF16 = frem <vscale x 4 x bfloat> undef, undef
- %NXV8BF16 = frem <vscale x 8 x bfloat> undef, undef
- %NXV16BF16 = frem <vscale x 16 x bfloat> undef, undef
-
- %V1F32 = frem <1 x float> undef, undef
- %V2F32 = frem <2 x float> undef, undef
- %V4F32 = frem <4 x float> undef, undef
- %V8F32 = frem <8 x float> undef, undef
- %V16F32 = frem <16 x float> undef, undef
-
- %NXV1F32 = frem <vscale x 1 x float> undef, undef
- %NXV2F32 = frem <vscale x 2 x float> undef, undef
- %NXV4F32 = frem <vscale x 4 x float> undef, undef
- %NXV8F32 = frem <vscale x 8 x float> undef, undef
- %NXV16F32 = frem <vscale x 16 x float> undef, undef
-
- %V1F64 = frem <1 x double> undef, undef
- %V2F64 = frem <2 x double> undef, undef
- %V4F64 = frem <4 x double> undef, undef
- %V8F64 = frem <8 x double> undef, undef
-
- %NXV1F64 = frem <vscale x 1 x double> undef, undef
- %NXV2F64 = frem <vscale x 2 x double> undef, undef
- %NXV4F64 = frem <vscale x 4 x double> undef, undef
- %NXV8F64 = frem <vscale x 8 x double> undef, undef
-
- %V1F16_VP = call <1 x bfloat> @llvm.vp.frem.v1f16(<1 x bfloat> undef, <1 x bfloat> undef, <1 x i1> undef, i32 undef)
- %V2F16_VP = call <2 x bfloat> @llvm.vp.frem.v2f16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x i1> undef, i32 undef)
- %V4F16_VP = call <4 x bfloat> @llvm.vp.frem.v4f16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x i1> undef, i32 undef)
- %V8F16_VP = call <8 x bfloat> @llvm.vp.frem.v8f16(<8 x bfloat> undef, <8 x bfloat> undef, <8 x i1> undef, i32 undef)
- %V16F16_VP = call <16 x bfloat> @llvm.vp.frem.v16f16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x i1> undef, i32 undef)
-
- %V1F32_VP = call <1 x float> @llvm.vp.frem.v1f32(<1 x float> undef, <1 x float> undef, <1 x i1> undef, i32 undef)
- %V2F32_VP = call <2 x float> @llvm.vp.frem.v2f32(<2 x float> undef, <2 x float> undef, <2 x i1> undef, i32 undef)
- %V4F32_VP = call <4 x float> @llvm.vp.frem.v4f32(<4 x float> undef, <4 x float> undef, <4 x i1> undef, i32 undef)
- %V8F32_VP = call <8 x float> @llvm.vp.frem.v8f32(<8 x float> undef, <8 x float> undef, <8 x i1> undef, i32 undef)
- %V16F32_VP = call <16 x float> @llvm.vp.frem.v16f32(<16 x float> undef, <16 x float> undef, <16 x i1> undef, i32 undef)
-
- %V1F64_VP = call <1 x double> @llvm.vp.frem.v1f64(<1 x double> undef, <1 x double> undef, <1 x i1> undef, i32 undef)
- %V2F64_VP = call <2 x double> @llvm.vp.frem.v2f64(<2 x double> undef, <2 x double> undef, <2 x i1> undef, i32 undef)
- %V4F64_VP = call <4 x double> @llvm.vp.frem.v4f64(<4 x double> undef, <4 x double> undef, <4 x i1> undef, i32 undef)
- %V8F64_VP = call <8 x double> @llvm.vp.frem.v8f64(<8 x double> undef, <8 x double> undef, <8 x i1> undef, i32 undef)
-
- %NXV1F16_VP = call <vscale x 1 x bfloat> @llvm.vp.frem.nxv1f16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2F16_VP = call <vscale x 2 x bfloat> @llvm.vp.frem.nxv2f16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4F16_VP = call <vscale x 4 x bfloat> @llvm.vp.frem.nxv4f16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8F16_VP = call <vscale x 8 x bfloat> @llvm.vp.frem.nxv8f16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef, <vscale x 8 x i1> undef, i32 undef)
- %NXV16F16_VP = call <vscale x 16 x bfloat> @llvm.vp.frem.nxv16f16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef, <vscale x 16 x i1> undef, i32 undef)
-
- %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.frem.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.frem.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.frem.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.frem.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x i1> undef, i32 undef)
- %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.frem.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef, <vscale x 16 x i1> undef, i32 undef)
-
- %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.frem.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.frem.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.frem.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.frem.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
+ %F32 = frem float poison, poison
+ %F64 = frem double poison, poison
+
+ %V1F32 = frem <1 x float> poison, poison
+ %V2F32 = frem <2 x float> poison, poison
+ %V4F32 = frem <4 x float> poison, poison
+ %V8F32 = frem <8 x float> poison, poison
+ %V16F32 = frem <16 x float> poison, poison
+
+ %NXV1F32 = frem <vscale x 1 x float> poison, poison
+ %NXV2F32 = frem <vscale x 2 x float> poison, poison
+ %NXV4F32 = frem <vscale x 4 x float> poison, poison
+ %NXV8F32 = frem <vscale x 8 x float> poison, poison
+ %NXV16F32 = frem <vscale x 16 x float> poison, poison
+
+ %V1F64 = frem <1 x double> poison, poison
+ %V2F64 = frem <2 x double> poison, poison
+ %V4F64 = frem <4 x double> poison, poison
+ %V8F64 = frem <8 x double> poison, poison
+
+ %NXV1F64 = frem <vscale x 1 x double> poison, poison
+ %NXV2F64 = frem <vscale x 2 x double> poison, poison
+ %NXV4F64 = frem <vscale x 4 x double> poison, poison
+ %NXV8F64 = frem <vscale x 8 x double> poison, poison
+
+ %V1F32_VP = call <1 x float> @llvm.vp.frem(<1 x float> poison, <1 x float> poison, <1 x i1> poison, i32 poison)
+ %V2F32_VP = call <2 x float> @llvm.vp.frem(<2 x float> poison, <2 x float> poison, <2 x i1> poison, i32 poison)
+ %V4F32_VP = call <4 x float> @llvm.vp.frem(<4 x float> poison, <4 x float> poison, <4 x i1> poison, i32 poison)
+ %V8F32_VP = call <8 x float> @llvm.vp.frem(<8 x float> poison, <8 x float> poison, <8 x i1> poison, i32 poison)
+ %V16F32_VP = call <16 x float> @llvm.vp.frem(<16 x float> poison, <16 x float> poison, <16 x i1> poison, i32 poison)
+
+ %V1F64_VP = call <1 x double> @llvm.vp.frem(<1 x double> poison, <1 x double> poison, <1 x i1> poison, i32 poison)
+ %V2F64_VP = call <2 x double> @llvm.vp.frem(<2 x double> poison, <2 x double> poison, <2 x i1> poison, i32 poison)
+ %V4F64_VP = call <4 x double> @llvm.vp.frem(<4 x double> poison, <4 x double> poison, <4 x i1> poison, i32 poison)
+ %V8F64_VP = call <8 x double> @llvm.vp.frem(<8 x double> poison, <8 x double> poison, <8 x i1> poison, i32 poison)
+
+ %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.frem(<vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.frem(<vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.frem(<vscale x 4 x float> poison, <vscale x 4 x float> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.frem(<vscale x 8 x float> poison, <vscale x 8 x float> poison, <vscale x 8 x i1> poison, i32 poison)
+ %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.frem(<vscale x 16 x float> poison, <vscale x 16 x float> poison, <vscale x 16 x i1> poison, i32 poison)
+
+ %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.frem(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.frem(<vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.frem(<vscale x 4 x double> poison, <vscale x 4 x double> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.frem(<vscale x 8 x double> poison, <vscale x 8 x double> poison, <vscale x 8 x i1> poison, i32 poison)
+
+ ret void
+}
+
+define void @frem_bf16() {
+; ZVFH-LABEL: 'frem_bf16'
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = frem bfloat poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1BF16 = frem <1 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2BF16 = frem <2 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V4BF16 = frem <4 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %V8BF16 = frem <8 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 63 for instruction: %V16BF16 = frem <16 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 127 for instruction: %V32BF16 = frem <32 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16 = frem <vscale x 1 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16 = frem <vscale x 2 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16 = frem <vscale x 4 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16 = frem <vscale x 8 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16 = frem <vscale x 16 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV32BF16 = frem <vscale x 32 x bfloat> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V1BF16_VP = call <1 x bfloat> @llvm.vp.frem.v1bf16(<1 x bfloat> poison, <1 x bfloat> poison, <1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V2BF16_VP = call <2 x bfloat> @llvm.vp.frem.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %V4BF16_VP = call <4 x bfloat> @llvm.vp.frem.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 39 for instruction: %V8BF16_VP = call <8 x bfloat> @llvm.vp.frem.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison, <8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 94 for instruction: %V16BF16_VP = call <16 x bfloat> @llvm.vp.frem.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.frem.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.frem.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.frem.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.frem.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.frem.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; ZVFHMIN-LABEL: 'frem_bf16'
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = frem bfloat poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1BF16 = frem <1 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2BF16 = frem <2 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V4BF16 = frem <4 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %V8BF16 = frem <8 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 63 for instruction: %V16BF16 = frem <16 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 127 for instruction: %V32BF16 = frem <32 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16 = frem <vscale x 1 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16 = frem <vscale x 2 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16 = frem <vscale x 4 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16 = frem <vscale x 8 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16 = frem <vscale x 16 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV32BF16 = frem <vscale x 32 x bfloat> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V1BF16_VP = call <1 x bfloat> @llvm.vp.frem.v1bf16(<1 x bfloat> poison, <1 x bfloat> poison, <1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V2BF16_VP = call <2 x bfloat> @llvm.vp.frem.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %V4BF16_VP = call <4 x bfloat> @llvm.vp.frem.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 39 for instruction: %V8BF16_VP = call <8 x bfloat> @llvm.vp.frem.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison, <8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 94 for instruction: %V16BF16_VP = call <16 x bfloat> @llvm.vp.frem.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.frem.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.frem.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.frem.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.frem.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.frem.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; NO-ZFHMIN-LABEL: 'frem_bf16'
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = frem bfloat poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16 = frem <1 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2BF16 = frem <2 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4BF16 = frem <4 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8BF16 = frem <8 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16BF16 = frem <16 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32BF16 = frem <32 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16 = frem <vscale x 1 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16 = frem <vscale x 2 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16 = frem <vscale x 4 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16 = frem <vscale x 8 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16 = frem <vscale x 16 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV32BF16 = frem <vscale x 32 x bfloat> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16_VP = call <1 x bfloat> @llvm.vp.frem.v1bf16(<1 x bfloat> poison, <1 x bfloat> poison, <1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2BF16_VP = call <2 x bfloat> @llvm.vp.frem.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4BF16_VP = call <4 x bfloat> @llvm.vp.frem.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8BF16_VP = call <8 x bfloat> @llvm.vp.frem.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison, <8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16BF16_VP = call <16 x bfloat> @llvm.vp.frem.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.frem.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.frem.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.frem.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.frem.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.frem.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %BF16 = frem bfloat poison, poison
+
+ %V1BF16 = frem <1 x bfloat> poison, poison
+ %V2BF16 = frem <2 x bfloat> poison, poison
+ %V4BF16 = frem <4 x bfloat> poison, poison
+ %V8BF16 = frem <8 x bfloat> poison, poison
+ %V16BF16 = frem <16 x bfloat> poison, poison
+ %V32BF16 = frem <32 x bfloat> poison, poison
+
+ %NXV1BF16 = frem <vscale x 1 x bfloat> poison, poison
+ %NXV2BF16 = frem <vscale x 2 x bfloat> poison, poison
+ %NXV4BF16 = frem <vscale x 4 x bfloat> poison, poison
+ %NXV8BF16 = frem <vscale x 8 x bfloat> poison, poison
+ %NXV16BF16 = frem <vscale x 16 x bfloat> poison, poison
+ %NXV32BF16 = frem <vscale x 32 x bfloat> poison, poison
+
+ %V1BF16_VP = call <1 x bfloat> @llvm.vp.frem(<1 x bfloat> poison, <1 x bfloat> poison, <1 x i1> poison, i32 poison)
+ %V2BF16_VP = call <2 x bfloat> @llvm.vp.frem(<2 x bfloat> poison, <2 x bfloat> poison, <2 x i1> poison, i32 poison)
+ %V4BF16_VP = call <4 x bfloat> @llvm.vp.frem(<4 x bfloat> poison, <4 x bfloat> poison, <4 x i1> poison, i32 poison)
+ %V8BF16_VP = call <8 x bfloat> @llvm.vp.frem(<8 x bfloat> poison, <8 x bfloat> poison, <8 x i1> poison, i32 poison)
+ %V16BF16_VP = call <16 x bfloat> @llvm.vp.frem(<16 x bfloat> poison, <16 x bfloat> poison, <16 x i1> poison, i32 poison)
+
+ %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.frem(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.frem(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.frem(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.frem(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+ %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.frem(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
ret void
}
define void @frem_f16() {
-; CHECK-LABEL: 'frem_f16'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = frem half undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F16 = frem <1 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2F16 = frem <2 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V4F16 = frem <4 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %V8F16 = frem <8 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 63 for instruction: %V16F16 = frem <16 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 127 for instruction: %V32F16 = frem <32 x half> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16 = frem <vscale x 1 x half> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16 = frem <vscale x 2 x half> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16 = frem <vscale x 4 x half> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16 = frem <vscale x 8 x half> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16 = frem <vscale x 16 x half> undef, undef
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV32F16 = frem <vscale x 32 x half> undef, undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; ZVFH-LABEL: 'frem_f16'
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = frem half poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F16 = frem <1 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2F16 = frem <2 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V4F16 = frem <4 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %V8F16 = frem <8 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 63 for instruction: %V16F16 = frem <16 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 127 for instruction: %V32F16 = frem <32 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16 = frem <vscale x 1 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16 = frem <vscale x 2 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16 = frem <vscale x 4 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16 = frem <vscale x 8 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16 = frem <vscale x 16 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV32F16 = frem <vscale x 32 x half> poison, poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.frem.v1f16(<1 x half> poison, <1 x half> poison, <1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.frem.v2f16(<2 x half> poison, <2 x half> poison, <2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.frem.v4f16(<4 x half> poison, <4 x half> poison, <4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 39 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.frem.v8f16(<8 x half> poison, <8 x half> poison, <8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 94 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.frem.v16f16(<16 x half> poison, <16 x half> poison, <16 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.frem.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.frem.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.frem.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.frem.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.frem.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; ZVFHMIN-LABEL: 'frem_f16'
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = frem half poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %V1F16 = frem <1 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %V2F16 = frem <2 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 12 for instruction: %V4F16 = frem <4 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 24 for instruction: %V8F16 = frem <8 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 63 for instruction: %V16F16 = frem <16 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 127 for instruction: %V32F16 = frem <32 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16 = frem <vscale x 1 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16 = frem <vscale x 2 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16 = frem <vscale x 4 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16 = frem <vscale x 8 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16 = frem <vscale x 16 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV32F16 = frem <vscale x 32 x half> poison, poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.frem.v1f16(<1 x half> poison, <1 x half> poison, <1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 9 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.frem.v2f16(<2 x half> poison, <2 x half> poison, <2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.frem.v4f16(<4 x half> poison, <4 x half> poison, <4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 39 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.frem.v8f16(<8 x half> poison, <8 x half> poison, <8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 94 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.frem.v16f16(<16 x half> poison, <16 x half> poison, <16 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.frem.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.frem.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.frem.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.frem.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.frem.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %F16 = frem half undef, undef
-
- %V1F16 = frem <1 x half> undef, undef
- %V2F16 = frem <2 x half> undef, undef
- %V4F16 = frem <4 x half> undef, undef
- %V8F16 = frem <8 x half> undef, undef
- %V16F16 = frem <16 x half> undef, undef
- %V32F16 = frem <32 x half> undef, undef
-
- %NXV1F16 = frem <vscale x 1 x half> undef, undef
- %NXV2F16 = frem <vscale x 2 x half> undef, undef
- %NXV4F16 = frem <vscale x 4 x half> undef, undef
- %NXV8F16 = frem <vscale x 8 x half> undef, undef
- %NXV16F16 = frem <vscale x 16 x half> undef, undef
- %NXV32F16 = frem <vscale x 32 x half> undef, undef
+; NO-ZFHMIN-LABEL: 'frem_f16'
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = frem half poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = frem <1 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F16 = frem <2 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F16 = frem <4 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8F16 = frem <8 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16F16 = frem <16 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32F16 = frem <32 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16 = frem <vscale x 1 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16 = frem <vscale x 2 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16 = frem <vscale x 4 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16 = frem <vscale x 8 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16 = frem <vscale x 16 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV32F16 = frem <vscale x 32 x half> poison, poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.frem.v1f16(<1 x half> poison, <1 x half> poison, <1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.frem.v2f16(<2 x half> poison, <2 x half> poison, <2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.frem.v4f16(<4 x half> poison, <4 x half> poison, <4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.frem.v8f16(<8 x half> poison, <8 x half> poison, <8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.frem.v16f16(<16 x half> poison, <16 x half> poison, <16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.frem.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.frem.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.frem.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.frem.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.frem.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %F16 = frem half poison, poison
+
+ %V1F16 = frem <1 x half> poison, poison
+ %V2F16 = frem <2 x half> poison, poison
+ %V4F16 = frem <4 x half> poison, poison
+ %V8F16 = frem <8 x half> poison, poison
+ %V16F16 = frem <16 x half> poison, poison
+ %V32F16 = frem <32 x half> poison, poison
+
+ %NXV1F16 = frem <vscale x 1 x half> poison, poison
+ %NXV2F16 = frem <vscale x 2 x half> poison, poison
+ %NXV4F16 = frem <vscale x 4 x half> poison, poison
+ %NXV8F16 = frem <vscale x 8 x half> poison, poison
+ %NXV16F16 = frem <vscale x 16 x half> poison, poison
+ %NXV32F16 = frem <vscale x 32 x half> poison, poison
+
+ %V1F16_VP = call <1 x half> @llvm.vp.frem(<1 x half> poison, <1 x half> poison, <1 x i1> poison, i32 poison)
+ %V2F16_VP = call <2 x half> @llvm.vp.frem(<2 x half> poison, <2 x half> poison, <2 x i1> poison, i32 poison)
+ %V4F16_VP = call <4 x half> @llvm.vp.frem(<4 x half> poison, <4 x half> poison, <4 x i1> poison, i32 poison)
+ %V8F16_VP = call <8 x half> @llvm.vp.frem(<8 x half> poison, <8 x half> poison, <8 x i1> poison, i32 poison)
+ %V16F16_VP = call <16 x half> @llvm.vp.frem(<16 x half> poison, <16 x half> poison, <16 x i1> poison, i32 poison)
+
+ %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.frem(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.frem(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.frem(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.frem(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+ %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.frem(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
ret void
}
define void @fneg() {
; CHECK-LABEL: 'fneg'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fneg half undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = fneg float undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = fneg double undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16 = fneg <1 x bfloat> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2BF16 = fneg <2 x bfloat> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4BF16 = fneg <4 x bfloat> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8BF16 = fneg <8 x bfloat> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16BF16 = fneg <16 x bfloat> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1BF16 = fneg <vscale x 1 x bfloat> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2BF16 = fneg <vscale x 2 x bfloat> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4BF16 = fneg <vscale x 4 x bfloat> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8BF16 = fneg <vscale x 8 x bfloat> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16BF16 = fneg <vscale x 16 x bfloat> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = fneg <1 x float> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = fneg <2 x float> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fneg <4 x float> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F32 = fneg <8 x float> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16F32 = fneg <16 x float> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F32 = fneg <vscale x 1 x float> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F32 = fneg <vscale x 2 x float> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4F32 = fneg <vscale x 4 x float> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8F32 = fneg <vscale x 8 x float> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV16F32 = fneg <vscale x 16 x float> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64 = fneg <1 x double> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64 = fneg <2 x double> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4F64 = fneg <4 x double> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8F64 = fneg <8 x double> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F64 = fneg <vscale x 1 x double> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64 = fneg <vscale x 2 x double> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64 = fneg <vscale x 4 x double> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64 = fneg <vscale x 8 x double> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16_VP = call <1 x bfloat> @llvm.vp.fneg.v1bf16(<1 x bfloat> undef, <1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2BF16_VP = call <2 x bfloat> @llvm.vp.fneg.v2bf16(<2 x bfloat> undef, <2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4BF16_VP = call <4 x bfloat> @llvm.vp.fneg.v4bf16(<4 x bfloat> undef, <4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8BF16_VP = call <8 x bfloat> @llvm.vp.fneg.v8bf16(<8 x bfloat> undef, <8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16BF16_VP = call <16 x bfloat> @llvm.vp.fneg.v16bf16(<16 x bfloat> undef, <16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32_VP = call <1 x float> @llvm.vp.fneg.v1f32(<1 x float> undef, <1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32_VP = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> undef, <2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32_VP = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> undef, <4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F32_VP = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> undef, <8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16F32_VP = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> undef, <16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64_VP = call <1 x double> @llvm.vp.fneg.v1f64(<1 x double> undef, <1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64_VP = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> undef, <2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4F64_VP = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> undef, <4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8F64_VP = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> undef, <8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V16F64_VP = call <16 x double> @llvm.vp.fneg.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fneg.nxv1bf16(<vscale x 1 x bfloat> undef, <vscale x 1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fneg.nxv2bf16(<vscale x 2 x bfloat> undef, <vscale x 2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fneg.nxv4bf16(<vscale x 4 x bfloat> undef, <vscale x 4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fneg.nxv8bf16(<vscale x 8 x bfloat> undef, <vscale x 8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fneg.nxv16bf16(<vscale x 16 x bfloat> undef, <vscale x 16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1BF32_VP = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2BF32_VP = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4BF32_VP = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8BF32_VP = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV16BF32_VP = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1BF64_VP = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2BF64_VP = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4BF64_VP = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8BF64_VP = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %NXV16BF64_VP = call <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double> undef, <vscale x 16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F32 = fneg float poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F64 = fneg double poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = fneg <1 x float> poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = fneg <2 x float> poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = fneg <4 x float> poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F32 = fneg <8 x float> poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16F32 = fneg <16 x float> poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F32 = fneg <vscale x 1 x float> poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F32 = fneg <vscale x 2 x float> poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4F32 = fneg <vscale x 4 x float> poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8F32 = fneg <vscale x 8 x float> poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV16F32 = fneg <vscale x 16 x float> poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64 = fneg <1 x double> poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64 = fneg <2 x double> poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4F64 = fneg <4 x double> poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8F64 = fneg <8 x double> poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F64 = fneg <vscale x 1 x double> poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64 = fneg <vscale x 2 x double> poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64 = fneg <vscale x 4 x double> poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64 = fneg <vscale x 8 x double> poison
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32_VP = call <1 x float> @llvm.vp.fneg.v1f32(<1 x float> poison, <1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32_VP = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> poison, <2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32_VP = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> poison, <4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F32_VP = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> poison, <8 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V16F32_VP = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> poison, <16 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64_VP = call <1 x double> @llvm.vp.fneg.v1f64(<1 x double> poison, <1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64_VP = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> poison, <2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V4F64_VP = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> poison, <4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V8F64_VP = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> poison, <8 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V16F64_VP = call <16 x double> @llvm.vp.fneg.v16f64(<16 x double> poison, <16 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> poison, <vscale x 1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> poison, <vscale x 2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> poison, <vscale x 8 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> poison, <vscale x 16 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> poison, <vscale x 1 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> poison, <vscale x 2 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> poison, <vscale x 4 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> poison, <vscale x 8 x i1> poison, i32 poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %NXV16F64_VP = call <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double> poison, <vscale x 16 x i1> poison, i32 poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %BF16 = fneg half undef
- %F32 = fneg float undef
- %F64 = fneg double undef
-
- %V1BF16 = fneg <1 x bfloat> undef
- %V2BF16 = fneg <2 x bfloat> undef
- %V4BF16 = fneg <4 x bfloat> undef
- %V8BF16 = fneg <8 x bfloat> undef
- %V16BF16 = fneg <16 x bfloat> undef
-
- %NXV1BF16 = fneg <vscale x 1 x bfloat> undef
- %NXV2BF16 = fneg <vscale x 2 x bfloat> undef
- %NXV4BF16 = fneg <vscale x 4 x bfloat> undef
- %NXV8BF16 = fneg <vscale x 8 x bfloat> undef
- %NXV16BF16 = fneg <vscale x 16 x bfloat> undef
-
- %V1F32 = fneg <1 x float> undef
- %V2F32 = fneg <2 x float> undef
- %V4F32 = fneg <4 x float> undef
- %V8F32 = fneg <8 x float> undef
- %V16F32 = fneg <16 x float> undef
-
- %NXV1F32 = fneg <vscale x 1 x float> undef
- %NXV2F32 = fneg <vscale x 2 x float> undef
- %NXV4F32 = fneg <vscale x 4 x float> undef
- %NXV8F32 = fneg <vscale x 8 x float> undef
- %NXV16F32 = fneg <vscale x 16 x float> undef
-
- %V1F64 = fneg <1 x double> undef
- %V2F64 = fneg <2 x double> undef
- %V4F64 = fneg <4 x double> undef
- %V8F64 = fneg <8 x double> undef
-
- %NXV1F64 = fneg <vscale x 1 x double> undef
- %NXV2F64 = fneg <vscale x 2 x double> undef
- %NXV4F64 = fneg <vscale x 4 x double> undef
- %NXV8F64 = fneg <vscale x 8 x double> undef
-
- %V1BF16_VP = call <1 x bfloat> @llvm.vp.fneg.v1f16(<1 x bfloat> undef, <1 x i1> undef, i32 undef)
- %V2BF16_VP = call <2 x bfloat> @llvm.vp.fneg.v2f16(<2 x bfloat> undef, <2 x i1> undef, i32 undef)
- %V4BF16_VP = call <4 x bfloat> @llvm.vp.fneg.v4f16(<4 x bfloat> undef, <4 x i1> undef, i32 undef)
- %V8BF16_VP = call <8 x bfloat> @llvm.vp.fneg.v8f16(<8 x bfloat> undef, <8 x i1> undef, i32 undef)
- %V16BF16_VP = call <16 x bfloat> @llvm.vp.fneg.v16f16(<16 x bfloat> undef, <16 x i1> undef, i32 undef)
-
- %V1F32_VP = call <1 x float> @llvm.vp.fneg.v1f32(<1 x float> undef, <1 x i1> undef, i32 undef)
- %V2F32_VP = call <2 x float> @llvm.vp.fneg.v2f32(<2 x float> undef, <2 x i1> undef, i32 undef)
- %V4F32_VP = call <4 x float> @llvm.vp.fneg.v4f32(<4 x float> undef, <4 x i1> undef, i32 undef)
- %V8F32_VP = call <8 x float> @llvm.vp.fneg.v8f32(<8 x float> undef, <8 x i1> undef, i32 undef)
- %V16F32_VP = call <16 x float> @llvm.vp.fneg.v16f32(<16 x float> undef, <16 x i1> undef, i32 undef)
-
- %V1F64_VP = call <1 x double> @llvm.vp.fneg.v1f64(<1 x double> undef, <1 x i1> undef, i32 undef)
- %V2F64_VP = call <2 x double> @llvm.vp.fneg.v2f64(<2 x double> undef, <2 x i1> undef, i32 undef)
- %V4F64_VP = call <4 x double> @llvm.vp.fneg.v4f64(<4 x double> undef, <4 x i1> undef, i32 undef)
- %V8F64_VP = call <8 x double> @llvm.vp.fneg.v8f64(<8 x double> undef, <8 x i1> undef, i32 undef)
- %V16F64_VP = call <16 x double> @llvm.vp.fneg.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
-
- %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fneg.nxv1f16(<vscale x 1 x bfloat> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fneg.nxv2f16(<vscale x 2 x bfloat> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fneg.nxv4f16(<vscale x 4 x bfloat> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fneg.nxv8f16(<vscale x 8 x bfloat> undef, <vscale x 8 x i1> undef, i32 undef)
- %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fneg.nxv16f16(<vscale x 16 x bfloat> undef, <vscale x 16 x i1> undef, i32 undef)
-
- %NXV1BF32_VP = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2BF32_VP = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4BF32_VP = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8BF32_VP = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x i1> undef, i32 undef)
- %NXV16BF32_VP = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x i1> undef, i32 undef)
-
- %NXV1BF64_VP = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
- %NXV2BF64_VP = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
- %NXV4BF64_VP = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
- %NXV8BF64_VP = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
- %NXV16BF64_VP = call <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double> undef, <vscale x 16 x i1> undef, i32 undef)
+ %F32 = fneg float poison
+ %F64 = fneg double poison
+
+ %V1F32 = fneg <1 x float> poison
+ %V2F32 = fneg <2 x float> poison
+ %V4F32 = fneg <4 x float> poison
+ %V8F32 = fneg <8 x float> poison
+ %V16F32 = fneg <16 x float> poison
+
+ %NXV1F32 = fneg <vscale x 1 x float> poison
+ %NXV2F32 = fneg <vscale x 2 x float> poison
+ %NXV4F32 = fneg <vscale x 4 x float> poison
+ %NXV8F32 = fneg <vscale x 8 x float> poison
+ %NXV16F32 = fneg <vscale x 16 x float> poison
+
+ %V1F64 = fneg <1 x double> poison
+ %V2F64 = fneg <2 x double> poison
+ %V4F64 = fneg <4 x double> poison
+ %V8F64 = fneg <8 x double> poison
+
+ %NXV1F64 = fneg <vscale x 1 x double> poison
+ %NXV2F64 = fneg <vscale x 2 x double> poison
+ %NXV4F64 = fneg <vscale x 4 x double> poison
+ %NXV8F64 = fneg <vscale x 8 x double> poison
+
+ %V1F32_VP = call <1 x float> @llvm.vp.fneg(<1 x float> poison, <1 x i1> poison, i32 poison)
+ %V2F32_VP = call <2 x float> @llvm.vp.fneg(<2 x float> poison, <2 x i1> poison, i32 poison)
+ %V4F32_VP = call <4 x float> @llvm.vp.fneg(<4 x float> poison, <4 x i1> poison, i32 poison)
+ %V8F32_VP = call <8 x float> @llvm.vp.fneg(<8 x float> poison, <8 x i1> poison, i32 poison)
+ %V16F32_VP = call <16 x float> @llvm.vp.fneg(<16 x float> poison, <16 x i1> poison, i32 poison)
+
+ %V1F64_VP = call <1 x double> @llvm.vp.fneg(<1 x double> poison, <1 x i1> poison, i32 poison)
+ %V2F64_VP = call <2 x double> @llvm.vp.fneg(<2 x double> poison, <2 x i1> poison, i32 poison)
+ %V4F64_VP = call <4 x double> @llvm.vp.fneg(<4 x double> poison, <4 x i1> poison, i32 poison)
+ %V8F64_VP = call <8 x double> @llvm.vp.fneg(<8 x double> poison, <8 x i1> poison, i32 poison)
+ %V16F64_VP = call <16 x double> @llvm.vp.fneg(<16 x double> poison, <16 x i1> poison, i32 poison)
+
+ %NXV1F32_VP = call <vscale x 1 x float> @llvm.vp.fneg(<vscale x 1 x float> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2F32_VP = call <vscale x 2 x float> @llvm.vp.fneg(<vscale x 2 x float> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4F32_VP = call <vscale x 4 x float> @llvm.vp.fneg(<vscale x 4 x float> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8F32_VP = call <vscale x 8 x float> @llvm.vp.fneg(<vscale x 8 x float> poison, <vscale x 8 x i1> poison, i32 poison)
+ %NXV16F32_VP = call <vscale x 16 x float> @llvm.vp.fneg(<vscale x 16 x float> poison, <vscale x 16 x i1> poison, i32 poison)
+
+ %NXV1F64_VP = call <vscale x 1 x double> @llvm.vp.fneg(<vscale x 1 x double> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2F64_VP = call <vscale x 2 x double> @llvm.vp.fneg(<vscale x 2 x double> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4F64_VP = call <vscale x 4 x double> @llvm.vp.fneg(<vscale x 4 x double> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8F64_VP = call <vscale x 8 x double> @llvm.vp.fneg(<vscale x 8 x double> poison, <vscale x 8 x i1> poison, i32 poison)
+ %NXV16F64_VP = call <vscale x 16 x double> @llvm.vp.fneg(<vscale x 16 x double> poison, <vscale x 16 x i1> poison, i32 poison)
+
+ ret void
+}
+
+define void @fneg_bf16() {
+; ZVFH-LABEL: 'fneg_bf16'
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fneg bfloat poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16 = fneg <1 x bfloat> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2BF16 = fneg <2 x bfloat> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4BF16 = fneg <4 x bfloat> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8BF16 = fneg <8 x bfloat> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16BF16 = fneg <16 x bfloat> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32BF16 = fneg <32 x bfloat> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1BF16 = fneg <vscale x 1 x bfloat> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2BF16 = fneg <vscale x 2 x bfloat> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4BF16 = fneg <vscale x 4 x bfloat> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8BF16 = fneg <vscale x 8 x bfloat> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16BF16 = fneg <vscale x 16 x bfloat> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32BF16 = fneg <vscale x 32 x bfloat> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16_VP = call <1 x bfloat> @llvm.vp.fneg.v1bf16(<1 x bfloat> poison, <1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2BF16_VP = call <2 x bfloat> @llvm.vp.fneg.v2bf16(<2 x bfloat> poison, <2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4BF16_VP = call <4 x bfloat> @llvm.vp.fneg.v4bf16(<4 x bfloat> poison, <4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8BF16_VP = call <8 x bfloat> @llvm.vp.fneg.v8bf16(<8 x bfloat> poison, <8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16BF16_VP = call <16 x bfloat> @llvm.vp.fneg.v16bf16(<16 x bfloat> poison, <16 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fneg.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fneg.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fneg.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fneg.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fneg.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; ZVFHMIN-LABEL: 'fneg_bf16'
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fneg bfloat poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16 = fneg <1 x bfloat> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2BF16 = fneg <2 x bfloat> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4BF16 = fneg <4 x bfloat> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8BF16 = fneg <8 x bfloat> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16BF16 = fneg <16 x bfloat> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32BF16 = fneg <32 x bfloat> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1BF16 = fneg <vscale x 1 x bfloat> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2BF16 = fneg <vscale x 2 x bfloat> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4BF16 = fneg <vscale x 4 x bfloat> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8BF16 = fneg <vscale x 8 x bfloat> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16BF16 = fneg <vscale x 16 x bfloat> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32BF16 = fneg <vscale x 32 x bfloat> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16_VP = call <1 x bfloat> @llvm.vp.fneg.v1bf16(<1 x bfloat> poison, <1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2BF16_VP = call <2 x bfloat> @llvm.vp.fneg.v2bf16(<2 x bfloat> poison, <2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4BF16_VP = call <4 x bfloat> @llvm.vp.fneg.v4bf16(<4 x bfloat> poison, <4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8BF16_VP = call <8 x bfloat> @llvm.vp.fneg.v8bf16(<8 x bfloat> poison, <8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16BF16_VP = call <16 x bfloat> @llvm.vp.fneg.v16bf16(<16 x bfloat> poison, <16 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fneg.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fneg.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fneg.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fneg.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fneg.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; NO-ZFHMIN-LABEL: 'fneg_bf16'
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %BF16 = fneg bfloat poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16 = fneg <1 x bfloat> poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2BF16 = fneg <2 x bfloat> poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4BF16 = fneg <4 x bfloat> poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8BF16 = fneg <8 x bfloat> poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16BF16 = fneg <16 x bfloat> poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32BF16 = fneg <32 x bfloat> poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16 = fneg <vscale x 1 x bfloat> poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16 = fneg <vscale x 2 x bfloat> poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16 = fneg <vscale x 4 x bfloat> poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16 = fneg <vscale x 8 x bfloat> poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16 = fneg <vscale x 16 x bfloat> poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV32BF16 = fneg <vscale x 32 x bfloat> poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1BF16_VP = call <1 x bfloat> @llvm.vp.fneg.v1bf16(<1 x bfloat> poison, <1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2BF16_VP = call <2 x bfloat> @llvm.vp.fneg.v2bf16(<2 x bfloat> poison, <2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4BF16_VP = call <4 x bfloat> @llvm.vp.fneg.v4bf16(<4 x bfloat> poison, <4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8BF16_VP = call <8 x bfloat> @llvm.vp.fneg.v8bf16(<8 x bfloat> poison, <8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16BF16_VP = call <16 x bfloat> @llvm.vp.fneg.v16bf16(<16 x bfloat> poison, <16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fneg.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fneg.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fneg.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fneg.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fneg.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %BF16 = fneg bfloat poison
+
+ %V1BF16 = fneg <1 x bfloat> poison
+ %V2BF16 = fneg <2 x bfloat> poison
+ %V4BF16 = fneg <4 x bfloat> poison
+ %V8BF16 = fneg <8 x bfloat> poison
+ %V16BF16 = fneg <16 x bfloat> poison
+ %V32BF16 = fneg <32 x bfloat> poison
+
+ %NXV1BF16 = fneg <vscale x 1 x bfloat> poison
+ %NXV2BF16 = fneg <vscale x 2 x bfloat> poison
+ %NXV4BF16 = fneg <vscale x 4 x bfloat> poison
+ %NXV8BF16 = fneg <vscale x 8 x bfloat> poison
+ %NXV16BF16 = fneg <vscale x 16 x bfloat> poison
+ %NXV32BF16 = fneg <vscale x 32 x bfloat> poison
+
+ %V1BF16_VP = call <1 x bfloat> @llvm.vp.fneg(<1 x bfloat> poison, <1 x i1> poison, i32 poison)
+ %V2BF16_VP = call <2 x bfloat> @llvm.vp.fneg(<2 x bfloat> poison, <2 x i1> poison, i32 poison)
+ %V4BF16_VP = call <4 x bfloat> @llvm.vp.fneg(<4 x bfloat> poison, <4 x i1> poison, i32 poison)
+ %V8BF16_VP = call <8 x bfloat> @llvm.vp.fneg(<8 x bfloat> poison, <8 x i1> poison, i32 poison)
+ %V16BF16_VP = call <16 x bfloat> @llvm.vp.fneg(<16 x bfloat> poison, <16 x i1> poison, i32 poison)
+
+ %NXV1BF16_VP = call <vscale x 1 x bfloat> @llvm.vp.fneg(<vscale x 1 x bfloat> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2BF16_VP = call <vscale x 2 x bfloat> @llvm.vp.fneg(<vscale x 2 x bfloat> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4BF16_VP = call <vscale x 4 x bfloat> @llvm.vp.fneg(<vscale x 4 x bfloat> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8BF16_VP = call <vscale x 8 x bfloat> @llvm.vp.fneg(<vscale x 8 x bfloat> poison, <vscale x 8 x i1> poison, i32 poison)
+ %NXV16BF16_VP = call <vscale x 16 x bfloat> @llvm.vp.fneg(<vscale x 16 x bfloat> poison, <vscale x 16 x i1> poison, i32 poison)
ret void
}
define void @fneg_f16() {
-; CHECK-LABEL: 'fneg_f16'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fneg half undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fneg <1 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = fneg <2 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = fneg <4 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = fneg <8 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16 = fneg <16 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32F16 = fneg <32 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 = fneg <vscale x 1 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16 = fneg <vscale x 2 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = fneg <vscale x 4 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16 = fneg <vscale x 8 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16 = fneg <vscale x 16 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fneg <vscale x 32 x half> undef
-; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; ZVFH-LABEL: 'fneg_f16'
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fneg half poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fneg <1 x half> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = fneg <2 x half> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = fneg <4 x half> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = fneg <8 x half> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16 = fneg <16 x half> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32F16 = fneg <32 x half> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 = fneg <vscale x 1 x half> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16 = fneg <vscale x 2 x half> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = fneg <vscale x 4 x half> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16 = fneg <vscale x 8 x half> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16 = fneg <vscale x 16 x half> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fneg <vscale x 32 x half> poison
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fneg.v1f16(<1 x half> poison, <1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> poison, <2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> poison, <4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> poison, <8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> poison, <16 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; ZVFHMIN-LABEL: 'fneg_f16'
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fneg half poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fneg <1 x half> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = fneg <2 x half> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = fneg <4 x half> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = fneg <8 x half> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16 = fneg <16 x half> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V32F16 = fneg <32 x half> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16 = fneg <vscale x 1 x half> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16 = fneg <vscale x 2 x half> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16 = fneg <vscale x 4 x half> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16 = fneg <vscale x 8 x half> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16 = fneg <vscale x 16 x half> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %NXV32F16 = fneg <vscale x 32 x half> poison
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fneg.v1f16(<1 x half> poison, <1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> poison, <2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> poison, <4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> poison, <8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> poison, <16 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %F16 = fneg half undef
-
- %V1F16 = fneg <1 x half> undef
- %V2F16 = fneg <2 x half> undef
- %V4F16 = fneg <4 x half> undef
- %V8F16 = fneg <8 x half> undef
- %V16F16 = fneg <16 x half> undef
- %V32F16 = fneg <32 x half> undef
-
- %NXV1F16 = fneg <vscale x 1 x half> undef
- %NXV2F16 = fneg <vscale x 2 x half> undef
- %NXV4F16 = fneg <vscale x 4 x half> undef
- %NXV8F16 = fneg <vscale x 8 x half> undef
- %NXV16F16 = fneg <vscale x 16 x half> undef
- %NXV32F16 = fneg <vscale x 32 x half> undef
+; NO-ZFHMIN-LABEL: 'fneg_f16'
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %F16 = fneg half poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = fneg <1 x half> poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F16 = fneg <2 x half> poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F16 = fneg <4 x half> poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8F16 = fneg <8 x half> poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16F16 = fneg <16 x half> poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32F16 = fneg <32 x half> poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16 = fneg <vscale x 1 x half> poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16 = fneg <vscale x 2 x half> poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16 = fneg <vscale x 4 x half> poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16 = fneg <vscale x 8 x half> poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16 = fneg <vscale x 16 x half> poison
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV32F16 = fneg <vscale x 32 x half> poison
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16_VP = call <1 x half> @llvm.vp.fneg.v1f16(<1 x half> poison, <1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F16_VP = call <2 x half> @llvm.vp.fneg.v2f16(<2 x half> poison, <2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F16_VP = call <4 x half> @llvm.vp.fneg.v4f16(<4 x half> poison, <4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8F16_VP = call <8 x half> @llvm.vp.fneg.v8f16(<8 x half> poison, <8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16F16_VP = call <16 x half> @llvm.vp.fneg.v16f16(<16 x half> poison, <16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fneg.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fneg.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fneg.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fneg.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fneg.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %F16 = fneg half poison
+
+ %V1F16 = fneg <1 x half> poison
+ %V2F16 = fneg <2 x half> poison
+ %V4F16 = fneg <4 x half> poison
+ %V8F16 = fneg <8 x half> poison
+ %V16F16 = fneg <16 x half> poison
+ %V32F16 = fneg <32 x half> poison
+
+ %NXV1F16 = fneg <vscale x 1 x half> poison
+ %NXV2F16 = fneg <vscale x 2 x half> poison
+ %NXV4F16 = fneg <vscale x 4 x half> poison
+ %NXV8F16 = fneg <vscale x 8 x half> poison
+ %NXV16F16 = fneg <vscale x 16 x half> poison
+ %NXV32F16 = fneg <vscale x 32 x half> poison
+
+ %V1F16_VP = call <1 x half> @llvm.vp.fneg(<1 x half> poison, <1 x i1> poison, i32 poison)
+ %V2F16_VP = call <2 x half> @llvm.vp.fneg(<2 x half> poison, <2 x i1> poison, i32 poison)
+ %V4F16_VP = call <4 x half> @llvm.vp.fneg(<4 x half> poison, <4 x i1> poison, i32 poison)
+ %V8F16_VP = call <8 x half> @llvm.vp.fneg(<8 x half> poison, <8 x i1> poison, i32 poison)
+ %V16F16_VP = call <16 x half> @llvm.vp.fneg(<16 x half> poison, <16 x i1> poison, i32 poison)
+
+ %NXV1F16_VP = call <vscale x 1 x half> @llvm.vp.fneg(<vscale x 1 x half> poison, <vscale x 1 x i1> poison, i32 poison)
+ %NXV2F16_VP = call <vscale x 2 x half> @llvm.vp.fneg(<vscale x 2 x half> poison, <vscale x 2 x i1> poison, i32 poison)
+ %NXV4F16_VP = call <vscale x 4 x half> @llvm.vp.fneg(<vscale x 4 x half> poison, <vscale x 4 x i1> poison, i32 poison)
+ %NXV8F16_VP = call <vscale x 8 x half> @llvm.vp.fneg(<vscale x 8 x half> poison, <vscale x 8 x i1> poison, i32 poison)
+ %NXV16F16_VP = call <vscale x 16 x half> @llvm.vp.fneg(<vscale x 16 x half> poison, <vscale x 16 x i1> poison, i32 poison)
ret void
}
define void @fcopysign() {
; CHECK-LABEL: 'fcopysign'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %BF16 = call bfloat @llvm.copysign.bf16(bfloat undef, bfloat undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F32 = call float @llvm.copysign.f32(float undef, float undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F64 = call double @llvm.copysign.f64(double undef, double undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V1BF16 = call <1 x bfloat> @llvm.copysign.v1bf16(<1 x bfloat> undef, <1 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2BF16 = call <2 x bfloat> @llvm.copysign.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4BF16 = call <4 x bfloat> @llvm.copysign.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8BF16 = call <8 x bfloat> @llvm.copysign.v8bf16(<8 x bfloat> undef, <8 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %V16BF16 = call <16 x bfloat> @llvm.copysign.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16 = call <vscale x 1 x bfloat> @llvm.copysign.nxv1bf16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16 = call <vscale x 2 x bfloat> @llvm.copysign.nxv2bf16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16 = call <vscale x 4 x bfloat> @llvm.copysign.nxv4bf16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16 = call <vscale x 8 x bfloat> @llvm.copysign.nxv8bf16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16 = call <vscale x 16 x bfloat> @llvm.copysign.nxv16bf16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = call <1 x float> @llvm.copysign.v1f32(<1 x float> undef, <1 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = call <2 x float> @llvm.copysign.v2f32(<2 x float> undef, <2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = call <4 x float> @llvm.copysign.v4f32(<4 x float> undef, <4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F32 = call <8 x float> @llvm.copysign.v8f32(<8 x float> undef, <8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16F32 = call <16 x float> @llvm.copysign.v16f32(<16 x float> undef, <16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1F32 = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2F32 = call <vscale x 2 x float> @llvm.copysign.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV4F32 = call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV8F32 = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV16F32 = call <vscale x 16 x float> @llvm.copysign.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64 = call <1 x double> @llvm.copysign.v1f64(<1 x double> undef, <1 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64 = call <2 x double> @llvm.copysign.v2f64(<2 x double> undef, <2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F64 = call <4 x double> @llvm.copysign.v4f64(<4 x double> undef, <4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F64 = call <8 x double> @llvm.copysign.v8f64(<8 x double> undef, <8 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1F64 = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2F64 = call <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV4F64 = call <vscale x 4 x double> @llvm.copysign.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV8F64 = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F32 = call float @llvm.copysign.f32(float poison, float poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F64 = call double @llvm.copysign.f64(double poison, double poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F32 = call <1 x float> @llvm.copysign.v1f32(<1 x float> poison, <1 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F32 = call <2 x float> @llvm.copysign.v2f32(<2 x float> poison, <2 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F32 = call <4 x float> @llvm.copysign.v4f32(<4 x float> poison, <4 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F32 = call <8 x float> @llvm.copysign.v8f32(<8 x float> poison, <8 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16F32 = call <16 x float> @llvm.copysign.v16f32(<16 x float> poison, <16 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1F32 = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> poison, <vscale x 1 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2F32 = call <vscale x 2 x float> @llvm.copysign.nxv2f32(<vscale x 2 x float> poison, <vscale x 2 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV4F32 = call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV8F32 = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> poison, <vscale x 8 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV16F32 = call <vscale x 16 x float> @llvm.copysign.nxv16f32(<vscale x 16 x float> poison, <vscale x 16 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F64 = call <1 x double> @llvm.copysign.v1f64(<1 x double> poison, <1 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F64 = call <2 x double> @llvm.copysign.v2f64(<2 x double> poison, <2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F64 = call <4 x double> @llvm.copysign.v4f64(<4 x double> poison, <4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F64 = call <8 x double> @llvm.copysign.v8f64(<8 x double> poison, <8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1F64 = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> poison, <vscale x 1 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2F64 = call <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> poison, <vscale x 2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV4F64 = call <vscale x 4 x double> @llvm.copysign.nxv4f64(<vscale x 4 x double> poison, <vscale x 4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV8F64 = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> poison, <vscale x 8 x double> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %BF16 = call bfloat @llvm.copysign.bf16(bfloat undef, bfloat undef)
- %F32 = call float @llvm.copysign.f32(float undef, float undef)
- %F64 = call double @llvm.copysign.f64(double undef, double undef)
-
- %V1BF16 = call <1 x bfloat> @llvm.copysign.v1bf16(<1 x bfloat> undef, <1 x bfloat> undef)
- %V2BF16 = call <2 x bfloat> @llvm.copysign.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef)
- %V4BF16 = call <4 x bfloat> @llvm.copysign.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef)
- %V8BF16 = call <8 x bfloat> @llvm.copysign.v8bf16(<8 x bfloat> undef, <8 x bfloat> undef)
- %V16BF16 = call <16 x bfloat> @llvm.copysign.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef)
-
- %NXV1BF16 = call <vscale x 1 x bfloat> @llvm.copysign.nxv1bf16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef)
- %NXV2BF16 = call <vscale x 2 x bfloat> @llvm.copysign.nxv2bf16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef)
- %NXV4BF16 = call <vscale x 4 x bfloat> @llvm.copysign.nxv4bf16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef)
- %NXV8BF16 = call <vscale x 8 x bfloat> @llvm.copysign.nxv8bf16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef)
- %NXV16BF16 = call <vscale x 16 x bfloat> @llvm.copysign.nxv16bf16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef)
-
- %V1F32 = call <1 x float> @llvm.copysign.v1f32(<1 x float> undef, <1 x float> undef)
- %V2F32 = call <2 x float> @llvm.copysign.v2f32(<2 x float> undef, <2 x float> undef)
- %V4F32 = call <4 x float> @llvm.copysign.v4f32(<4 x float> undef, <4 x float> undef)
- %V8F32 = call <8 x float> @llvm.copysign.v8f32(<8 x float> undef, <8 x float> undef)
- %V16F32 = call <16 x float> @llvm.copysign.v16f32(<16 x float> undef, <16 x float> undef)
-
- %NXV1F32 = call <vscale x 1 x float> @llvm.copysign.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef)
- %NXV2F32 = call <vscale x 2 x float> @llvm.copysign.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef)
- %NXV4F32 = call <vscale x 4 x float> @llvm.copysign.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef)
- %NXV8F32 = call <vscale x 8 x float> @llvm.copysign.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef)
- %NXV16F32 = call <vscale x 16 x float> @llvm.copysign.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef)
-
- %V1F64 = call <1 x double> @llvm.copysign.v1f64(<1 x double> undef, <1 x double> undef)
- %V2F64 = call <2 x double> @llvm.copysign.v2f64(<2 x double> undef, <2 x double> undef)
- %V4F64 = call <4 x double> @llvm.copysign.v4f64(<4 x double> undef, <4 x double> undef)
- %V8F64 = call <8 x double> @llvm.copysign.v8f64(<8 x double> undef, <8 x double> undef)
-
- %NXV1F64 = call <vscale x 1 x double> @llvm.copysign.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef)
- %NXV2F64 = call <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef)
- %NXV4F64 = call <vscale x 4 x double> @llvm.copysign.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef)
- %NXV8F64 = call <vscale x 8 x double> @llvm.copysign.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef)
+ %F32 = call float @llvm.copysign.f32(float poison, float poison)
+ %F64 = call double @llvm.copysign.f64(double poison, double poison)
+
+ %V1F32 = call <1 x float> @llvm.copysign(<1 x float> poison, <1 x float> poison)
+ %V2F32 = call <2 x float> @llvm.copysign(<2 x float> poison, <2 x float> poison)
+ %V4F32 = call <4 x float> @llvm.copysign(<4 x float> poison, <4 x float> poison)
+ %V8F32 = call <8 x float> @llvm.copysign(<8 x float> poison, <8 x float> poison)
+ %V16F32 = call <16 x float> @llvm.copysign(<16 x float> poison, <16 x float> poison)
+
+ %NXV1F32 = call <vscale x 1 x float> @llvm.copysign(<vscale x 1 x float> poison, <vscale x 1 x float> poison)
+ %NXV2F32 = call <vscale x 2 x float> @llvm.copysign(<vscale x 2 x float> poison, <vscale x 2 x float> poison)
+ %NXV4F32 = call <vscale x 4 x float> @llvm.copysign(<vscale x 4 x float> poison, <vscale x 4 x float> poison)
+ %NXV8F32 = call <vscale x 8 x float> @llvm.copysign(<vscale x 8 x float> poison, <vscale x 8 x float> poison)
+ %NXV16F32 = call <vscale x 16 x float> @llvm.copysign(<vscale x 16 x float> poison, <vscale x 16 x float> poison)
+
+ %V1F64 = call <1 x double> @llvm.copysign(<1 x double> poison, <1 x double> poison)
+ %V2F64 = call <2 x double> @llvm.copysign(<2 x double> poison, <2 x double> poison)
+ %V4F64 = call <4 x double> @llvm.copysign(<4 x double> poison, <4 x double> poison)
+ %V8F64 = call <8 x double> @llvm.copysign(<8 x double> poison, <8 x double> poison)
+
+ %NXV1F64 = call <vscale x 1 x double> @llvm.copysign(<vscale x 1 x double> poison, <vscale x 1 x double> poison)
+ %NXV2F64 = call <vscale x 2 x double> @llvm.copysign(<vscale x 2 x double> poison, <vscale x 2 x double> poison)
+ %NXV4F64 = call <vscale x 4 x double> @llvm.copysign(<vscale x 4 x double> poison, <vscale x 4 x double> poison)
+ %NXV8F64 = call <vscale x 8 x double> @llvm.copysign(<vscale x 8 x double> poison, <vscale x 8 x double> poison)
+
+ ret void
+}
+
+define void @fcopysign_bf16() {
+; ZVFH-LABEL: 'fcopysign_bf16'
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %BF16 = call bfloat @llvm.copysign.bf16(bfloat poison, bfloat poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V1BF16 = call <1 x bfloat> @llvm.copysign.v1bf16(<1 x bfloat> poison, <1 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2BF16 = call <2 x bfloat> @llvm.copysign.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4BF16 = call <4 x bfloat> @llvm.copysign.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8BF16 = call <8 x bfloat> @llvm.copysign.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %V16BF16 = call <16 x bfloat> @llvm.copysign.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 95 for instruction: %V32BF16 = call <32 x bfloat> @llvm.copysign.v32bf16(<32 x bfloat> poison, <32 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16 = call <vscale x 1 x bfloat> @llvm.copysign.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16 = call <vscale x 2 x bfloat> @llvm.copysign.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16 = call <vscale x 4 x bfloat> @llvm.copysign.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16 = call <vscale x 8 x bfloat> @llvm.copysign.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16 = call <vscale x 16 x bfloat> @llvm.copysign.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Invalid cost for instruction: %NXV32BF16 = call <vscale x 32 x bfloat> @llvm.copysign.nxv32bf16(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; ZVFHMIN-LABEL: 'fcopysign_bf16'
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %BF16 = call bfloat @llvm.copysign.bf16(bfloat poison, bfloat poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V1BF16 = call <1 x bfloat> @llvm.copysign.v1bf16(<1 x bfloat> poison, <1 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2BF16 = call <2 x bfloat> @llvm.copysign.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4BF16 = call <4 x bfloat> @llvm.copysign.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8BF16 = call <8 x bfloat> @llvm.copysign.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %V16BF16 = call <16 x bfloat> @llvm.copysign.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 95 for instruction: %V32BF16 = call <32 x bfloat> @llvm.copysign.v32bf16(<32 x bfloat> poison, <32 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16 = call <vscale x 1 x bfloat> @llvm.copysign.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16 = call <vscale x 2 x bfloat> @llvm.copysign.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16 = call <vscale x 4 x bfloat> @llvm.copysign.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16 = call <vscale x 8 x bfloat> @llvm.copysign.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16 = call <vscale x 16 x bfloat> @llvm.copysign.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV32BF16 = call <vscale x 32 x bfloat> @llvm.copysign.nxv32bf16(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; NO-ZFHMIN-LABEL: 'fcopysign_bf16'
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %BF16 = call bfloat @llvm.copysign.bf16(bfloat poison, bfloat poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V1BF16 = call <1 x bfloat> @llvm.copysign.v1bf16(<1 x bfloat> poison, <1 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2BF16 = call <2 x bfloat> @llvm.copysign.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4BF16 = call <4 x bfloat> @llvm.copysign.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8BF16 = call <8 x bfloat> @llvm.copysign.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16BF16 = call <16 x bfloat> @llvm.copysign.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32BF16 = call <32 x bfloat> @llvm.copysign.v32bf16(<32 x bfloat> poison, <32 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16 = call <vscale x 1 x bfloat> @llvm.copysign.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16 = call <vscale x 2 x bfloat> @llvm.copysign.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16 = call <vscale x 4 x bfloat> @llvm.copysign.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16 = call <vscale x 8 x bfloat> @llvm.copysign.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16 = call <vscale x 16 x bfloat> @llvm.copysign.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV32BF16 = call <vscale x 32 x bfloat> @llvm.copysign.nxv32bf16(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %BF16 = call bfloat @llvm.copysign(bfloat poison, bfloat poison)
+
+ %V1BF16 = call <1 x bfloat> @llvm.copysign(<1 x bfloat> poison, <1 x bfloat> poison)
+ %V2BF16 = call <2 x bfloat> @llvm.copysign(<2 x bfloat> poison, <2 x bfloat> poison)
+ %V4BF16 = call <4 x bfloat> @llvm.copysign(<4 x bfloat> poison, <4 x bfloat> poison)
+ %V8BF16 = call <8 x bfloat> @llvm.copysign(<8 x bfloat> poison, <8 x bfloat> poison)
+ %V16BF16 = call <16 x bfloat> @llvm.copysign(<16 x bfloat> poison, <16 x bfloat> poison)
+ %V32BF16 = call <32 x bfloat> @llvm.copysign(<32 x bfloat> poison, <32 x bfloat> poison)
+
+ %NXV1BF16 = call <vscale x 1 x bfloat> @llvm.copysign(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison)
+ %NXV2BF16 = call <vscale x 2 x bfloat> @llvm.copysign(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison)
+ %NXV4BF16 = call <vscale x 4 x bfloat> @llvm.copysign(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison)
+ %NXV8BF16 = call <vscale x 8 x bfloat> @llvm.copysign(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison)
+ %NXV16BF16 = call <vscale x 16 x bfloat> @llvm.copysign(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison)
+ %NXV32BF16 = call <vscale x 32 x bfloat> @llvm.copysign(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> poison)
ret void
}
define void @fcopysign_f16() {
; ZVFH-LABEL: 'fcopysign_f16'
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F16 = call half @llvm.copysign.f16(half undef, half undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = call <1 x half> @llvm.copysign.v1f16(<1 x half> undef, <1 x half> undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = call <2 x half> @llvm.copysign.v2f16(<2 x half> undef, <2 x half> undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = call <4 x half> @llvm.copysign.v4f16(<4 x half> undef, <4 x half> undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = call <8 x half> @llvm.copysign.v8f16(<8 x half> undef, <8 x half> undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16F16 = call <16 x half> @llvm.copysign.v16f16(<16 x half> undef, <16 x half> undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32F16 = call <32 x half> @llvm.copysign.v32f16(<32 x half> undef, <32 x half> undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1F16 = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2F16 = call <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV4F16 = call <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV8F16 = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV16F16 = call <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef)
-; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV32F16 = call <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half> undef, <vscale x 32 x half> undef)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F16 = call half @llvm.copysign.f16(half poison, half poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V1F16 = call <1 x half> @llvm.copysign.v1f16(<1 x half> poison, <1 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V2F16 = call <2 x half> @llvm.copysign.v2f16(<2 x half> poison, <2 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F16 = call <4 x half> @llvm.copysign.v4f16(<4 x half> poison, <4 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F16 = call <8 x half> @llvm.copysign.v8f16(<8 x half> poison, <8 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16F16 = call <16 x half> @llvm.copysign.v16f16(<16 x half> poison, <16 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V32F16 = call <32 x half> @llvm.copysign.v32f16(<32 x half> poison, <32 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1F16 = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2F16 = call <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV4F16 = call <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV8F16 = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV16F16 = call <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV32F16 = call <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half> poison, <vscale x 32 x half> poison)
; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
; ZVFHMIN-LABEL: 'fcopysign_f16'
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F16 = call half @llvm.copysign.f16(half undef, half undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V1F16 = call <1 x half> @llvm.copysign.v1f16(<1 x half> undef, <1 x half> undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F16 = call <2 x half> @llvm.copysign.v2f16(<2 x half> undef, <2 x half> undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F16 = call <4 x half> @llvm.copysign.v4f16(<4 x half> undef, <4 x half> undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8F16 = call <8 x half> @llvm.copysign.v8f16(<8 x half> undef, <8 x half> undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %V16F16 = call <16 x half> @llvm.copysign.v16f16(<16 x half> undef, <16 x half> undef)
-; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 95 for instruction: %V32F16 = call <32 x half> @llvm.copysign.v32f16(<32 x half> undef, <32 x half> undef)
-; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16 = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef)
-; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16 = call <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef)
-; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16 = call <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef)
-; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16 = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef)
-; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16 = call <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef)
-; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV32F16 = call <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half> undef, <vscale x 32 x half> undef)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F16 = call half @llvm.copysign.f16(half poison, half poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V1F16 = call <1 x half> @llvm.copysign.v1f16(<1 x half> poison, <1 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F16 = call <2 x half> @llvm.copysign.v2f16(<2 x half> poison, <2 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F16 = call <4 x half> @llvm.copysign.v4f16(<4 x half> poison, <4 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8F16 = call <8 x half> @llvm.copysign.v8f16(<8 x half> poison, <8 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 47 for instruction: %V16F16 = call <16 x half> @llvm.copysign.v16f16(<16 x half> poison, <16 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 95 for instruction: %V32F16 = call <32 x half> @llvm.copysign.v32f16(<32 x half> poison, <32 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16 = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16 = call <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16 = call <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16 = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16 = call <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV32F16 = call <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half> poison, <vscale x 32 x half> poison)
; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %F16 = call half @llvm.copysign.f16(half undef, half undef)
-
- %V1F16 = call <1 x half> @llvm.copysign.v1f16(<1 x half> undef, <1 x half> undef)
- %V2F16 = call <2 x half> @llvm.copysign.v2f16(<2 x half> undef, <2 x half> undef)
- %V4F16 = call <4 x half> @llvm.copysign.v4f16(<4 x half> undef, <4 x half> undef)
- %V8F16 = call <8 x half> @llvm.copysign.v8f16(<8 x half> undef, <8 x half> undef)
- %V16F16 = call <16 x half> @llvm.copysign.v16f16(<16 x half> undef, <16 x half> undef)
- %V32F16 = call <32 x half> @llvm.copysign.v32f16(<32 x half> undef, <32 x half> undef)
-
- %NXV1F16 = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef)
- %NXV2F16 = call <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef)
- %NXV4F16 = call <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef)
- %NXV8F16 = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef)
- %NXV16F16 = call <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef)
- %NXV32F16 = call <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half> undef, <vscale x 32 x half> undef)
+; NO-ZFHMIN-LABEL: 'fcopysign_f16'
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F16 = call half @llvm.copysign.f16(half poison, half poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V1F16 = call <1 x half> @llvm.copysign.v1f16(<1 x half> poison, <1 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F16 = call <2 x half> @llvm.copysign.v2f16(<2 x half> poison, <2 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F16 = call <4 x half> @llvm.copysign.v4f16(<4 x half> poison, <4 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8F16 = call <8 x half> @llvm.copysign.v8f16(<8 x half> poison, <8 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16F16 = call <16 x half> @llvm.copysign.v16f16(<16 x half> poison, <16 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32F16 = call <32 x half> @llvm.copysign.v32f16(<32 x half> poison, <32 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16 = call <vscale x 1 x half> @llvm.copysign.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16 = call <vscale x 2 x half> @llvm.copysign.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16 = call <vscale x 4 x half> @llvm.copysign.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16 = call <vscale x 8 x half> @llvm.copysign.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16 = call <vscale x 16 x half> @llvm.copysign.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV32F16 = call <vscale x 32 x half> @llvm.copysign.nxv32f16(<vscale x 32 x half> poison, <vscale x 32 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %F16 = call half @llvm.copysign(half poison, half poison)
+
+ %V1F16 = call <1 x half> @llvm.copysign(<1 x half> poison, <1 x half> poison)
+ %V2F16 = call <2 x half> @llvm.copysign(<2 x half> poison, <2 x half> poison)
+ %V4F16 = call <4 x half> @llvm.copysign(<4 x half> poison, <4 x half> poison)
+ %V8F16 = call <8 x half> @llvm.copysign(<8 x half> poison, <8 x half> poison)
+ %V16F16 = call <16 x half> @llvm.copysign(<16 x half> poison, <16 x half> poison)
+ %V32F16 = call <32 x half> @llvm.copysign(<32 x half> poison, <32 x half> poison)
+
+ %NXV1F16 = call <vscale x 1 x half> @llvm.copysign(<vscale x 1 x half> poison, <vscale x 1 x half> poison)
+ %NXV2F16 = call <vscale x 2 x half> @llvm.copysign(<vscale x 2 x half> poison, <vscale x 2 x half> poison)
+ %NXV4F16 = call <vscale x 4 x half> @llvm.copysign(<vscale x 4 x half> poison, <vscale x 4 x half> poison)
+ %NXV8F16 = call <vscale x 8 x half> @llvm.copysign(<vscale x 8 x half> poison, <vscale x 8 x half> poison)
+ %NXV16F16 = call <vscale x 16 x half> @llvm.copysign(<vscale x 16 x half> poison, <vscale x 16 x half> poison)
+ %NXV32F16 = call <vscale x 32 x half> @llvm.copysign(<vscale x 32 x half> poison, <vscale x 32 x half> poison)
ret void
}
define void @fma() {
; CHECK-LABEL: 'fma'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %BF16 = call bfloat @llvm.fma.bf16(bfloat undef, bfloat undef, bfloat undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F32 = call float @llvm.fma.f32(float undef, float undef, float undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F64 = call double @llvm.fma.f64(double undef, double undef, double undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V1BF16 = call <1 x bfloat> @llvm.fma.v1bf16(<1 x bfloat> undef, <1 x bfloat> undef, <1 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2BF16 = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4BF16 = call <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8BF16 = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> undef, <8 x bfloat> undef, <8 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16BF16 = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1BF16 = call <vscale x 1 x bfloat> @llvm.fma.nxv1bf16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2BF16 = call <vscale x 2 x bfloat> @llvm.fma.nxv2bf16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV4BF16 = call <vscale x 4 x bfloat> @llvm.fma.nxv4bf16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV8BF16 = call <vscale x 8 x bfloat> @llvm.fma.nxv8bf16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV16BF16 = call <vscale x 16 x bfloat> @llvm.fma.nxv16bf16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V1F32 = call <1 x float> @llvm.fma.v1f32(<1 x float> undef, <1 x float> undef, <1 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2F32 = call <2 x float> @llvm.fma.v2f32(<2 x float> undef, <2 x float> undef, <2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4F32 = call <4 x float> @llvm.fma.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F32 = call <8 x float> @llvm.fma.v8f32(<8 x float> undef, <8 x float> undef, <8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F32 = call <16 x float> @llvm.fma.v16f32(<16 x float> undef, <16 x float> undef, <16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1F32 = call <vscale x 1 x float> @llvm.fma.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2F32 = call <vscale x 2 x float> @llvm.fma.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F32 = call <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F32 = call <vscale x 8 x float> @llvm.fma.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F32 = call <vscale x 16 x float> @llvm.fma.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef, <vscale x 16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V1F64 = call <1 x double> @llvm.fma.v1f64(<1 x double> undef, <1 x double> undef, <1 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2F64 = call <2 x double> @llvm.fma.v2f64(<2 x double> undef, <2 x double> undef, <2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F64 = call <4 x double> @llvm.fma.v4f64(<4 x double> undef, <4 x double> undef, <4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F64 = call <8 x double> @llvm.fma.v8f64(<8 x double> undef, <8 x double> undef, <8 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1F64 = call <vscale x 1 x double> @llvm.fma.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F64 = call <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4F64 = call <vscale x 4 x double> @llvm.fma.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8F64 = call <vscale x 8 x double> @llvm.fma.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F32 = call float @llvm.fma.f32(float poison, float poison, float poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F64 = call double @llvm.fma.f64(double poison, double poison, double poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V1F32 = call <1 x float> @llvm.fma.v1f32(<1 x float> poison, <1 x float> poison, <1 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2F32 = call <2 x float> @llvm.fma.v2f32(<2 x float> poison, <2 x float> poison, <2 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4F32 = call <4 x float> @llvm.fma.v4f32(<4 x float> poison, <4 x float> poison, <4 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V8F32 = call <8 x float> @llvm.fma.v8f32(<8 x float> poison, <8 x float> poison, <8 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V16F32 = call <16 x float> @llvm.fma.v16f32(<16 x float> poison, <16 x float> poison, <16 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1F32 = call <vscale x 1 x float> @llvm.fma.nxv1f32(<vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2F32 = call <vscale x 2 x float> @llvm.fma.nxv2f32(<vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV4F32 = call <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> poison, <vscale x 4 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV8F32 = call <vscale x 8 x float> @llvm.fma.nxv8f32(<vscale x 8 x float> poison, <vscale x 8 x float> poison, <vscale x 8 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV16F32 = call <vscale x 16 x float> @llvm.fma.nxv16f32(<vscale x 16 x float> poison, <vscale x 16 x float> poison, <vscale x 16 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V1F64 = call <1 x double> @llvm.fma.v1f64(<1 x double> poison, <1 x double> poison, <1 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2F64 = call <2 x double> @llvm.fma.v2f64(<2 x double> poison, <2 x double> poison, <2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V4F64 = call <4 x double> @llvm.fma.v4f64(<4 x double> poison, <4 x double> poison, <4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V8F64 = call <8 x double> @llvm.fma.v8f64(<8 x double> poison, <8 x double> poison, <8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1F64 = call <vscale x 1 x double> @llvm.fma.nxv1f64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV2F64 = call <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV4F64 = call <vscale x 4 x double> @llvm.fma.nxv4f64(<vscale x 4 x double> poison, <vscale x 4 x double> poison, <vscale x 4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV8F64 = call <vscale x 8 x double> @llvm.fma.nxv8f64(<vscale x 8 x double> poison, <vscale x 8 x double> poison, <vscale x 8 x double> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %BF16 = call bfloat @llvm.fma.bf16(bfloat undef, bfloat undef, bfloat undef)
- %F32 = call float @llvm.fma.f32(float undef, float undef, float undef)
- %F64 = call double @llvm.fma.f64(double undef, double undef, double undef)
-
- %V1BF16 = call <1 x bfloat> @llvm.fma.v1bf16(<1 x bfloat> undef, <1 x bfloat> undef, <1 x bfloat> undef)
- %V2BF16 = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x bfloat> undef)
- %V4BF16 = call <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x bfloat> undef)
- %V8BF16 = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> undef, <8 x bfloat> undef, <8 x bfloat> undef)
- %V16BF16 = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x bfloat> undef)
-
- %NXV1BF16 = call <vscale x 1 x bfloat> @llvm.fma.nxv1bf16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef)
- %NXV2BF16 = call <vscale x 2 x bfloat> @llvm.fma.nxv2bf16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef)
- %NXV4BF16 = call <vscale x 4 x bfloat> @llvm.fma.nxv4bf16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef)
- %NXV8BF16 = call <vscale x 8 x bfloat> @llvm.fma.nxv8bf16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef)
- %NXV16BF16 = call <vscale x 16 x bfloat> @llvm.fma.nxv16bf16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef)
-
- %V1F32 = call <1 x float> @llvm.fma.v1f32(<1 x float> undef, <1 x float> undef, <1 x float> undef)
- %V2F32 = call <2 x float> @llvm.fma.v2f32(<2 x float> undef, <2 x float> undef, <2 x float> undef)
- %V4F32 = call <4 x float> @llvm.fma.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef)
- %V8F32 = call <8 x float> @llvm.fma.v8f32(<8 x float> undef, <8 x float> undef, <8 x float> undef)
- %V16F32 = call <16 x float> @llvm.fma.v16f32(<16 x float> undef, <16 x float> undef, <16 x float> undef)
-
- %NXV1F32 = call <vscale x 1 x float> @llvm.fma.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef)
- %NXV2F32 = call <vscale x 2 x float> @llvm.fma.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef)
- %NXV4F32 = call <vscale x 4 x float> @llvm.fma.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef)
- %NXV8F32 = call <vscale x 8 x float> @llvm.fma.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef)
- %NXV16F32 = call <vscale x 16 x float> @llvm.fma.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef, <vscale x 16 x float> undef)
-
- %V1F64 = call <1 x double> @llvm.fma.v1f64(<1 x double> undef, <1 x double> undef, <1 x double> undef)
- %V2F64 = call <2 x double> @llvm.fma.v2f64(<2 x double> undef, <2 x double> undef, <2 x double> undef)
- %V4F64 = call <4 x double> @llvm.fma.v4f64(<4 x double> undef, <4 x double> undef, <4 x double> undef)
- %V8F64 = call <8 x double> @llvm.fma.v8f64(<8 x double> undef, <8 x double> undef, <8 x double> undef)
-
- %NXV1F64 = call <vscale x 1 x double> @llvm.fma.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef)
- %NXV2F64 = call <vscale x 2 x double> @llvm.fma.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef)
- %NXV4F64 = call <vscale x 4 x double> @llvm.fma.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef)
- %NXV8F64 = call <vscale x 8 x double> @llvm.fma.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x double> undef)
+ %F32 = call float @llvm.fma(float poison, float poison, float poison)
+ %F64 = call double @llvm.fma(double poison, double poison, double poison)
+
+ %V1F32 = call <1 x float> @llvm.fma(<1 x float> poison, <1 x float> poison, <1 x float> poison)
+ %V2F32 = call <2 x float> @llvm.fma(<2 x float> poison, <2 x float> poison, <2 x float> poison)
+ %V4F32 = call <4 x float> @llvm.fma(<4 x float> poison, <4 x float> poison, <4 x float> poison)
+ %V8F32 = call <8 x float> @llvm.fma(<8 x float> poison, <8 x float> poison, <8 x float> poison)
+ %V16F32 = call <16 x float> @llvm.fma(<16 x float> poison, <16 x float> poison, <16 x float> poison)
+
+ %NXV1F32 = call <vscale x 1 x float> @llvm.fma(<vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison)
+ %NXV2F32 = call <vscale x 2 x float> @llvm.fma(<vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison)
+ %NXV4F32 = call <vscale x 4 x float> @llvm.fma(<vscale x 4 x float> poison, <vscale x 4 x float> poison, <vscale x 4 x float> poison)
+ %NXV8F32 = call <vscale x 8 x float> @llvm.fma(<vscale x 8 x float> poison, <vscale x 8 x float> poison, <vscale x 8 x float> poison)
+ %NXV16F32 = call <vscale x 16 x float> @llvm.fma(<vscale x 16 x float> poison, <vscale x 16 x float> poison, <vscale x 16 x float> poison)
+
+ %V1F64 = call <1 x double> @llvm.fma(<1 x double> poison, <1 x double> poison, <1 x double> poison)
+ %V2F64 = call <2 x double> @llvm.fma(<2 x double> poison, <2 x double> poison, <2 x double> poison)
+ %V4F64 = call <4 x double> @llvm.fma(<4 x double> poison, <4 x double> poison, <4 x double> poison)
+ %V8F64 = call <8 x double> @llvm.fma(<8 x double> poison, <8 x double> poison, <8 x double> poison)
+
+ %NXV1F64 = call <vscale x 1 x double> @llvm.fma(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison)
+ %NXV2F64 = call <vscale x 2 x double> @llvm.fma(<vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x double> poison)
+ %NXV4F64 = call <vscale x 4 x double> @llvm.fma(<vscale x 4 x double> poison, <vscale x 4 x double> poison, <vscale x 4 x double> poison)
+ %NXV8F64 = call <vscale x 8 x double> @llvm.fma(<vscale x 8 x double> poison, <vscale x 8 x double> poison, <vscale x 8 x double> poison)
+
+ ret void
+}
+
+define void @fma_bf16() {
+; ZVFH-LABEL: 'fma_bf16'
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %BF16 = call bfloat @llvm.fma.bf16(bfloat poison, bfloat poison, bfloat poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V1BF16 = call <1 x bfloat> @llvm.fma.v1bf16(<1 x bfloat> poison, <1 x bfloat> poison, <1 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2BF16 = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4BF16 = call <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8BF16 = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison, <8 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16BF16 = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32BF16 = call <32 x bfloat> @llvm.fma.v32bf16(<32 x bfloat> poison, <32 x bfloat> poison, <32 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1BF16 = call <vscale x 1 x bfloat> @llvm.fma.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2BF16 = call <vscale x 2 x bfloat> @llvm.fma.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV4BF16 = call <vscale x 4 x bfloat> @llvm.fma.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV8BF16 = call <vscale x 8 x bfloat> @llvm.fma.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV16BF16 = call <vscale x 16 x bfloat> @llvm.fma.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV32BF16 = call <vscale x 32 x bfloat> @llvm.fma.nxv32bf16(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; ZVFHMIN-LABEL: 'fma_bf16'
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %BF16 = call bfloat @llvm.fma.bf16(bfloat poison, bfloat poison, bfloat poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V1BF16 = call <1 x bfloat> @llvm.fma.v1bf16(<1 x bfloat> poison, <1 x bfloat> poison, <1 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2BF16 = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4BF16 = call <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8BF16 = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison, <8 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16BF16 = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32BF16 = call <32 x bfloat> @llvm.fma.v32bf16(<32 x bfloat> poison, <32 x bfloat> poison, <32 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1BF16 = call <vscale x 1 x bfloat> @llvm.fma.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2BF16 = call <vscale x 2 x bfloat> @llvm.fma.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV4BF16 = call <vscale x 4 x bfloat> @llvm.fma.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV8BF16 = call <vscale x 8 x bfloat> @llvm.fma.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV16BF16 = call <vscale x 16 x bfloat> @llvm.fma.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV32BF16 = call <vscale x 32 x bfloat> @llvm.fma.nxv32bf16(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; NO-ZFHMIN-LABEL: 'fma_bf16'
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %BF16 = call bfloat @llvm.fma.bf16(bfloat poison, bfloat poison, bfloat poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V1BF16 = call <1 x bfloat> @llvm.fma.v1bf16(<1 x bfloat> poison, <1 x bfloat> poison, <1 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2BF16 = call <2 x bfloat> @llvm.fma.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4BF16 = call <4 x bfloat> @llvm.fma.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8BF16 = call <8 x bfloat> @llvm.fma.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison, <8 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16BF16 = call <16 x bfloat> @llvm.fma.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32BF16 = call <32 x bfloat> @llvm.fma.v32bf16(<32 x bfloat> poison, <32 x bfloat> poison, <32 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1BF16 = call <vscale x 1 x bfloat> @llvm.fma.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2BF16 = call <vscale x 2 x bfloat> @llvm.fma.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4BF16 = call <vscale x 4 x bfloat> @llvm.fma.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8BF16 = call <vscale x 8 x bfloat> @llvm.fma.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16BF16 = call <vscale x 16 x bfloat> @llvm.fma.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV32BF16 = call <vscale x 32 x bfloat> @llvm.fma.nxv32bf16(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ %BF16 = call bfloat @llvm.fma(bfloat poison, bfloat poison, bfloat poison)
+
+ %V1BF16 = call <1 x bfloat> @llvm.fma(<1 x bfloat> poison, <1 x bfloat> poison, <1 x bfloat> poison)
+ %V2BF16 = call <2 x bfloat> @llvm.fma(<2 x bfloat> poison, <2 x bfloat> poison, <2 x bfloat> poison)
+ %V4BF16 = call <4 x bfloat> @llvm.fma(<4 x bfloat> poison, <4 x bfloat> poison, <4 x bfloat> poison)
+ %V8BF16 = call <8 x bfloat> @llvm.fma(<8 x bfloat> poison, <8 x bfloat> poison, <8 x bfloat> poison)
+ %V16BF16 = call <16 x bfloat> @llvm.fma(<16 x bfloat> poison, <16 x bfloat> poison, <16 x bfloat> poison)
+ %V32BF16 = call <32 x bfloat> @llvm.fma(<32 x bfloat> poison, <32 x bfloat> poison, <32 x bfloat> poison)
+
+ %NXV1BF16 = call <vscale x 1 x bfloat> @llvm.fma(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison)
+ %NXV2BF16 = call <vscale x 2 x bfloat> @llvm.fma(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison)
+ %NXV4BF16 = call <vscale x 4 x bfloat> @llvm.fma(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison)
+ %NXV8BF16 = call <vscale x 8 x bfloat> @llvm.fma(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison)
+ %NXV16BF16 = call <vscale x 16 x bfloat> @llvm.fma(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison)
+ %NXV32BF16 = call <vscale x 32 x bfloat> @llvm.fma(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> poison)
ret void
}
define void @fma_f16() {
-; CHECK-LABEL: 'fma_f16'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F16 = call half @llvm.fma.f16(half undef, half undef, half undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V1F16 = call <1 x half> @llvm.fma.v1f16(<1 x half> undef, <1 x half> undef, <1 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2F16 = call <2 x half> @llvm.fma.v2f16(<2 x half> undef, <2 x half> undef, <2 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4F16 = call <4 x half> @llvm.fma.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8F16 = call <8 x half> @llvm.fma.v8f16(<8 x half> undef, <8 x half> undef, <8 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16F16 = call <16 x half> @llvm.fma.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32F16 = call <32 x half> @llvm.fma.v32f16(<32 x half> undef, <32 x half> undef, <32 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1F16 = call <vscale x 1 x half> @llvm.fma.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2F16 = call <vscale x 2 x half> @llvm.fma.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV4F16 = call <vscale x 4 x half> @llvm.fma.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV8F16 = call <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV16F16 = call <vscale x 16 x half> @llvm.fma.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV32F16 = call <vscale x 32 x half> @llvm.fma.nxv32f16(<vscale x 32 x half> undef, <vscale x 32 x half> undef, <vscale x 32 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; ZVFH-LABEL: 'fma_f16'
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F16 = call half @llvm.fma.f16(half poison, half poison, half poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V1F16 = call <1 x half> @llvm.fma.v1f16(<1 x half> poison, <1 x half> poison, <1 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2F16 = call <2 x half> @llvm.fma.v2f16(<2 x half> poison, <2 x half> poison, <2 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4F16 = call <4 x half> @llvm.fma.v4f16(<4 x half> poison, <4 x half> poison, <4 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8F16 = call <8 x half> @llvm.fma.v8f16(<8 x half> poison, <8 x half> poison, <8 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16F16 = call <16 x half> @llvm.fma.v16f16(<16 x half> poison, <16 x half> poison, <16 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32F16 = call <32 x half> @llvm.fma.v32f16(<32 x half> poison, <32 x half> poison, <32 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1F16 = call <vscale x 1 x half> @llvm.fma.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2F16 = call <vscale x 2 x half> @llvm.fma.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV4F16 = call <vscale x 4 x half> @llvm.fma.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV8F16 = call <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV16F16 = call <vscale x 16 x half> @llvm.fma.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV32F16 = call <vscale x 32 x half> @llvm.fma.nxv32f16(<vscale x 32 x half> poison, <vscale x 32 x half> poison, <vscale x 32 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; ZVFHMIN-LABEL: 'fma_f16'
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F16 = call half @llvm.fma.f16(half poison, half poison, half poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V1F16 = call <1 x half> @llvm.fma.v1f16(<1 x half> poison, <1 x half> poison, <1 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V2F16 = call <2 x half> @llvm.fma.v2f16(<2 x half> poison, <2 x half> poison, <2 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V4F16 = call <4 x half> @llvm.fma.v4f16(<4 x half> poison, <4 x half> poison, <4 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V8F16 = call <8 x half> @llvm.fma.v8f16(<8 x half> poison, <8 x half> poison, <8 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %V16F16 = call <16 x half> @llvm.fma.v16f16(<16 x half> poison, <16 x half> poison, <16 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V32F16 = call <32 x half> @llvm.fma.v32f16(<32 x half> poison, <32 x half> poison, <32 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV1F16 = call <vscale x 1 x half> @llvm.fma.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV2F16 = call <vscale x 2 x half> @llvm.fma.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %NXV4F16 = call <vscale x 4 x half> @llvm.fma.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %NXV8F16 = call <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %NXV16F16 = call <vscale x 16 x half> @llvm.fma.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %NXV32F16 = call <vscale x 32 x half> @llvm.fma.nxv32f16(<vscale x 32 x half> poison, <vscale x 32 x half> poison, <vscale x 32 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; NO-ZFHMIN-LABEL: 'fma_f16'
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %F16 = call half @llvm.fma.f16(half poison, half poison, half poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %V1F16 = call <1 x half> @llvm.fma.v1f16(<1 x half> poison, <1 x half> poison, <1 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %V2F16 = call <2 x half> @llvm.fma.v2f16(<2 x half> poison, <2 x half> poison, <2 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %V4F16 = call <4 x half> @llvm.fma.v4f16(<4 x half> poison, <4 x half> poison, <4 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %V8F16 = call <8 x half> @llvm.fma.v8f16(<8 x half> poison, <8 x half> poison, <8 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %V16F16 = call <16 x half> @llvm.fma.v16f16(<16 x half> poison, <16 x half> poison, <16 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 64 for instruction: %V32F16 = call <32 x half> @llvm.fma.v32f16(<32 x half> poison, <32 x half> poison, <32 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV1F16 = call <vscale x 1 x half> @llvm.fma.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV2F16 = call <vscale x 2 x half> @llvm.fma.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV4F16 = call <vscale x 4 x half> @llvm.fma.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV8F16 = call <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV16F16 = call <vscale x 16 x half> @llvm.fma.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %NXV32F16 = call <vscale x 32 x half> @llvm.fma.nxv32f16(<vscale x 32 x half> poison, <vscale x 32 x half> poison, <vscale x 32 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- %F16 = call half @llvm.fma.f16(half undef, half undef, half undef)
-
- %V1F16 = call <1 x half> @llvm.fma.v1f16(<1 x half> undef, <1 x half> undef, <1 x half> undef)
- %V2F16 = call <2 x half> @llvm.fma.v2f16(<2 x half> undef, <2 x half> undef, <2 x half> undef)
- %V4F16 = call <4 x half> @llvm.fma.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef)
- %V8F16 = call <8 x half> @llvm.fma.v8f16(<8 x half> undef, <8 x half> undef, <8 x half> undef)
- %V16F16 = call <16 x half> @llvm.fma.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef)
- %V32F16 = call <32 x half> @llvm.fma.v32f16(<32 x half> undef, <32 x half> undef, <32 x half> undef)
-
- %NXV1F16 = call <vscale x 1 x half> @llvm.fma.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef)
- %NXV2F16 = call <vscale x 2 x half> @llvm.fma.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef)
- %NXV4F16 = call <vscale x 4 x half> @llvm.fma.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
- %NXV8F16 = call <vscale x 8 x half> @llvm.fma.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
- %NXV16F16 = call <vscale x 16 x half> @llvm.fma.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
- %NXV32F16 = call <vscale x 32 x half> @llvm.fma.nxv32f16(<vscale x 32 x half> undef, <vscale x 32 x half> undef, <vscale x 32 x half> undef)
+ %F16 = call half @llvm.fma(half poison, half poison, half poison)
+
+ %V1F16 = call <1 x half> @llvm.fma(<1 x half> poison, <1 x half> poison, <1 x half> poison)
+ %V2F16 = call <2 x half> @llvm.fma(<2 x half> poison, <2 x half> poison, <2 x half> poison)
+ %V4F16 = call <4 x half> @llvm.fma(<4 x half> poison, <4 x half> poison, <4 x half> poison)
+ %V8F16 = call <8 x half> @llvm.fma(<8 x half> poison, <8 x half> poison, <8 x half> poison)
+ %V16F16 = call <16 x half> @llvm.fma(<16 x half> poison, <16 x half> poison, <16 x half> poison)
+ %V32F16 = call <32 x half> @llvm.fma(<32 x half> poison, <32 x half> poison, <32 x half> poison)
+
+ %NXV1F16 = call <vscale x 1 x half> @llvm.fma(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison)
+ %NXV2F16 = call <vscale x 2 x half> @llvm.fma(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison)
+ %NXV4F16 = call <vscale x 4 x half> @llvm.fma(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison)
+ %NXV8F16 = call <vscale x 8 x half> @llvm.fma(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison)
+ %NXV16F16 = call <vscale x 16 x half> @llvm.fma(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x half> poison)
+ %NXV32F16 = call <vscale x 32 x half> @llvm.fma(<vscale x 32 x half> poison, <vscale x 32 x half> poison, <vscale x 32 x half> poison)
ret void
}
define void @fmuladd() {
; CHECK-LABEL: 'fmuladd'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call bfloat @llvm.fmuladd.bf16(bfloat undef, bfloat undef, bfloat undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call float @llvm.fmuladd.f32(float undef, float undef, float undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call double @llvm.fmuladd.f64(double undef, double undef, double undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <2 x bfloat> @llvm.fmuladd.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <4 x bfloat> @llvm.fmuladd.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <8 x bfloat> @llvm.fmuladd.v8bf16(<8 x bfloat> undef, <8 x bfloat> undef, <8 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> undef, <2 x float> undef, <2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> undef, <8 x float> undef, <8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %11 = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> undef, <16 x float> undef, <16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> undef, <2 x double> undef, <2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %13 = call <4 x double> @llvm.fmuladd.v4f64(<4 x double> undef, <4 x double> undef, <4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %14 = call <8 x double> @llvm.fmuladd.v8f64(<8 x double> undef, <8 x double> undef, <8 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %15 = call <16 x double> @llvm.fmuladd.v16f64(<16 x double> undef, <16 x double> undef, <16 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x bfloat> @llvm.fmuladd.nxv1bf16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x bfloat> @llvm.fmuladd.nxv2bf16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x bfloat> @llvm.fmuladd.nxv4bf16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %19 = call <vscale x 8 x bfloat> @llvm.fmuladd.nxv8bf16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %20 = call <vscale x 16 x bfloat> @llvm.fmuladd.nxv16bf16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %21 = call <vscale x 1 x float> @llvm.fmuladd.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %22 = call <vscale x 2 x float> @llvm.fmuladd.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %23 = call <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %24 = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %25 = call <vscale x 16 x float> @llvm.fmuladd.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef, <vscale x 16 x float> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x double> @llvm.fmuladd.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %27 = call <vscale x 2 x double> @llvm.fmuladd.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %28 = call <vscale x 4 x double> @llvm.fmuladd.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %29 = call <vscale x 8 x double> @llvm.fmuladd.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x double> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %30 = call <vscale x 16 x double> @llvm.fmuladd.nxv16f64(<vscale x 16 x double> undef, <vscale x 16 x double> undef, <vscale x 16 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call float @llvm.fmuladd.f32(float poison, float poison, float poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call double @llvm.fmuladd.f64(double poison, double poison, double poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> poison, <2 x float> poison, <2 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> poison, <4 x float> poison, <4 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> poison, <8 x float> poison, <8 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %6 = call <16 x float> @llvm.fmuladd.v16f32(<16 x float> poison, <16 x float> poison, <16 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> poison, <2 x double> poison, <2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <4 x double> @llvm.fmuladd.v4f64(<4 x double> poison, <4 x double> poison, <4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %9 = call <8 x double> @llvm.fmuladd.v8f64(<8 x double> poison, <8 x double> poison, <8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %10 = call <16 x double> @llvm.fmuladd.v16f64(<16 x double> poison, <16 x double> poison, <16 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call <vscale x 1 x float> @llvm.fmuladd.nxv1f32(<vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <vscale x 2 x float> @llvm.fmuladd.nxv2f32(<vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %13 = call <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> poison, <vscale x 4 x float> poison, <vscale x 4 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %14 = call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> poison, <vscale x 8 x float> poison, <vscale x 8 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %15 = call <vscale x 16 x float> @llvm.fmuladd.nxv16f32(<vscale x 16 x float> poison, <vscale x 16 x float> poison, <vscale x 16 x float> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x double> @llvm.fmuladd.nxv1f64(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %17 = call <vscale x 2 x double> @llvm.fmuladd.nxv2f64(<vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %18 = call <vscale x 4 x double> @llvm.fmuladd.nxv4f64(<vscale x 4 x double> poison, <vscale x 4 x double> poison, <vscale x 4 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %19 = call <vscale x 8 x double> @llvm.fmuladd.nxv8f64(<vscale x 8 x double> poison, <vscale x 8 x double> poison, <vscale x 8 x double> poison)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %20 = call <vscale x 16 x double> @llvm.fmuladd.nxv16f64(<vscale x 16 x double> poison, <vscale x 16 x double> poison, <vscale x 16 x double> poison)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- call bfloat @llvm.fmuladd.bf16(bfloat undef, bfloat undef, bfloat undef)
- call float @llvm.fmuladd.f32(float undef, float undef, float undef)
- call double @llvm.fmuladd.f64(double undef, double undef, double undef)
- call <2 x bfloat> @llvm.fmuladd.v2bf16(<2 x bfloat> undef, <2 x bfloat> undef, <2 x bfloat> undef)
- call <4 x bfloat> @llvm.fmuladd.v4bf16(<4 x bfloat> undef, <4 x bfloat> undef, <4 x bfloat> undef)
- call <8 x bfloat> @llvm.fmuladd.v8bf16(<8 x bfloat> undef, <8 x bfloat> undef, <8 x bfloat> undef)
- call <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> undef, <16 x bfloat> undef, <16 x bfloat> undef)
- call <2 x float> @llvm.fmuladd.v2f32(<2 x float> undef, <2 x float> undef, <2 x float> undef)
- call <4 x float> @llvm.fmuladd.v4f32(<4 x float> undef, <4 x float> undef, <4 x float> undef)
- call <8 x float> @llvm.fmuladd.v8f32(<8 x float> undef, <8 x float> undef, <8 x float> undef)
- call <16 x float> @llvm.fmuladd.v16f32(<16 x float> undef, <16 x float> undef, <16 x float> undef)
- call <2 x double> @llvm.fmuladd.v2f64(<2 x double> undef, <2 x double> undef, <2 x double> undef)
- call <4 x double> @llvm.fmuladd.v4f64(<4 x double> undef, <4 x double> undef, <4 x double> undef)
- call <8 x double> @llvm.fmuladd.v8f64(<8 x double> undef, <8 x double> undef, <8 x double> undef)
- call <16 x double> @llvm.fmuladd.v16f64(<16 x double> undef, <16 x double> undef, <16 x double> undef)
- call <vscale x 1 x bfloat> @llvm.fmuladd.nxv1bf16(<vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef, <vscale x 1 x bfloat> undef)
- call <vscale x 2 x bfloat> @llvm.fmuladd.nxv2bf16(<vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef, <vscale x 2 x bfloat> undef)
- call <vscale x 4 x bfloat> @llvm.fmuladd.nxv4bf16(<vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef, <vscale x 4 x bfloat> undef)
- call <vscale x 8 x bfloat> @llvm.fmuladd.nxv8bf16(<vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef, <vscale x 8 x bfloat> undef)
- call <vscale x 16 x bfloat> @llvm.fmuladd.nxv16bf16(<vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef, <vscale x 16 x bfloat> undef)
- call <vscale x 1 x float> @llvm.fmuladd.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef)
- call <vscale x 2 x float> @llvm.fmuladd.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef)
- call <vscale x 4 x float> @llvm.fmuladd.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef)
- call <vscale x 8 x float> @llvm.fmuladd.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, <vscale x 8 x float> undef)
- call <vscale x 16 x float> @llvm.fmuladd.nxv16f32(<vscale x 16 x float> undef, <vscale x 16 x float> undef, <vscale x 16 x float> undef)
- call <vscale x 1 x double> @llvm.fmuladd.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef)
- call <vscale x 2 x double> @llvm.fmuladd.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef)
- call <vscale x 4 x double> @llvm.fmuladd.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, <vscale x 4 x double> undef)
- call <vscale x 8 x double> @llvm.fmuladd.nxv8f64(<vscale x 8 x double> undef, <vscale x 8 x double> undef, <vscale x 8 x double> undef)
- call <vscale x 16 x double> @llvm.fmuladd.nxv16f64(<vscale x 16 x double> undef, <vscale x 16 x double> undef, <vscale x 16 x double> undef)
+ call float @llvm.fmuladd(float poison, float poison, float poison)
+ call double @llvm.fmuladd(double poison, double poison, double poison)
+
+ call <2 x float> @llvm.fmuladd(<2 x float> poison, <2 x float> poison, <2 x float> poison)
+ call <4 x float> @llvm.fmuladd(<4 x float> poison, <4 x float> poison, <4 x float> poison)
+ call <8 x float> @llvm.fmuladd(<8 x float> poison, <8 x float> poison, <8 x float> poison)
+ call <16 x float> @llvm.fmuladd(<16 x float> poison, <16 x float> poison, <16 x float> poison)
+
+ call <2 x double> @llvm.fmuladd(<2 x double> poison, <2 x double> poison, <2 x double> poison)
+ call <4 x double> @llvm.fmuladd(<4 x double> poison, <4 x double> poison, <4 x double> poison)
+ call <8 x double> @llvm.fmuladd(<8 x double> poison, <8 x double> poison, <8 x double> poison)
+ call <16 x double> @llvm.fmuladd(<16 x double> poison, <16 x double> poison, <16 x double> poison)
+
+ call <vscale x 1 x float> @llvm.fmuladd(<vscale x 1 x float> poison, <vscale x 1 x float> poison, <vscale x 1 x float> poison)
+ call <vscale x 2 x float> @llvm.fmuladd(<vscale x 2 x float> poison, <vscale x 2 x float> poison, <vscale x 2 x float> poison)
+ call <vscale x 4 x float> @llvm.fmuladd(<vscale x 4 x float> poison, <vscale x 4 x float> poison, <vscale x 4 x float> poison)
+ call <vscale x 8 x float> @llvm.fmuladd(<vscale x 8 x float> poison, <vscale x 8 x float> poison, <vscale x 8 x float> poison)
+ call <vscale x 16 x float> @llvm.fmuladd(<vscale x 16 x float> poison, <vscale x 16 x float> poison, <vscale x 16 x float> poison)
+
+ call <vscale x 1 x double> @llvm.fmuladd(<vscale x 1 x double> poison, <vscale x 1 x double> poison, <vscale x 1 x double> poison)
+ call <vscale x 2 x double> @llvm.fmuladd(<vscale x 2 x double> poison, <vscale x 2 x double> poison, <vscale x 2 x double> poison)
+ call <vscale x 4 x double> @llvm.fmuladd(<vscale x 4 x double> poison, <vscale x 4 x double> poison, <vscale x 4 x double> poison)
+ call <vscale x 8 x double> @llvm.fmuladd(<vscale x 8 x double> poison, <vscale x 8 x double> poison, <vscale x 8 x double> poison)
+ call <vscale x 16 x double> @llvm.fmuladd(<vscale x 16 x double> poison, <vscale x 16 x double> poison, <vscale x 16 x double> poison)
+
+ ret void
+}
+
+define void @fmuladd_bf16() {
+; ZVFH-LABEL: 'fmuladd_bf16'
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call bfloat @llvm.fmuladd.bf16(bfloat poison, bfloat poison, bfloat poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x bfloat> @llvm.fmuladd.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x bfloat> @llvm.fmuladd.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x bfloat> @llvm.fmuladd.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison, <8 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x bfloat> @llvm.fmuladd.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x bfloat> @llvm.fmuladd.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x bfloat> @llvm.fmuladd.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 8 x bfloat> @llvm.fmuladd.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %10 = call <vscale x 16 x bfloat> @llvm.fmuladd.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; ZVFHMIN-LABEL: 'fmuladd_bf16'
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call bfloat @llvm.fmuladd.bf16(bfloat poison, bfloat poison, bfloat poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x bfloat> @llvm.fmuladd.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x bfloat> @llvm.fmuladd.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x bfloat> @llvm.fmuladd.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison, <8 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x bfloat> @llvm.fmuladd.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x bfloat> @llvm.fmuladd.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x bfloat> @llvm.fmuladd.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 8 x bfloat> @llvm.fmuladd.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %10 = call <vscale x 16 x bfloat> @llvm.fmuladd.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; NO-ZFHMIN-LABEL: 'fmuladd_bf16'
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call bfloat @llvm.fmuladd.bf16(bfloat poison, bfloat poison, bfloat poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %2 = call <2 x bfloat> @llvm.fmuladd.v2bf16(<2 x bfloat> poison, <2 x bfloat> poison, <2 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %3 = call <4 x bfloat> @llvm.fmuladd.v4bf16(<4 x bfloat> poison, <4 x bfloat> poison, <4 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %4 = call <8 x bfloat> @llvm.fmuladd.v8bf16(<8 x bfloat> poison, <8 x bfloat> poison, <8 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %5 = call <16 x bfloat> @llvm.fmuladd.v16bf16(<16 x bfloat> poison, <16 x bfloat> poison, <16 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %6 = call <vscale x 1 x bfloat> @llvm.fmuladd.nxv1bf16(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %7 = call <vscale x 2 x bfloat> @llvm.fmuladd.nxv2bf16(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %8 = call <vscale x 4 x bfloat> @llvm.fmuladd.nxv4bf16(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %9 = call <vscale x 8 x bfloat> @llvm.fmuladd.nxv8bf16(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %10 = call <vscale x 16 x bfloat> @llvm.fmuladd.nxv16bf16(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+ call bfloat @llvm.fmuladd(bfloat poison, bfloat poison, bfloat poison)
+
+ call <2 x bfloat> @llvm.fmuladd(<2 x bfloat> poison, <2 x bfloat> poison, <2 x bfloat> poison)
+ call <4 x bfloat> @llvm.fmuladd(<4 x bfloat> poison, <4 x bfloat> poison, <4 x bfloat> poison)
+ call <8 x bfloat> @llvm.fmuladd(<8 x bfloat> poison, <8 x bfloat> poison, <8 x bfloat> poison)
+ call <16 x bfloat> @llvm.fmuladd(<16 x bfloat> poison, <16 x bfloat> poison, <16 x bfloat> poison)
+
+ call <vscale x 1 x bfloat> @llvm.fmuladd(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> poison)
+ call <vscale x 2 x bfloat> @llvm.fmuladd(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> poison)
+ call <vscale x 4 x bfloat> @llvm.fmuladd(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> poison)
+ call <vscale x 8 x bfloat> @llvm.fmuladd(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> poison)
+ call <vscale x 16 x bfloat> @llvm.fmuladd(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> poison)
ret void
}
define void @fmuladd_f16() {
-; CHECK-LABEL: 'fmuladd_f16'
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.fmuladd.f16(half undef, half undef, half undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x half> @llvm.fmuladd.v2f16(<2 x half> undef, <2 x half> undef, <2 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x half> @llvm.fmuladd.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x half> @llvm.fmuladd.v8f16(<8 x half> undef, <8 x half> undef, <8 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x half> @llvm.fmuladd.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x half> @llvm.fmuladd.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x half> @llvm.fmuladd.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x half> @llvm.fmuladd.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %10 = call <vscale x 16 x half> @llvm.fmuladd.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
-; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+; ZVFH-LABEL: 'fmuladd_f16'
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.fmuladd.f16(half poison, half poison, half poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x half> @llvm.fmuladd.v2f16(<2 x half> poison, <2 x half> poison, <2 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x half> @llvm.fmuladd.v4f16(<4 x half> poison, <4 x half> poison, <4 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x half> @llvm.fmuladd.v8f16(<8 x half> poison, <8 x half> poison, <8 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x half> @llvm.fmuladd.v16f16(<16 x half> poison, <16 x half> poison, <16 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x half> @llvm.fmuladd.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x half> @llvm.fmuladd.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x half> @llvm.fmuladd.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %10 = call <vscale x 16 x half> @llvm.fmuladd.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x half> poison)
+; ZVFH-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; ZVFHMIN-LABEL: 'fmuladd_f16'
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.fmuladd.f16(half poison, half poison, half poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x half> @llvm.fmuladd.v2f16(<2 x half> poison, <2 x half> poison, <2 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x half> @llvm.fmuladd.v4f16(<4 x half> poison, <4 x half> poison, <4 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x half> @llvm.fmuladd.v8f16(<8 x half> poison, <8 x half> poison, <8 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <16 x half> @llvm.fmuladd.v16f16(<16 x half> poison, <16 x half> poison, <16 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x half> @llvm.fmuladd.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x half> @llvm.fmuladd.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x half> @llvm.fmuladd.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %10 = call <vscale x 16 x half> @llvm.fmuladd.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x half> poison)
+; ZVFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+; NO-ZFHMIN-LABEL: 'fmuladd_f16'
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call half @llvm.fmuladd.f16(half poison, half poison, half poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %2 = call <2 x half> @llvm.fmuladd.v2f16(<2 x half> poison, <2 x half> poison, <2 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %3 = call <4 x half> @llvm.fmuladd.v4f16(<4 x half> poison, <4 x half> poison, <4 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %4 = call <8 x half> @llvm.fmuladd.v8f16(<8 x half> poison, <8 x half> poison, <8 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %5 = call <16 x half> @llvm.fmuladd.v16f16(<16 x half> poison, <16 x half> poison, <16 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %6 = call <vscale x 1 x half> @llvm.fmuladd.nxv1f16(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %7 = call <vscale x 2 x half> @llvm.fmuladd.nxv2f16(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %8 = call <vscale x 4 x half> @llvm.fmuladd.nxv4f16(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %9 = call <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Invalid cost for instruction: %10 = call <vscale x 16 x half> @llvm.fmuladd.nxv16f16(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x half> poison)
+; NO-ZFHMIN-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
- call half @llvm.fmuladd.f16(half undef, half undef, half undef)
- call <2 x half> @llvm.fmuladd.v2f16(<2 x half> undef, <2 x half> undef, <2 x half> undef)
- call <4 x half> @llvm.fmuladd.v4f16(<4 x half> undef, <4 x half> undef, <4 x half> undef)
- call <8 x half> @llvm.fmuladd.v8f16(<8 x half> undef, <8 x half> undef, <8 x half> undef)
- call <16 x half> @llvm.fmuladd.v16f16(<16 x half> undef, <16 x half> undef, <16 x half> undef)
- call <vscale x 1 x half> @llvm.fmuladd.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef)
- call <vscale x 2 x half> @llvm.fmuladd.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef)
- call <vscale x 4 x half> @llvm.fmuladd.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef)
- call <vscale x 8 x half> @llvm.fmuladd.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef)
- call <vscale x 16 x half> @llvm.fmuladd.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, <vscale x 16 x half> undef)
+ call half @llvm.fmuladd(half poison, half poison, half poison)
+
+ call <2 x half> @llvm.fmuladd(<2 x half> poison, <2 x half> poison, <2 x half> poison)
+ call <4 x half> @llvm.fmuladd(<4 x half> poison, <4 x half> poison, <4 x half> poison)
+ call <8 x half> @llvm.fmuladd(<8 x half> poison, <8 x half> poison, <8 x half> poison)
+ call <16 x half> @llvm.fmuladd(<16 x half> poison, <16 x half> poison, <16 x half> poison)
+
+ call <vscale x 1 x half> @llvm.fmuladd(<vscale x 1 x half> poison, <vscale x 1 x half> poison, <vscale x 1 x half> poison)
+ call <vscale x 2 x half> @llvm.fmuladd(<vscale x 2 x half> poison, <vscale x 2 x half> poison, <vscale x 2 x half> poison)
+ call <vscale x 4 x half> @llvm.fmuladd(<vscale x 4 x half> poison, <vscale x 4 x half> poison, <vscale x 4 x half> poison)
+ call <vscale x 8 x half> @llvm.fmuladd(<vscale x 8 x half> poison, <vscale x 8 x half> poison, <vscale x 8 x half> poison)
+ call <vscale x 16 x half> @llvm.fmuladd(<vscale x 16 x half> poison, <vscale x 16 x half> poison, <vscale x 16 x half> poison)
ret void
}
diff --git a/llvm/test/CodeGen/AArch64/andcompare.ll b/llvm/test/CodeGen/AArch64/andcompare.ll
index cbacd17..0e15b94 100644
--- a/llvm/test/CodeGen/AArch64/andcompare.ll
+++ b/llvm/test/CodeGen/AArch64/andcompare.ll
@@ -1,23 +1,23 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-none-elf -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,SDISEL
-; RUN: llc -mtriple=aarch64-none-elf -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,GISEL
+; RUN: llc -mtriple=aarch64-none-elf -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-none-elf -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
define i32 @and_eq_eq(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_eq_eq:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, eq
-; SDISEL-NEXT: cset w0, eq
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_eq_eq:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, eq
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, eq
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_eq_eq:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, eq
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_eq_eq:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, eq
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, eq
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp eq i32 %s0, %s1
%c1 = icmp eq i32 %s2, %s3
@@ -27,21 +27,21 @@ entry:
}
define i32 @and_eq_ne(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_eq_ne:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #4, eq
-; SDISEL-NEXT: cset w0, ne
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_eq_ne:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, eq
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ne
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_eq_ne:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #4, eq
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_eq_ne:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, eq
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ne
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp eq i32 %s0, %s1
%c1 = icmp ne i32 %s2, %s3
@@ -51,21 +51,21 @@ entry:
}
define i32 @and_eq_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_eq_ult:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, eq
-; SDISEL-NEXT: cset w0, lo
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_eq_ult:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, eq
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lo
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_eq_ult:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, eq
+; CHECK-SD-NEXT: cset w0, lo
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_eq_ult:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, eq
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lo
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp eq i32 %s0, %s1
%c1 = icmp ult i32 %s2, %s3
@@ -75,21 +75,21 @@ entry:
}
define i32 @and_eq_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_eq_ule:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, eq
-; SDISEL-NEXT: cset w0, ls
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_eq_ule:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, eq
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ls
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_eq_ule:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, eq
+; CHECK-SD-NEXT: cset w0, ls
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_eq_ule:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, eq
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ls
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp eq i32 %s0, %s1
%c1 = icmp ule i32 %s2, %s3
@@ -99,21 +99,21 @@ entry:
}
define i32 @and_eq_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_eq_ugt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, eq
-; SDISEL-NEXT: cset w0, hi
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_eq_ugt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, eq
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hi
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_eq_ugt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, eq
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_eq_ugt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, eq
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hi
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp eq i32 %s0, %s1
%c1 = icmp ugt i32 %s2, %s3
@@ -123,21 +123,21 @@ entry:
}
define i32 @and_eq_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_eq_uge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, eq
-; SDISEL-NEXT: cset w0, hs
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_eq_uge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, eq
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hs
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_eq_uge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, eq
+; CHECK-SD-NEXT: cset w0, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_eq_uge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, eq
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hs
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp eq i32 %s0, %s1
%c1 = icmp uge i32 %s2, %s3
@@ -147,21 +147,21 @@ entry:
}
define i32 @and_eq_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_eq_slt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, eq
-; SDISEL-NEXT: cset w0, lt
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_eq_slt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, eq
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lt
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_eq_slt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, eq
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_eq_slt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, eq
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lt
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp eq i32 %s0, %s1
%c1 = icmp slt i32 %s2, %s3
@@ -171,21 +171,21 @@ entry:
}
define i32 @and_eq_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_eq_sle:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, eq
-; SDISEL-NEXT: cset w0, le
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_eq_sle:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, eq
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, le
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_eq_sle:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, eq
+; CHECK-SD-NEXT: cset w0, le
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_eq_sle:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, eq
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, le
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp eq i32 %s0, %s1
%c1 = icmp sle i32 %s2, %s3
@@ -195,21 +195,21 @@ entry:
}
define i32 @and_eq_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_eq_sgt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #4, eq
-; SDISEL-NEXT: cset w0, gt
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_eq_sgt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, eq
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, gt
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_eq_sgt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #4, eq
+; CHECK-SD-NEXT: cset w0, gt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_eq_sgt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, eq
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, gt
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp eq i32 %s0, %s1
%c1 = icmp sgt i32 %s2, %s3
@@ -219,21 +219,21 @@ entry:
}
define i32 @and_eq_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_eq_sge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #8, eq
-; SDISEL-NEXT: cset w0, ge
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_eq_sge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, eq
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ge
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_eq_sge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #8, eq
+; CHECK-SD-NEXT: cset w0, ge
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_eq_sge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, eq
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ge
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp eq i32 %s0, %s1
%c1 = icmp sge i32 %s2, %s3
@@ -243,21 +243,21 @@ entry:
}
define i32 @and_ne_eq(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ne_eq:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, ne
-; SDISEL-NEXT: cset w0, eq
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ne_eq:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ne
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, eq
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ne_eq:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, ne
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ne_eq:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ne
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, eq
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ne i32 %s0, %s1
%c1 = icmp eq i32 %s2, %s3
@@ -267,21 +267,21 @@ entry:
}
define i32 @and_ne_ne(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ne_ne:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #4, ne
-; SDISEL-NEXT: cset w0, ne
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ne_ne:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ne
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ne
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ne_ne:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #4, ne
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ne_ne:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ne
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ne
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ne i32 %s0, %s1
%c1 = icmp ne i32 %s2, %s3
@@ -291,21 +291,21 @@ entry:
}
define i32 @and_ne_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ne_ult:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, ne
-; SDISEL-NEXT: cset w0, lo
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ne_ult:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ne
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lo
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ne_ult:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, ne
+; CHECK-SD-NEXT: cset w0, lo
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ne_ult:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ne
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lo
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ne i32 %s0, %s1
%c1 = icmp ult i32 %s2, %s3
@@ -315,21 +315,21 @@ entry:
}
define i32 @and_ne_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ne_ule:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, ne
-; SDISEL-NEXT: cset w0, ls
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ne_ule:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ne
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ls
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ne_ule:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, ne
+; CHECK-SD-NEXT: cset w0, ls
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ne_ule:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ne
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ls
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ne i32 %s0, %s1
%c1 = icmp ule i32 %s2, %s3
@@ -339,21 +339,21 @@ entry:
}
define i32 @and_ne_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ne_ugt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, ne
-; SDISEL-NEXT: cset w0, hi
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ne_ugt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ne
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hi
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ne_ugt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, ne
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ne_ugt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ne
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hi
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ne i32 %s0, %s1
%c1 = icmp ugt i32 %s2, %s3
@@ -363,21 +363,21 @@ entry:
}
define i32 @and_ne_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ne_uge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, ne
-; SDISEL-NEXT: cset w0, hs
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ne_uge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ne
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hs
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ne_uge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, ne
+; CHECK-SD-NEXT: cset w0, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ne_uge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ne
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hs
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ne i32 %s0, %s1
%c1 = icmp uge i32 %s2, %s3
@@ -387,21 +387,21 @@ entry:
}
define i32 @and_ne_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ne_slt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, ne
-; SDISEL-NEXT: cset w0, lt
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ne_slt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ne
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lt
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ne_slt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, ne
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ne_slt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ne
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lt
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ne i32 %s0, %s1
%c1 = icmp slt i32 %s2, %s3
@@ -411,21 +411,21 @@ entry:
}
define i32 @and_ne_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ne_sle:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, ne
-; SDISEL-NEXT: cset w0, le
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ne_sle:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ne
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, le
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ne_sle:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, ne
+; CHECK-SD-NEXT: cset w0, le
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ne_sle:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ne
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, le
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ne i32 %s0, %s1
%c1 = icmp sle i32 %s2, %s3
@@ -435,21 +435,21 @@ entry:
}
define i32 @and_ne_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ne_sgt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #4, ne
-; SDISEL-NEXT: cset w0, gt
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ne_sgt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ne
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, gt
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ne_sgt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #4, ne
+; CHECK-SD-NEXT: cset w0, gt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ne_sgt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ne
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, gt
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ne i32 %s0, %s1
%c1 = icmp sgt i32 %s2, %s3
@@ -459,21 +459,21 @@ entry:
}
define i32 @and_ne_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ne_sge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #8, ne
-; SDISEL-NEXT: cset w0, ge
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ne_sge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ne
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ge
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ne_sge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #8, ne
+; CHECK-SD-NEXT: cset w0, ge
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ne_sge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ne
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ge
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ne i32 %s0, %s1
%c1 = icmp sge i32 %s2, %s3
@@ -483,21 +483,21 @@ entry:
}
define i32 @and_ult_eq(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ult_eq:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, lo
-; SDISEL-NEXT: cset w0, eq
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ult_eq:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lo
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, eq
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ult_eq:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, lo
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ult_eq:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, eq
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ult i32 %s0, %s1
%c1 = icmp eq i32 %s2, %s3
@@ -507,21 +507,21 @@ entry:
}
define i32 @and_ult_ne(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ult_ne:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #4, lo
-; SDISEL-NEXT: cset w0, ne
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ult_ne:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lo
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ne
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ult_ne:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #4, lo
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ult_ne:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ne
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ult i32 %s0, %s1
%c1 = icmp ne i32 %s2, %s3
@@ -531,21 +531,21 @@ entry:
}
define i32 @and_ult_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ult_ult:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, lo
-; SDISEL-NEXT: cset w0, lo
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ult_ult:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lo
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lo
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ult_ult:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, lo
+; CHECK-SD-NEXT: cset w0, lo
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ult_ult:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lo
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ult i32 %s0, %s1
%c1 = icmp ult i32 %s2, %s3
@@ -555,21 +555,21 @@ entry:
}
define i32 @and_ult_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ult_ule:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, lo
-; SDISEL-NEXT: cset w0, ls
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ult_ule:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lo
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ls
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ult_ule:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, lo
+; CHECK-SD-NEXT: cset w0, ls
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ult_ule:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ls
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ult i32 %s0, %s1
%c1 = icmp ule i32 %s2, %s3
@@ -579,21 +579,21 @@ entry:
}
define i32 @and_ult_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ult_ugt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, lo
-; SDISEL-NEXT: cset w0, hi
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ult_ugt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lo
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hi
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ult_ugt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, lo
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ult_ugt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hi
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ult i32 %s0, %s1
%c1 = icmp ugt i32 %s2, %s3
@@ -603,21 +603,21 @@ entry:
}
define i32 @and_ult_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ult_uge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, lo
-; SDISEL-NEXT: cset w0, hs
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ult_uge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lo
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hs
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ult_uge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, lo
+; CHECK-SD-NEXT: cset w0, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ult_uge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hs
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ult i32 %s0, %s1
%c1 = icmp uge i32 %s2, %s3
@@ -627,21 +627,21 @@ entry:
}
define i32 @and_ult_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ult_slt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, lo
-; SDISEL-NEXT: cset w0, lt
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ult_slt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lo
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lt
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ult_slt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, lo
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ult_slt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lt
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ult i32 %s0, %s1
%c1 = icmp slt i32 %s2, %s3
@@ -651,21 +651,21 @@ entry:
}
define i32 @and_ult_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ult_sle:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, lo
-; SDISEL-NEXT: cset w0, le
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ult_sle:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lo
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, le
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ult_sle:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, lo
+; CHECK-SD-NEXT: cset w0, le
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ult_sle:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, le
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ult i32 %s0, %s1
%c1 = icmp sle i32 %s2, %s3
@@ -675,21 +675,21 @@ entry:
}
define i32 @and_ult_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ult_sgt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #4, lo
-; SDISEL-NEXT: cset w0, gt
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ult_sgt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lo
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, gt
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ult_sgt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #4, lo
+; CHECK-SD-NEXT: cset w0, gt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ult_sgt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, gt
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ult i32 %s0, %s1
%c1 = icmp sgt i32 %s2, %s3
@@ -699,21 +699,21 @@ entry:
}
define i32 @and_ult_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ult_sge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #8, lo
-; SDISEL-NEXT: cset w0, ge
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ult_sge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lo
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ge
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ult_sge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #8, lo
+; CHECK-SD-NEXT: cset w0, ge
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ult_sge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ge
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ult i32 %s0, %s1
%c1 = icmp sge i32 %s2, %s3
@@ -723,21 +723,21 @@ entry:
}
define i32 @and_ule_eq(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ule_eq:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, ls
-; SDISEL-NEXT: cset w0, eq
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ule_eq:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ls
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, eq
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ule_eq:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, ls
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ule_eq:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ls
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, eq
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ule i32 %s0, %s1
%c1 = icmp eq i32 %s2, %s3
@@ -747,21 +747,21 @@ entry:
}
define i32 @and_ule_ne(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ule_ne:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #4, ls
-; SDISEL-NEXT: cset w0, ne
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ule_ne:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ls
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ne
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ule_ne:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #4, ls
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ule_ne:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ls
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ne
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ule i32 %s0, %s1
%c1 = icmp ne i32 %s2, %s3
@@ -771,21 +771,21 @@ entry:
}
define i32 @and_ule_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ule_ult:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, ls
-; SDISEL-NEXT: cset w0, lo
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ule_ult:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ls
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lo
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ule_ult:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, ls
+; CHECK-SD-NEXT: cset w0, lo
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ule_ult:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ls
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lo
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ule i32 %s0, %s1
%c1 = icmp ult i32 %s2, %s3
@@ -795,21 +795,21 @@ entry:
}
define i32 @and_ule_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ule_ule:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, ls
-; SDISEL-NEXT: cset w0, ls
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ule_ule:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ls
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ls
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ule_ule:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, ls
+; CHECK-SD-NEXT: cset w0, ls
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ule_ule:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ls
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ls
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ule i32 %s0, %s1
%c1 = icmp ule i32 %s2, %s3
@@ -819,21 +819,21 @@ entry:
}
define i32 @and_ule_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ule_ugt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, ls
-; SDISEL-NEXT: cset w0, hi
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ule_ugt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ls
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hi
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ule_ugt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, ls
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ule_ugt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ls
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hi
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ule i32 %s0, %s1
%c1 = icmp ugt i32 %s2, %s3
@@ -843,21 +843,21 @@ entry:
}
define i32 @and_ule_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ule_uge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, ls
-; SDISEL-NEXT: cset w0, hs
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ule_uge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ls
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hs
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ule_uge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, ls
+; CHECK-SD-NEXT: cset w0, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ule_uge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ls
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hs
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ule i32 %s0, %s1
%c1 = icmp uge i32 %s2, %s3
@@ -867,21 +867,21 @@ entry:
}
define i32 @and_ule_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ule_slt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, ls
-; SDISEL-NEXT: cset w0, lt
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ule_slt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ls
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lt
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ule_slt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, ls
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ule_slt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ls
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lt
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ule i32 %s0, %s1
%c1 = icmp slt i32 %s2, %s3
@@ -891,21 +891,21 @@ entry:
}
define i32 @and_ule_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ule_sle:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, ls
-; SDISEL-NEXT: cset w0, le
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ule_sle:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ls
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, le
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ule_sle:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, ls
+; CHECK-SD-NEXT: cset w0, le
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ule_sle:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ls
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, le
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ule i32 %s0, %s1
%c1 = icmp sle i32 %s2, %s3
@@ -915,21 +915,21 @@ entry:
}
define i32 @and_ule_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ule_sgt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #4, ls
-; SDISEL-NEXT: cset w0, gt
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ule_sgt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ls
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, gt
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ule_sgt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #4, ls
+; CHECK-SD-NEXT: cset w0, gt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ule_sgt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ls
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, gt
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ule i32 %s0, %s1
%c1 = icmp sgt i32 %s2, %s3
@@ -939,21 +939,21 @@ entry:
}
define i32 @and_ule_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ule_sge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #8, ls
-; SDISEL-NEXT: cset w0, ge
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ule_sge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ls
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ge
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ule_sge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #8, ls
+; CHECK-SD-NEXT: cset w0, ge
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ule_sge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ls
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ge
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ule i32 %s0, %s1
%c1 = icmp sge i32 %s2, %s3
@@ -963,21 +963,21 @@ entry:
}
define i32 @and_ugt_eq(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ugt_eq:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, hi
-; SDISEL-NEXT: cset w0, eq
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ugt_eq:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hi
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, eq
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ugt_eq:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, hi
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ugt_eq:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hi
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, eq
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ugt i32 %s0, %s1
%c1 = icmp eq i32 %s2, %s3
@@ -987,21 +987,21 @@ entry:
}
define i32 @and_ugt_ne(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ugt_ne:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #4, hi
-; SDISEL-NEXT: cset w0, ne
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ugt_ne:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hi
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ne
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ugt_ne:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #4, hi
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ugt_ne:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hi
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ne
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ugt i32 %s0, %s1
%c1 = icmp ne i32 %s2, %s3
@@ -1011,21 +1011,21 @@ entry:
}
define i32 @and_ugt_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ugt_ult:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, hi
-; SDISEL-NEXT: cset w0, lo
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ugt_ult:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hi
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lo
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ugt_ult:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, hi
+; CHECK-SD-NEXT: cset w0, lo
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ugt_ult:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hi
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lo
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ugt i32 %s0, %s1
%c1 = icmp ult i32 %s2, %s3
@@ -1035,21 +1035,21 @@ entry:
}
define i32 @and_ugt_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ugt_ule:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, hi
-; SDISEL-NEXT: cset w0, ls
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ugt_ule:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hi
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ls
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ugt_ule:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, hi
+; CHECK-SD-NEXT: cset w0, ls
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ugt_ule:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hi
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ls
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ugt i32 %s0, %s1
%c1 = icmp ule i32 %s2, %s3
@@ -1059,21 +1059,21 @@ entry:
}
define i32 @and_ugt_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ugt_ugt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, hi
-; SDISEL-NEXT: cset w0, hi
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ugt_ugt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hi
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hi
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ugt_ugt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, hi
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ugt_ugt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hi
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hi
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ugt i32 %s0, %s1
%c1 = icmp ugt i32 %s2, %s3
@@ -1083,21 +1083,21 @@ entry:
}
define i32 @and_ugt_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ugt_uge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, hi
-; SDISEL-NEXT: cset w0, hs
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ugt_uge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hi
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hs
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ugt_uge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, hi
+; CHECK-SD-NEXT: cset w0, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ugt_uge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hi
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hs
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ugt i32 %s0, %s1
%c1 = icmp uge i32 %s2, %s3
@@ -1107,21 +1107,21 @@ entry:
}
define i32 @and_ugt_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ugt_slt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, hi
-; SDISEL-NEXT: cset w0, lt
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ugt_slt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hi
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lt
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ugt_slt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, hi
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ugt_slt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hi
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lt
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ugt i32 %s0, %s1
%c1 = icmp slt i32 %s2, %s3
@@ -1131,21 +1131,21 @@ entry:
}
define i32 @and_ugt_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ugt_sle:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, hi
-; SDISEL-NEXT: cset w0, le
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ugt_sle:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hi
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, le
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ugt_sle:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, hi
+; CHECK-SD-NEXT: cset w0, le
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ugt_sle:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hi
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, le
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ugt i32 %s0, %s1
%c1 = icmp sle i32 %s2, %s3
@@ -1155,21 +1155,21 @@ entry:
}
define i32 @and_ugt_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ugt_sgt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #4, hi
-; SDISEL-NEXT: cset w0, gt
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ugt_sgt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hi
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, gt
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ugt_sgt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #4, hi
+; CHECK-SD-NEXT: cset w0, gt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ugt_sgt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hi
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, gt
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ugt i32 %s0, %s1
%c1 = icmp sgt i32 %s2, %s3
@@ -1179,21 +1179,21 @@ entry:
}
define i32 @and_ugt_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_ugt_sge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #8, hi
-; SDISEL-NEXT: cset w0, ge
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_ugt_sge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hi
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ge
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ugt_sge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #8, hi
+; CHECK-SD-NEXT: cset w0, ge
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_ugt_sge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hi
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ge
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ugt i32 %s0, %s1
%c1 = icmp sge i32 %s2, %s3
@@ -1203,21 +1203,21 @@ entry:
}
define i32 @and_uge_eq(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_uge_eq:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, hs
-; SDISEL-NEXT: cset w0, eq
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_uge_eq:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hs
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, eq
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_uge_eq:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, hs
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_uge_eq:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hs
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, eq
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp uge i32 %s0, %s1
%c1 = icmp eq i32 %s2, %s3
@@ -1227,21 +1227,21 @@ entry:
}
define i32 @and_uge_ne(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_uge_ne:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #4, hs
-; SDISEL-NEXT: cset w0, ne
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_uge_ne:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hs
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ne
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_uge_ne:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #4, hs
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_uge_ne:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hs
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ne
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp uge i32 %s0, %s1
%c1 = icmp ne i32 %s2, %s3
@@ -1251,21 +1251,21 @@ entry:
}
define i32 @and_uge_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_uge_ult:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, hs
-; SDISEL-NEXT: cset w0, lo
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_uge_ult:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hs
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lo
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_uge_ult:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, hs
+; CHECK-SD-NEXT: cset w0, lo
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_uge_ult:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hs
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lo
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp uge i32 %s0, %s1
%c1 = icmp ult i32 %s2, %s3
@@ -1275,21 +1275,21 @@ entry:
}
define i32 @and_uge_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_uge_ule:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, hs
-; SDISEL-NEXT: cset w0, ls
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_uge_ule:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hs
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ls
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_uge_ule:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, hs
+; CHECK-SD-NEXT: cset w0, ls
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_uge_ule:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hs
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ls
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp uge i32 %s0, %s1
%c1 = icmp ule i32 %s2, %s3
@@ -1299,21 +1299,21 @@ entry:
}
define i32 @and_uge_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_uge_ugt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, hs
-; SDISEL-NEXT: cset w0, hi
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_uge_ugt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hs
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hi
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_uge_ugt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, hs
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_uge_ugt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hs
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hi
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp uge i32 %s0, %s1
%c1 = icmp ugt i32 %s2, %s3
@@ -1323,21 +1323,21 @@ entry:
}
define i32 @and_uge_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_uge_uge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, hs
-; SDISEL-NEXT: cset w0, hs
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_uge_uge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hs
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hs
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_uge_uge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, hs
+; CHECK-SD-NEXT: cset w0, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_uge_uge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hs
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hs
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp uge i32 %s0, %s1
%c1 = icmp uge i32 %s2, %s3
@@ -1347,21 +1347,21 @@ entry:
}
define i32 @and_uge_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_uge_slt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, hs
-; SDISEL-NEXT: cset w0, lt
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_uge_slt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hs
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lt
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_uge_slt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, hs
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_uge_slt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hs
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lt
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp uge i32 %s0, %s1
%c1 = icmp slt i32 %s2, %s3
@@ -1371,21 +1371,21 @@ entry:
}
define i32 @and_uge_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_uge_sle:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, hs
-; SDISEL-NEXT: cset w0, le
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_uge_sle:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hs
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, le
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_uge_sle:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, hs
+; CHECK-SD-NEXT: cset w0, le
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_uge_sle:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hs
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, le
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp uge i32 %s0, %s1
%c1 = icmp sle i32 %s2, %s3
@@ -1395,21 +1395,21 @@ entry:
}
define i32 @and_uge_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_uge_sgt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #4, hs
-; SDISEL-NEXT: cset w0, gt
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_uge_sgt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hs
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, gt
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_uge_sgt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #4, hs
+; CHECK-SD-NEXT: cset w0, gt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_uge_sgt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hs
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, gt
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp uge i32 %s0, %s1
%c1 = icmp sgt i32 %s2, %s3
@@ -1419,21 +1419,21 @@ entry:
}
define i32 @and_uge_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_uge_sge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #8, hs
-; SDISEL-NEXT: cset w0, ge
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_uge_sge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hs
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ge
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_uge_sge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #8, hs
+; CHECK-SD-NEXT: cset w0, ge
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_uge_sge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hs
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ge
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp uge i32 %s0, %s1
%c1 = icmp sge i32 %s2, %s3
@@ -1443,21 +1443,21 @@ entry:
}
define i32 @and_slt_eq(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_slt_eq:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, lt
-; SDISEL-NEXT: cset w0, eq
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_slt_eq:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, eq
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_slt_eq:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, lt
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_slt_eq:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, eq
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp slt i32 %s0, %s1
%c1 = icmp eq i32 %s2, %s3
@@ -1467,21 +1467,21 @@ entry:
}
define i32 @and_slt_ne(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_slt_ne:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #4, lt
-; SDISEL-NEXT: cset w0, ne
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_slt_ne:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ne
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_slt_ne:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #4, lt
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_slt_ne:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ne
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp slt i32 %s0, %s1
%c1 = icmp ne i32 %s2, %s3
@@ -1491,21 +1491,21 @@ entry:
}
define i32 @and_slt_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_slt_ult:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, lt
-; SDISEL-NEXT: cset w0, lo
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_slt_ult:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lo
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_slt_ult:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, lt
+; CHECK-SD-NEXT: cset w0, lo
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_slt_ult:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lo
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp slt i32 %s0, %s1
%c1 = icmp ult i32 %s2, %s3
@@ -1515,21 +1515,21 @@ entry:
}
define i32 @and_slt_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_slt_ule:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, lt
-; SDISEL-NEXT: cset w0, ls
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_slt_ule:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ls
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_slt_ule:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, lt
+; CHECK-SD-NEXT: cset w0, ls
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_slt_ule:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ls
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp slt i32 %s0, %s1
%c1 = icmp ule i32 %s2, %s3
@@ -1539,21 +1539,21 @@ entry:
}
define i32 @and_slt_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_slt_ugt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, lt
-; SDISEL-NEXT: cset w0, hi
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_slt_ugt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hi
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_slt_ugt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, lt
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_slt_ugt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hi
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp slt i32 %s0, %s1
%c1 = icmp ugt i32 %s2, %s3
@@ -1563,21 +1563,21 @@ entry:
}
define i32 @and_slt_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_slt_uge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, lt
-; SDISEL-NEXT: cset w0, hs
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_slt_uge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hs
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_slt_uge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, lt
+; CHECK-SD-NEXT: cset w0, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_slt_uge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hs
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp slt i32 %s0, %s1
%c1 = icmp uge i32 %s2, %s3
@@ -1587,21 +1587,21 @@ entry:
}
define i32 @and_slt_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_slt_slt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, lt
-; SDISEL-NEXT: cset w0, lt
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_slt_slt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lt
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_slt_slt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, lt
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_slt_slt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lt
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp slt i32 %s0, %s1
%c1 = icmp slt i32 %s2, %s3
@@ -1611,21 +1611,21 @@ entry:
}
define i32 @and_slt_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_slt_sle:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, lt
-; SDISEL-NEXT: cset w0, le
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_slt_sle:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, le
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_slt_sle:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, lt
+; CHECK-SD-NEXT: cset w0, le
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_slt_sle:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, le
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp slt i32 %s0, %s1
%c1 = icmp sle i32 %s2, %s3
@@ -1635,21 +1635,21 @@ entry:
}
define i32 @and_slt_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_slt_sgt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #4, lt
-; SDISEL-NEXT: cset w0, gt
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_slt_sgt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, gt
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_slt_sgt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #4, lt
+; CHECK-SD-NEXT: cset w0, gt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_slt_sgt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, gt
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp slt i32 %s0, %s1
%c1 = icmp sgt i32 %s2, %s3
@@ -1659,21 +1659,21 @@ entry:
}
define i32 @and_slt_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_slt_sge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #8, lt
-; SDISEL-NEXT: cset w0, ge
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_slt_sge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ge
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_slt_sge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #8, lt
+; CHECK-SD-NEXT: cset w0, ge
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_slt_sge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ge
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp slt i32 %s0, %s1
%c1 = icmp sge i32 %s2, %s3
@@ -1683,21 +1683,21 @@ entry:
}
define i32 @and_sle_eq(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sle_eq:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, le
-; SDISEL-NEXT: cset w0, eq
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sle_eq:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, le
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, eq
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sle_eq:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, le
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sle_eq:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, le
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, eq
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sle i32 %s0, %s1
%c1 = icmp eq i32 %s2, %s3
@@ -1707,21 +1707,21 @@ entry:
}
define i32 @and_sle_ne(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sle_ne:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #4, le
-; SDISEL-NEXT: cset w0, ne
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sle_ne:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, le
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ne
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sle_ne:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #4, le
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sle_ne:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, le
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ne
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sle i32 %s0, %s1
%c1 = icmp ne i32 %s2, %s3
@@ -1731,21 +1731,21 @@ entry:
}
define i32 @and_sle_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sle_ult:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, le
-; SDISEL-NEXT: cset w0, lo
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sle_ult:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, le
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lo
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sle_ult:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, le
+; CHECK-SD-NEXT: cset w0, lo
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sle_ult:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, le
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lo
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sle i32 %s0, %s1
%c1 = icmp ult i32 %s2, %s3
@@ -1755,21 +1755,21 @@ entry:
}
define i32 @and_sle_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sle_ule:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, le
-; SDISEL-NEXT: cset w0, ls
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sle_ule:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, le
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ls
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sle_ule:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, le
+; CHECK-SD-NEXT: cset w0, ls
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sle_ule:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, le
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ls
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sle i32 %s0, %s1
%c1 = icmp ule i32 %s2, %s3
@@ -1779,21 +1779,21 @@ entry:
}
define i32 @and_sle_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sle_ugt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, le
-; SDISEL-NEXT: cset w0, hi
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sle_ugt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, le
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hi
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sle_ugt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, le
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sle_ugt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, le
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hi
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sle i32 %s0, %s1
%c1 = icmp ugt i32 %s2, %s3
@@ -1803,21 +1803,21 @@ entry:
}
define i32 @and_sle_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sle_uge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, le
-; SDISEL-NEXT: cset w0, hs
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sle_uge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, le
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hs
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sle_uge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, le
+; CHECK-SD-NEXT: cset w0, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sle_uge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, le
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hs
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sle i32 %s0, %s1
%c1 = icmp uge i32 %s2, %s3
@@ -1827,21 +1827,21 @@ entry:
}
define i32 @and_sle_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sle_slt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, le
-; SDISEL-NEXT: cset w0, lt
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sle_slt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, le
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lt
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sle_slt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, le
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sle_slt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, le
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lt
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sle i32 %s0, %s1
%c1 = icmp slt i32 %s2, %s3
@@ -1851,21 +1851,21 @@ entry:
}
define i32 @and_sle_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sle_sle:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, le
-; SDISEL-NEXT: cset w0, le
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sle_sle:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, le
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, le
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sle_sle:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, le
+; CHECK-SD-NEXT: cset w0, le
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sle_sle:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, le
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, le
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sle i32 %s0, %s1
%c1 = icmp sle i32 %s2, %s3
@@ -1875,21 +1875,21 @@ entry:
}
define i32 @and_sle_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sle_sgt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #4, le
-; SDISEL-NEXT: cset w0, gt
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sle_sgt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, le
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, gt
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sle_sgt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #4, le
+; CHECK-SD-NEXT: cset w0, gt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sle_sgt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, le
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, gt
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sle i32 %s0, %s1
%c1 = icmp sgt i32 %s2, %s3
@@ -1899,21 +1899,21 @@ entry:
}
define i32 @and_sle_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sle_sge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #8, le
-; SDISEL-NEXT: cset w0, ge
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sle_sge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, le
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ge
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sle_sge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #8, le
+; CHECK-SD-NEXT: cset w0, ge
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sle_sge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, le
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ge
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sle i32 %s0, %s1
%c1 = icmp sge i32 %s2, %s3
@@ -1923,21 +1923,21 @@ entry:
}
define i32 @and_sgt_eq(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sgt_eq:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, gt
-; SDISEL-NEXT: cset w0, eq
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sgt_eq:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, gt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, eq
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sgt_eq:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, gt
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sgt_eq:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, gt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, eq
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sgt i32 %s0, %s1
%c1 = icmp eq i32 %s2, %s3
@@ -1947,21 +1947,21 @@ entry:
}
define i32 @and_sgt_ne(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sgt_ne:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #4, gt
-; SDISEL-NEXT: cset w0, ne
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sgt_ne:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, gt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ne
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sgt_ne:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #4, gt
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sgt_ne:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, gt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ne
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sgt i32 %s0, %s1
%c1 = icmp ne i32 %s2, %s3
@@ -1971,21 +1971,21 @@ entry:
}
define i32 @and_sgt_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sgt_ult:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, gt
-; SDISEL-NEXT: cset w0, lo
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sgt_ult:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, gt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lo
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sgt_ult:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, gt
+; CHECK-SD-NEXT: cset w0, lo
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sgt_ult:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, gt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lo
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sgt i32 %s0, %s1
%c1 = icmp ult i32 %s2, %s3
@@ -1995,21 +1995,21 @@ entry:
}
define i32 @and_sgt_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sgt_ule:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, gt
-; SDISEL-NEXT: cset w0, ls
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sgt_ule:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, gt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ls
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sgt_ule:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, gt
+; CHECK-SD-NEXT: cset w0, ls
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sgt_ule:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, gt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ls
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sgt i32 %s0, %s1
%c1 = icmp ule i32 %s2, %s3
@@ -2019,21 +2019,21 @@ entry:
}
define i32 @and_sgt_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sgt_ugt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, gt
-; SDISEL-NEXT: cset w0, hi
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sgt_ugt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, gt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hi
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sgt_ugt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, gt
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sgt_ugt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, gt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hi
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sgt i32 %s0, %s1
%c1 = icmp ugt i32 %s2, %s3
@@ -2043,21 +2043,21 @@ entry:
}
define i32 @and_sgt_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sgt_uge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, gt
-; SDISEL-NEXT: cset w0, hs
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sgt_uge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, gt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hs
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sgt_uge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, gt
+; CHECK-SD-NEXT: cset w0, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sgt_uge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, gt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hs
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sgt i32 %s0, %s1
%c1 = icmp uge i32 %s2, %s3
@@ -2067,21 +2067,21 @@ entry:
}
define i32 @and_sgt_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sgt_slt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, gt
-; SDISEL-NEXT: cset w0, lt
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sgt_slt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, gt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lt
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sgt_slt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, gt
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sgt_slt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, gt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lt
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sgt i32 %s0, %s1
%c1 = icmp slt i32 %s2, %s3
@@ -2091,21 +2091,21 @@ entry:
}
define i32 @and_sgt_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sgt_sle:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, gt
-; SDISEL-NEXT: cset w0, le
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sgt_sle:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, gt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, le
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sgt_sle:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, gt
+; CHECK-SD-NEXT: cset w0, le
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sgt_sle:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, gt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, le
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sgt i32 %s0, %s1
%c1 = icmp sle i32 %s2, %s3
@@ -2115,21 +2115,21 @@ entry:
}
define i32 @and_sgt_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sgt_sgt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #4, gt
-; SDISEL-NEXT: cset w0, gt
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sgt_sgt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, gt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, gt
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sgt_sgt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #4, gt
+; CHECK-SD-NEXT: cset w0, gt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sgt_sgt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, gt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, gt
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sgt i32 %s0, %s1
%c1 = icmp sgt i32 %s2, %s3
@@ -2139,21 +2139,21 @@ entry:
}
define i32 @and_sgt_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sgt_sge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #8, gt
-; SDISEL-NEXT: cset w0, ge
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sgt_sge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, gt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ge
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sgt_sge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #8, gt
+; CHECK-SD-NEXT: cset w0, ge
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sgt_sge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, gt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ge
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sgt i32 %s0, %s1
%c1 = icmp sge i32 %s2, %s3
@@ -2163,21 +2163,21 @@ entry:
}
define i32 @and_sge_eq(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sge_eq:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, ge
-; SDISEL-NEXT: cset w0, eq
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sge_eq:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ge
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, eq
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sge_eq:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, ge
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sge_eq:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ge
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, eq
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sge i32 %s0, %s1
%c1 = icmp eq i32 %s2, %s3
@@ -2187,21 +2187,21 @@ entry:
}
define i32 @and_sge_ne(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sge_ne:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #4, ge
-; SDISEL-NEXT: cset w0, ne
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sge_ne:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ge
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ne
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sge_ne:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #4, ge
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sge_ne:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ge
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ne
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sge i32 %s0, %s1
%c1 = icmp ne i32 %s2, %s3
@@ -2211,21 +2211,21 @@ entry:
}
define i32 @and_sge_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sge_ult:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, ge
-; SDISEL-NEXT: cset w0, lo
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sge_ult:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ge
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lo
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sge_ult:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, ge
+; CHECK-SD-NEXT: cset w0, lo
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sge_ult:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ge
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lo
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sge i32 %s0, %s1
%c1 = icmp ult i32 %s2, %s3
@@ -2235,21 +2235,21 @@ entry:
}
define i32 @and_sge_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sge_ule:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, ge
-; SDISEL-NEXT: cset w0, ls
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sge_ule:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ge
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ls
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sge_ule:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, ge
+; CHECK-SD-NEXT: cset w0, ls
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sge_ule:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ge
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ls
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sge i32 %s0, %s1
%c1 = icmp ule i32 %s2, %s3
@@ -2259,21 +2259,21 @@ entry:
}
define i32 @and_sge_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sge_ugt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, ge
-; SDISEL-NEXT: cset w0, hi
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sge_ugt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ge
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hi
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sge_ugt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, ge
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sge_ugt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ge
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hi
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sge i32 %s0, %s1
%c1 = icmp ugt i32 %s2, %s3
@@ -2283,21 +2283,21 @@ entry:
}
define i32 @and_sge_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sge_uge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, ge
-; SDISEL-NEXT: cset w0, hs
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sge_uge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ge
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hs
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sge_uge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, ge
+; CHECK-SD-NEXT: cset w0, hs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sge_uge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ge
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hs
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sge i32 %s0, %s1
%c1 = icmp uge i32 %s2, %s3
@@ -2307,21 +2307,21 @@ entry:
}
define i32 @and_sge_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sge_slt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, ge
-; SDISEL-NEXT: cset w0, lt
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sge_slt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ge
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lt
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sge_slt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, ge
+; CHECK-SD-NEXT: cset w0, lt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sge_slt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ge
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lt
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sge i32 %s0, %s1
%c1 = icmp slt i32 %s2, %s3
@@ -2331,21 +2331,21 @@ entry:
}
define i32 @and_sge_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sge_sle:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, ge
-; SDISEL-NEXT: cset w0, le
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sge_sle:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ge
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, le
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sge_sle:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, ge
+; CHECK-SD-NEXT: cset w0, le
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sge_sle:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ge
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, le
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sge i32 %s0, %s1
%c1 = icmp sle i32 %s2, %s3
@@ -2355,21 +2355,21 @@ entry:
}
define i32 @and_sge_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sge_sgt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #4, ge
-; SDISEL-NEXT: cset w0, gt
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sge_sgt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ge
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, gt
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sge_sgt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #4, ge
+; CHECK-SD-NEXT: cset w0, gt
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sge_sgt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ge
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, gt
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sge i32 %s0, %s1
%c1 = icmp sgt i32 %s2, %s3
@@ -2379,21 +2379,21 @@ entry:
}
define i32 @and_sge_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3) {
-; SDISEL-LABEL: and_sge_sge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #8, ge
-; SDISEL-NEXT: cset w0, ge
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_sge_sge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ge
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ge
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sge_sge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #8, ge
+; CHECK-SD-NEXT: cset w0, ge
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_sge_sge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ge
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ge
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sge i32 %s0, %s1
%c1 = icmp sge i32 %s2, %s3
@@ -2403,19 +2403,19 @@ entry:
}
define i32 @cmp_to_ands1(i32 %num) {
-; SDISEL-LABEL: cmp_to_ands1:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: and w8, w0, #0xff
-; SDISEL-NEXT: tst w0, #0xfe
-; SDISEL-NEXT: csel w0, w8, wzr, ne
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: cmp_to_ands1:
-; GISEL: // %bb.0:
-; GISEL-NEXT: and w8, w0, #0xff
-; GISEL-NEXT: cmp w8, #1
-; GISEL-NEXT: csel w0, w8, wzr, hi
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: cmp_to_ands1:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xff
+; CHECK-SD-NEXT: tst w0, #0xfe
+; CHECK-SD-NEXT: csel w0, w8, wzr, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: cmp_to_ands1:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: and w8, w0, #0xff
+; CHECK-GI-NEXT: cmp w8, #1
+; CHECK-GI-NEXT: csel w0, w8, wzr, hi
+; CHECK-GI-NEXT: ret
%and = and i32 %num, 255
%cmp = icmp ugt i32 %and, 1
%r = select i1 %cmp, i32 %and, i32 0
@@ -2423,19 +2423,19 @@ define i32 @cmp_to_ands1(i32 %num) {
}
define i32 @cmp_to_ands2(i32 %num) {
-; SDISEL-LABEL: cmp_to_ands2:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: and w8, w0, #0xfe
-; SDISEL-NEXT: tst w0, #0xc0
-; SDISEL-NEXT: csel w0, w8, wzr, ne
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: cmp_to_ands2:
-; GISEL: // %bb.0:
-; GISEL-NEXT: and w8, w0, #0xfe
-; GISEL-NEXT: cmp w8, #63
-; GISEL-NEXT: csel w0, w8, wzr, hi
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: cmp_to_ands2:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xfe
+; CHECK-SD-NEXT: tst w0, #0xc0
+; CHECK-SD-NEXT: csel w0, w8, wzr, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: cmp_to_ands2:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: and w8, w0, #0xfe
+; CHECK-GI-NEXT: cmp w8, #63
+; CHECK-GI-NEXT: csel w0, w8, wzr, hi
+; CHECK-GI-NEXT: ret
%and = and i32 %num, 254
%cmp = icmp ugt i32 %and, 63
%r = select i1 %cmp, i32 %and, i32 0
@@ -2443,19 +2443,19 @@ define i32 @cmp_to_ands2(i32 %num) {
}
define i32 @cmp_to_ands3(i32 %num, i32 %a) {
-; SDISEL-LABEL: cmp_to_ands3:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: tst w0, #0x10
-; SDISEL-NEXT: csel w0, w1, wzr, ne
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: cmp_to_ands3:
-; GISEL: // %bb.0:
-; GISEL-NEXT: mov w8, #23 // =0x17
-; GISEL-NEXT: and w8, w0, w8
-; GISEL-NEXT: cmp w8, #7
-; GISEL-NEXT: csel w0, w1, wzr, hi
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: cmp_to_ands3:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: tst w0, #0x10
+; CHECK-SD-NEXT: csel w0, w1, wzr, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: cmp_to_ands3:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: mov w8, #23 // =0x17
+; CHECK-GI-NEXT: and w8, w0, w8
+; CHECK-GI-NEXT: cmp w8, #7
+; CHECK-GI-NEXT: csel w0, w1, wzr, hi
+; CHECK-GI-NEXT: ret
%and = and i32 %num, 23
%cmp = icmp ugt i32 %and, 7
%r = select i1 %cmp, i32 %a, i32 0
@@ -2463,19 +2463,19 @@ define i32 @cmp_to_ands3(i32 %num, i32 %a) {
}
define i32 @cmp_to_ands4(i32 %num, i32 %a) {
-; SDISEL-LABEL: cmp_to_ands4:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: and w8, w0, #0x30
-; SDISEL-NEXT: tst w0, #0x20
-; SDISEL-NEXT: csel w0, w8, w1, eq
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: cmp_to_ands4:
-; GISEL: // %bb.0:
-; GISEL-NEXT: and w8, w0, #0x30
-; GISEL-NEXT: cmp w8, #31
-; GISEL-NEXT: csel w0, w8, w1, ls
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: cmp_to_ands4:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0x30
+; CHECK-SD-NEXT: tst w0, #0x20
+; CHECK-SD-NEXT: csel w0, w8, w1, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: cmp_to_ands4:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: and w8, w0, #0x30
+; CHECK-GI-NEXT: cmp w8, #31
+; CHECK-GI-NEXT: csel w0, w8, w1, ls
+; CHECK-GI-NEXT: ret
%and = and i32 %num, 48
%cmp = icmp ule i32 %and, 31
%r = select i1 %cmp, i32 %and, i32 %a
@@ -2483,19 +2483,19 @@ define i32 @cmp_to_ands4(i32 %num, i32 %a) {
}
define i32 @cmp_to_ands5(i32 %num, i32 %a) {
-; SDISEL-LABEL: cmp_to_ands5:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: and w8, w0, #0xf8
-; SDISEL-NEXT: tst w0, #0xc0
-; SDISEL-NEXT: csel w0, w8, w1, eq
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: cmp_to_ands5:
-; GISEL: // %bb.0:
-; GISEL-NEXT: and w8, w0, #0xf8
-; GISEL-NEXT: cmp w8, #64
-; GISEL-NEXT: csel w0, w8, w1, lo
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: cmp_to_ands5:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xf8
+; CHECK-SD-NEXT: tst w0, #0xc0
+; CHECK-SD-NEXT: csel w0, w8, w1, eq
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: cmp_to_ands5:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: and w8, w0, #0xf8
+; CHECK-GI-NEXT: cmp w8, #64
+; CHECK-GI-NEXT: csel w0, w8, w1, lo
+; CHECK-GI-NEXT: ret
%and = and i32 %num, 248
%cmp = icmp ult i32 %and, 64
%r = select i1 %cmp, i32 %and, i32 %a
@@ -2503,19 +2503,19 @@ define i32 @cmp_to_ands5(i32 %num, i32 %a) {
}
define i32 @cmp_to_ands6(i32 %num) {
-; SDISEL-LABEL: cmp_to_ands6:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: and w8, w0, #0xfe
-; SDISEL-NEXT: tst w0, #0xf0
-; SDISEL-NEXT: csel w0, w8, wzr, ne
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: cmp_to_ands6:
-; GISEL: // %bb.0:
-; GISEL-NEXT: and w8, w0, #0xfe
-; GISEL-NEXT: cmp w8, #16
-; GISEL-NEXT: csel w0, w8, wzr, hs
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: cmp_to_ands6:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: and w8, w0, #0xfe
+; CHECK-SD-NEXT: tst w0, #0xf0
+; CHECK-SD-NEXT: csel w0, w8, wzr, ne
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: cmp_to_ands6:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: and w8, w0, #0xfe
+; CHECK-GI-NEXT: cmp w8, #16
+; CHECK-GI-NEXT: csel w0, w8, wzr, hs
+; CHECK-GI-NEXT: ret
%and = and i32 %num, 254
%cmp = icmp uge i32 %and, 16
%r = select i1 %cmp, i32 %and, i32 0
@@ -2523,21 +2523,21 @@ define i32 @cmp_to_ands6(i32 %num) {
}
define i1 @and_fcmp(float %0, float %1) {
-; SDISEL-LABEL: and_fcmp:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: fcmp s1, s1
-; SDISEL-NEXT: fccmp s0, s0, #0, vs
-; SDISEL-NEXT: cset w0, vs
-; SDISEL-NEXT: ret
-;
-; GISEL-LABEL: and_fcmp:
-; GISEL: // %bb.0:
-; GISEL-NEXT: fcmp s0, #0.0
-; GISEL-NEXT: cset w8, vs
-; GISEL-NEXT: fcmp s1, #0.0
-; GISEL-NEXT: cset w9, vs
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-SD-LABEL: and_fcmp:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: fcmp s1, s1
+; CHECK-SD-NEXT: fccmp s0, s0, #0, vs
+; CHECK-SD-NEXT: cset w0, vs
+; CHECK-SD-NEXT: ret
+;
+; CHECK-GI-LABEL: and_fcmp:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: fcmp s0, #0.0
+; CHECK-GI-NEXT: cset w8, vs
+; CHECK-GI-NEXT: fcmp s1, #0.0
+; CHECK-GI-NEXT: cset w9, vs
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
%3 = fcmp uno float %0, 0.000000e+00
%4 = fcmp uno float %1, 0.000000e+00
diff --git a/llvm/test/CodeGen/AArch64/andorbrcompare.ll b/llvm/test/CodeGen/AArch64/andorbrcompare.ll
index 951a5cd..5bc06ec 100644
--- a/llvm/test/CodeGen/AArch64/andorbrcompare.ll
+++ b/llvm/test/CodeGen/AArch64/andorbrcompare.ll
@@ -1,44 +1,44 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=aarch64-none-elf -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,SDISEL
-; RUN: llc -mtriple=aarch64-none-elf -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,GISEL
+; RUN: llc -mtriple=aarch64-none-elf -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple=aarch64-none-elf -global-isel -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,CHECK-GI
declare void @dummy()
define i32 @and_eq_ne_ult(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) {
-; SDISEL-LABEL: and_eq_ne_ult:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w2, w3
-; SDISEL-NEXT: ccmp w0, w1, #0, ne
-; SDISEL-NEXT: b.eq .LBB0_3
-; SDISEL-NEXT: // %bb.1: // %entry
-; SDISEL-NEXT: cmp w4, w5
-; SDISEL-NEXT: b.lo .LBB0_3
-; SDISEL-NEXT: // %bb.2:
-; SDISEL-NEXT: mov w0, wzr
-; SDISEL-NEXT: ret
-; SDISEL-NEXT: .LBB0_3: // %if
-; SDISEL-NEXT: mov w0, #1 // =0x1
-; SDISEL-NEXT: str w0, [x6]
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: and_eq_ne_ult:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w2, w3
+; CHECK-SD-NEXT: ccmp w0, w1, #0, ne
+; CHECK-SD-NEXT: b.eq .LBB0_3
+; CHECK-SD-NEXT: // %bb.1: // %entry
+; CHECK-SD-NEXT: cmp w4, w5
+; CHECK-SD-NEXT: b.lo .LBB0_3
+; CHECK-SD-NEXT: // %bb.2:
+; CHECK-SD-NEXT: mov w0, wzr
+; CHECK-SD-NEXT: ret
+; CHECK-SD-NEXT: .LBB0_3: // %if
+; CHECK-SD-NEXT: mov w0, #1 // =0x1
+; CHECK-SD-NEXT: str w0, [x6]
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: and_eq_ne_ult:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, eq
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ne
-; GISEL-NEXT: and w8, w8, w9
-; GISEL-NEXT: tbnz w8, #0, .LBB0_3
-; GISEL-NEXT: // %bb.1: // %entry
-; GISEL-NEXT: cmp w4, w5
-; GISEL-NEXT: mov w0, wzr
-; GISEL-NEXT: b.lo .LBB0_3
-; GISEL-NEXT: // %bb.2: // %common.ret
-; GISEL-NEXT: ret
-; GISEL-NEXT: .LBB0_3: // %if
-; GISEL-NEXT: mov w0, #1 // =0x1
-; GISEL-NEXT: str w0, [x6]
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: and_eq_ne_ult:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, eq
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ne
+; CHECK-GI-NEXT: and w8, w8, w9
+; CHECK-GI-NEXT: tbnz w8, #0, .LBB0_3
+; CHECK-GI-NEXT: // %bb.1: // %entry
+; CHECK-GI-NEXT: cmp w4, w5
+; CHECK-GI-NEXT: mov w0, wzr
+; CHECK-GI-NEXT: b.lo .LBB0_3
+; CHECK-GI-NEXT: // %bb.2: // %common.ret
+; CHECK-GI-NEXT: ret
+; CHECK-GI-NEXT: .LBB0_3: // %if
+; CHECK-GI-NEXT: mov w0, #1 // =0x1
+; CHECK-GI-NEXT: str w0, [x6]
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp eq i32 %s0, %s1
%c1 = icmp ne i32 %s2, %s3
@@ -56,40 +56,40 @@ else:
}
define i32 @and_ne_ult_ule(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) {
-; SDISEL-LABEL: and_ne_ult_ule:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w2, w3
-; SDISEL-NEXT: ccmp w0, w1, #4, lo
-; SDISEL-NEXT: b.ne .LBB1_3
-; SDISEL-NEXT: // %bb.1: // %entry
-; SDISEL-NEXT: cmp w4, w5
-; SDISEL-NEXT: b.ls .LBB1_3
-; SDISEL-NEXT: // %bb.2:
-; SDISEL-NEXT: mov w0, wzr
-; SDISEL-NEXT: ret
-; SDISEL-NEXT: .LBB1_3: // %if
-; SDISEL-NEXT: mov w0, #1 // =0x1
-; SDISEL-NEXT: str w0, [x6]
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ne_ult_ule:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w2, w3
+; CHECK-SD-NEXT: ccmp w0, w1, #4, lo
+; CHECK-SD-NEXT: b.ne .LBB1_3
+; CHECK-SD-NEXT: // %bb.1: // %entry
+; CHECK-SD-NEXT: cmp w4, w5
+; CHECK-SD-NEXT: b.ls .LBB1_3
+; CHECK-SD-NEXT: // %bb.2:
+; CHECK-SD-NEXT: mov w0, wzr
+; CHECK-SD-NEXT: ret
+; CHECK-SD-NEXT: .LBB1_3: // %if
+; CHECK-SD-NEXT: mov w0, #1 // =0x1
+; CHECK-SD-NEXT: str w0, [x6]
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: and_ne_ult_ule:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ne
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lo
-; GISEL-NEXT: and w8, w8, w9
-; GISEL-NEXT: tbnz w8, #0, .LBB1_3
-; GISEL-NEXT: // %bb.1: // %entry
-; GISEL-NEXT: cmp w4, w5
-; GISEL-NEXT: mov w0, wzr
-; GISEL-NEXT: b.ls .LBB1_3
-; GISEL-NEXT: // %bb.2: // %common.ret
-; GISEL-NEXT: ret
-; GISEL-NEXT: .LBB1_3: // %if
-; GISEL-NEXT: mov w0, #1 // =0x1
-; GISEL-NEXT: str w0, [x6]
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: and_ne_ult_ule:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ne
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lo
+; CHECK-GI-NEXT: and w8, w8, w9
+; CHECK-GI-NEXT: tbnz w8, #0, .LBB1_3
+; CHECK-GI-NEXT: // %bb.1: // %entry
+; CHECK-GI-NEXT: cmp w4, w5
+; CHECK-GI-NEXT: mov w0, wzr
+; CHECK-GI-NEXT: b.ls .LBB1_3
+; CHECK-GI-NEXT: // %bb.2: // %common.ret
+; CHECK-GI-NEXT: ret
+; CHECK-GI-NEXT: .LBB1_3: // %if
+; CHECK-GI-NEXT: mov w0, #1 // =0x1
+; CHECK-GI-NEXT: str w0, [x6]
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ne i32 %s0, %s1
%c1 = icmp ult i32 %s2, %s3
@@ -107,40 +107,40 @@ else:
}
define i32 @and_ult_ule_ugt(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) {
-; SDISEL-LABEL: and_ult_ule_ugt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w2, w3
-; SDISEL-NEXT: ccmp w0, w1, #2, ls
-; SDISEL-NEXT: b.lo .LBB2_3
-; SDISEL-NEXT: // %bb.1: // %entry
-; SDISEL-NEXT: cmp w4, w5
-; SDISEL-NEXT: b.hi .LBB2_3
-; SDISEL-NEXT: // %bb.2:
-; SDISEL-NEXT: mov w0, wzr
-; SDISEL-NEXT: ret
-; SDISEL-NEXT: .LBB2_3: // %if
-; SDISEL-NEXT: mov w0, #1 // =0x1
-; SDISEL-NEXT: str w0, [x6]
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ult_ule_ugt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w2, w3
+; CHECK-SD-NEXT: ccmp w0, w1, #2, ls
+; CHECK-SD-NEXT: b.lo .LBB2_3
+; CHECK-SD-NEXT: // %bb.1: // %entry
+; CHECK-SD-NEXT: cmp w4, w5
+; CHECK-SD-NEXT: b.hi .LBB2_3
+; CHECK-SD-NEXT: // %bb.2:
+; CHECK-SD-NEXT: mov w0, wzr
+; CHECK-SD-NEXT: ret
+; CHECK-SD-NEXT: .LBB2_3: // %if
+; CHECK-SD-NEXT: mov w0, #1 // =0x1
+; CHECK-SD-NEXT: str w0, [x6]
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: and_ult_ule_ugt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lo
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ls
-; GISEL-NEXT: and w8, w8, w9
-; GISEL-NEXT: tbnz w8, #0, .LBB2_3
-; GISEL-NEXT: // %bb.1: // %entry
-; GISEL-NEXT: cmp w4, w5
-; GISEL-NEXT: mov w0, wzr
-; GISEL-NEXT: b.hi .LBB2_3
-; GISEL-NEXT: // %bb.2: // %common.ret
-; GISEL-NEXT: ret
-; GISEL-NEXT: .LBB2_3: // %if
-; GISEL-NEXT: mov w0, #1 // =0x1
-; GISEL-NEXT: str w0, [x6]
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: and_ult_ule_ugt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ls
+; CHECK-GI-NEXT: and w8, w8, w9
+; CHECK-GI-NEXT: tbnz w8, #0, .LBB2_3
+; CHECK-GI-NEXT: // %bb.1: // %entry
+; CHECK-GI-NEXT: cmp w4, w5
+; CHECK-GI-NEXT: mov w0, wzr
+; CHECK-GI-NEXT: b.hi .LBB2_3
+; CHECK-GI-NEXT: // %bb.2: // %common.ret
+; CHECK-GI-NEXT: ret
+; CHECK-GI-NEXT: .LBB2_3: // %if
+; CHECK-GI-NEXT: mov w0, #1 // =0x1
+; CHECK-GI-NEXT: str w0, [x6]
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ult i32 %s0, %s1
%c1 = icmp ule i32 %s2, %s3
@@ -158,40 +158,40 @@ else:
}
define i32 @and_ule_ugt_uge(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) {
-; SDISEL-LABEL: and_ule_ugt_uge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w2, w3
-; SDISEL-NEXT: ccmp w0, w1, #2, hi
-; SDISEL-NEXT: b.ls .LBB3_3
-; SDISEL-NEXT: // %bb.1: // %entry
-; SDISEL-NEXT: cmp w4, w5
-; SDISEL-NEXT: b.hs .LBB3_3
-; SDISEL-NEXT: // %bb.2:
-; SDISEL-NEXT: mov w0, wzr
-; SDISEL-NEXT: ret
-; SDISEL-NEXT: .LBB3_3: // %if
-; SDISEL-NEXT: mov w0, #1 // =0x1
-; SDISEL-NEXT: str w0, [x6]
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ule_ugt_uge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w2, w3
+; CHECK-SD-NEXT: ccmp w0, w1, #2, hi
+; CHECK-SD-NEXT: b.ls .LBB3_3
+; CHECK-SD-NEXT: // %bb.1: // %entry
+; CHECK-SD-NEXT: cmp w4, w5
+; CHECK-SD-NEXT: b.hs .LBB3_3
+; CHECK-SD-NEXT: // %bb.2:
+; CHECK-SD-NEXT: mov w0, wzr
+; CHECK-SD-NEXT: ret
+; CHECK-SD-NEXT: .LBB3_3: // %if
+; CHECK-SD-NEXT: mov w0, #1 // =0x1
+; CHECK-SD-NEXT: str w0, [x6]
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: and_ule_ugt_uge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, ls
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hi
-; GISEL-NEXT: and w8, w8, w9
-; GISEL-NEXT: tbnz w8, #0, .LBB3_3
-; GISEL-NEXT: // %bb.1: // %entry
-; GISEL-NEXT: cmp w4, w5
-; GISEL-NEXT: mov w0, wzr
-; GISEL-NEXT: b.hs .LBB3_3
-; GISEL-NEXT: // %bb.2: // %common.ret
-; GISEL-NEXT: ret
-; GISEL-NEXT: .LBB3_3: // %if
-; GISEL-NEXT: mov w0, #1 // =0x1
-; GISEL-NEXT: str w0, [x6]
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: and_ule_ugt_uge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, ls
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hi
+; CHECK-GI-NEXT: and w8, w8, w9
+; CHECK-GI-NEXT: tbnz w8, #0, .LBB3_3
+; CHECK-GI-NEXT: // %bb.1: // %entry
+; CHECK-GI-NEXT: cmp w4, w5
+; CHECK-GI-NEXT: mov w0, wzr
+; CHECK-GI-NEXT: b.hs .LBB3_3
+; CHECK-GI-NEXT: // %bb.2: // %common.ret
+; CHECK-GI-NEXT: ret
+; CHECK-GI-NEXT: .LBB3_3: // %if
+; CHECK-GI-NEXT: mov w0, #1 // =0x1
+; CHECK-GI-NEXT: str w0, [x6]
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ule i32 %s0, %s1
%c1 = icmp ugt i32 %s2, %s3
@@ -209,40 +209,40 @@ else:
}
define i32 @and_ugt_uge_slt(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) {
-; SDISEL-LABEL: and_ugt_uge_slt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w2, w3
-; SDISEL-NEXT: ccmp w0, w1, #0, hs
-; SDISEL-NEXT: b.hi .LBB4_3
-; SDISEL-NEXT: // %bb.1: // %entry
-; SDISEL-NEXT: cmp w4, w5
-; SDISEL-NEXT: b.lt .LBB4_3
-; SDISEL-NEXT: // %bb.2:
-; SDISEL-NEXT: mov w0, wzr
-; SDISEL-NEXT: ret
-; SDISEL-NEXT: .LBB4_3: // %if
-; SDISEL-NEXT: mov w0, #1 // =0x1
-; SDISEL-NEXT: str w0, [x6]
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: and_ugt_uge_slt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w2, w3
+; CHECK-SD-NEXT: ccmp w0, w1, #0, hs
+; CHECK-SD-NEXT: b.hi .LBB4_3
+; CHECK-SD-NEXT: // %bb.1: // %entry
+; CHECK-SD-NEXT: cmp w4, w5
+; CHECK-SD-NEXT: b.lt .LBB4_3
+; CHECK-SD-NEXT: // %bb.2:
+; CHECK-SD-NEXT: mov w0, wzr
+; CHECK-SD-NEXT: ret
+; CHECK-SD-NEXT: .LBB4_3: // %if
+; CHECK-SD-NEXT: mov w0, #1 // =0x1
+; CHECK-SD-NEXT: str w0, [x6]
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: and_ugt_uge_slt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hi
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hs
-; GISEL-NEXT: and w8, w8, w9
-; GISEL-NEXT: tbnz w8, #0, .LBB4_3
-; GISEL-NEXT: // %bb.1: // %entry
-; GISEL-NEXT: cmp w4, w5
-; GISEL-NEXT: mov w0, wzr
-; GISEL-NEXT: b.lt .LBB4_3
-; GISEL-NEXT: // %bb.2: // %common.ret
-; GISEL-NEXT: ret
-; GISEL-NEXT: .LBB4_3: // %if
-; GISEL-NEXT: mov w0, #1 // =0x1
-; GISEL-NEXT: str w0, [x6]
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: and_ugt_uge_slt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hi
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hs
+; CHECK-GI-NEXT: and w8, w8, w9
+; CHECK-GI-NEXT: tbnz w8, #0, .LBB4_3
+; CHECK-GI-NEXT: // %bb.1: // %entry
+; CHECK-GI-NEXT: cmp w4, w5
+; CHECK-GI-NEXT: mov w0, wzr
+; CHECK-GI-NEXT: b.lt .LBB4_3
+; CHECK-GI-NEXT: // %bb.2: // %common.ret
+; CHECK-GI-NEXT: ret
+; CHECK-GI-NEXT: .LBB4_3: // %if
+; CHECK-GI-NEXT: mov w0, #1 // =0x1
+; CHECK-GI-NEXT: str w0, [x6]
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp ugt i32 %s0, %s1
%c1 = icmp uge i32 %s2, %s3
@@ -260,40 +260,40 @@ else:
}
define i32 @and_uge_slt_sle(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) {
-; SDISEL-LABEL: and_uge_slt_sle:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w2, w3
-; SDISEL-NEXT: ccmp w0, w1, #0, lt
-; SDISEL-NEXT: b.hs .LBB5_3
-; SDISEL-NEXT: // %bb.1: // %entry
-; SDISEL-NEXT: cmp w4, w5
-; SDISEL-NEXT: b.le .LBB5_3
-; SDISEL-NEXT: // %bb.2:
-; SDISEL-NEXT: mov w0, wzr
-; SDISEL-NEXT: ret
-; SDISEL-NEXT: .LBB5_3: // %if
-; SDISEL-NEXT: mov w0, #1 // =0x1
-; SDISEL-NEXT: str w0, [x6]
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: and_uge_slt_sle:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w2, w3
+; CHECK-SD-NEXT: ccmp w0, w1, #0, lt
+; CHECK-SD-NEXT: b.hs .LBB5_3
+; CHECK-SD-NEXT: // %bb.1: // %entry
+; CHECK-SD-NEXT: cmp w4, w5
+; CHECK-SD-NEXT: b.le .LBB5_3
+; CHECK-SD-NEXT: // %bb.2:
+; CHECK-SD-NEXT: mov w0, wzr
+; CHECK-SD-NEXT: ret
+; CHECK-SD-NEXT: .LBB5_3: // %if
+; CHECK-SD-NEXT: mov w0, #1 // =0x1
+; CHECK-SD-NEXT: str w0, [x6]
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: and_uge_slt_sle:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, hs
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, lt
-; GISEL-NEXT: and w8, w8, w9
-; GISEL-NEXT: tbnz w8, #0, .LBB5_3
-; GISEL-NEXT: // %bb.1: // %entry
-; GISEL-NEXT: cmp w4, w5
-; GISEL-NEXT: mov w0, wzr
-; GISEL-NEXT: b.le .LBB5_3
-; GISEL-NEXT: // %bb.2: // %common.ret
-; GISEL-NEXT: ret
-; GISEL-NEXT: .LBB5_3: // %if
-; GISEL-NEXT: mov w0, #1 // =0x1
-; GISEL-NEXT: str w0, [x6]
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: and_uge_slt_sle:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, hs
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, lt
+; CHECK-GI-NEXT: and w8, w8, w9
+; CHECK-GI-NEXT: tbnz w8, #0, .LBB5_3
+; CHECK-GI-NEXT: // %bb.1: // %entry
+; CHECK-GI-NEXT: cmp w4, w5
+; CHECK-GI-NEXT: mov w0, wzr
+; CHECK-GI-NEXT: b.le .LBB5_3
+; CHECK-GI-NEXT: // %bb.2: // %common.ret
+; CHECK-GI-NEXT: ret
+; CHECK-GI-NEXT: .LBB5_3: // %if
+; CHECK-GI-NEXT: mov w0, #1 // =0x1
+; CHECK-GI-NEXT: str w0, [x6]
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp uge i32 %s0, %s1
%c1 = icmp slt i32 %s2, %s3
@@ -311,40 +311,40 @@ else:
}
define i32 @and_slt_sle_sgt(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) {
-; SDISEL-LABEL: and_slt_sle_sgt:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w2, w3
-; SDISEL-NEXT: ccmp w0, w1, #0, le
-; SDISEL-NEXT: b.lt .LBB6_3
-; SDISEL-NEXT: // %bb.1: // %entry
-; SDISEL-NEXT: cmp w4, w5
-; SDISEL-NEXT: b.gt .LBB6_3
-; SDISEL-NEXT: // %bb.2:
-; SDISEL-NEXT: mov w0, wzr
-; SDISEL-NEXT: ret
-; SDISEL-NEXT: .LBB6_3: // %if
-; SDISEL-NEXT: mov w0, #1 // =0x1
-; SDISEL-NEXT: str w0, [x6]
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: and_slt_sle_sgt:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w2, w3
+; CHECK-SD-NEXT: ccmp w0, w1, #0, le
+; CHECK-SD-NEXT: b.lt .LBB6_3
+; CHECK-SD-NEXT: // %bb.1: // %entry
+; CHECK-SD-NEXT: cmp w4, w5
+; CHECK-SD-NEXT: b.gt .LBB6_3
+; CHECK-SD-NEXT: // %bb.2:
+; CHECK-SD-NEXT: mov w0, wzr
+; CHECK-SD-NEXT: ret
+; CHECK-SD-NEXT: .LBB6_3: // %if
+; CHECK-SD-NEXT: mov w0, #1 // =0x1
+; CHECK-SD-NEXT: str w0, [x6]
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: and_slt_sle_sgt:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, le
-; GISEL-NEXT: and w8, w8, w9
-; GISEL-NEXT: tbnz w8, #0, .LBB6_3
-; GISEL-NEXT: // %bb.1: // %entry
-; GISEL-NEXT: cmp w4, w5
-; GISEL-NEXT: mov w0, wzr
-; GISEL-NEXT: b.gt .LBB6_3
-; GISEL-NEXT: // %bb.2: // %common.ret
-; GISEL-NEXT: ret
-; GISEL-NEXT: .LBB6_3: // %if
-; GISEL-NEXT: mov w0, #1 // =0x1
-; GISEL-NEXT: str w0, [x6]
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: and_slt_sle_sgt:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, le
+; CHECK-GI-NEXT: and w8, w8, w9
+; CHECK-GI-NEXT: tbnz w8, #0, .LBB6_3
+; CHECK-GI-NEXT: // %bb.1: // %entry
+; CHECK-GI-NEXT: cmp w4, w5
+; CHECK-GI-NEXT: mov w0, wzr
+; CHECK-GI-NEXT: b.gt .LBB6_3
+; CHECK-GI-NEXT: // %bb.2: // %common.ret
+; CHECK-GI-NEXT: ret
+; CHECK-GI-NEXT: .LBB6_3: // %if
+; CHECK-GI-NEXT: mov w0, #1 // =0x1
+; CHECK-GI-NEXT: str w0, [x6]
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp slt i32 %s0, %s1
%c1 = icmp sle i32 %s2, %s3
@@ -362,40 +362,40 @@ else:
}
define i32 @and_sle_sgt_sge(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %s4, i32 %s5, ptr %p) {
-; SDISEL-LABEL: and_sle_sgt_sge:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: cmp w2, w3
-; SDISEL-NEXT: ccmp w0, w1, #0, gt
-; SDISEL-NEXT: b.le .LBB7_3
-; SDISEL-NEXT: // %bb.1: // %entry
-; SDISEL-NEXT: cmp w4, w5
-; SDISEL-NEXT: b.ge .LBB7_3
-; SDISEL-NEXT: // %bb.2:
-; SDISEL-NEXT: mov w0, wzr
-; SDISEL-NEXT: ret
-; SDISEL-NEXT: .LBB7_3: // %if
-; SDISEL-NEXT: mov w0, #1 // =0x1
-; SDISEL-NEXT: str w0, [x6]
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: and_sle_sgt_sge:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: cmp w2, w3
+; CHECK-SD-NEXT: ccmp w0, w1, #0, gt
+; CHECK-SD-NEXT: b.le .LBB7_3
+; CHECK-SD-NEXT: // %bb.1: // %entry
+; CHECK-SD-NEXT: cmp w4, w5
+; CHECK-SD-NEXT: b.ge .LBB7_3
+; CHECK-SD-NEXT: // %bb.2:
+; CHECK-SD-NEXT: mov w0, wzr
+; CHECK-SD-NEXT: ret
+; CHECK-SD-NEXT: .LBB7_3: // %if
+; CHECK-SD-NEXT: mov w0, #1 // =0x1
+; CHECK-SD-NEXT: str w0, [x6]
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: and_sle_sgt_sge:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, le
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, gt
-; GISEL-NEXT: and w8, w8, w9
-; GISEL-NEXT: tbnz w8, #0, .LBB7_3
-; GISEL-NEXT: // %bb.1: // %entry
-; GISEL-NEXT: cmp w4, w5
-; GISEL-NEXT: mov w0, wzr
-; GISEL-NEXT: b.ge .LBB7_3
-; GISEL-NEXT: // %bb.2: // %common.ret
-; GISEL-NEXT: ret
-; GISEL-NEXT: .LBB7_3: // %if
-; GISEL-NEXT: mov w0, #1 // =0x1
-; GISEL-NEXT: str w0, [x6]
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: and_sle_sgt_sge:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, le
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, gt
+; CHECK-GI-NEXT: and w8, w8, w9
+; CHECK-GI-NEXT: tbnz w8, #0, .LBB7_3
+; CHECK-GI-NEXT: // %bb.1: // %entry
+; CHECK-GI-NEXT: cmp w4, w5
+; CHECK-GI-NEXT: mov w0, wzr
+; CHECK-GI-NEXT: b.ge .LBB7_3
+; CHECK-GI-NEXT: // %bb.2: // %common.ret
+; CHECK-GI-NEXT: ret
+; CHECK-GI-NEXT: .LBB7_3: // %if
+; CHECK-GI-NEXT: mov w0, #1 // =0x1
+; CHECK-GI-NEXT: str w0, [x6]
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sle i32 %s0, %s1
%c1 = icmp sgt i32 %s2, %s3
diff --git a/llvm/test/CodeGen/AArch64/arm64-ccmp.ll b/llvm/test/CodeGen/AArch64/arm64-ccmp.ll
index 06e957f..a546ffd 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ccmp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ccmp.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -debugify-and-strip-all-safe -mcpu=cyclone -verify-machineinstrs -aarch64-enable-ccmp -aarch64-stress-ccmp | FileCheck %s --check-prefixes=CHECK,SDISEL
-; RUN: llc < %s -debugify-and-strip-all-safe -mcpu=cyclone -verify-machineinstrs -aarch64-enable-ccmp -aarch64-stress-ccmp -global-isel | FileCheck %s --check-prefixes=CHECK,GISEL
+; RUN: llc < %s -debugify-and-strip-all-safe -mcpu=cyclone -verify-machineinstrs -aarch64-enable-ccmp -aarch64-stress-ccmp | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -debugify-and-strip-all-safe -mcpu=cyclone -verify-machineinstrs -aarch64-enable-ccmp -aarch64-stress-ccmp -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
target triple = "arm64-apple-ios"
define i32 @single_same(i32 %a, i32 %b) nounwind ssp {
@@ -32,31 +32,31 @@ if.end:
; Different condition codes for the two compares.
define i32 @single_different(i32 %a, i32 %b) nounwind ssp {
-; SDISEL-LABEL: single_different:
-; SDISEL: ; %bb.0: ; %entry
-; SDISEL-NEXT: cmp w0, #6
-; SDISEL-NEXT: ccmp w1, #17, #0, ge
-; SDISEL-NEXT: b.eq LBB1_2
-; SDISEL-NEXT: ; %bb.1: ; %if.then
-; SDISEL-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; SDISEL-NEXT: bl _foo
-; SDISEL-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
-; SDISEL-NEXT: LBB1_2: ; %if.end
-; SDISEL-NEXT: mov w0, #7 ; =0x7
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: single_different:
+; CHECK-SD: ; %bb.0: ; %entry
+; CHECK-SD-NEXT: cmp w0, #6
+; CHECK-SD-NEXT: ccmp w1, #17, #0, ge
+; CHECK-SD-NEXT: b.eq LBB1_2
+; CHECK-SD-NEXT: ; %bb.1: ; %if.then
+; CHECK-SD-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-SD-NEXT: bl _foo
+; CHECK-SD-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; CHECK-SD-NEXT: LBB1_2: ; %if.end
+; CHECK-SD-NEXT: mov w0, #7 ; =0x7
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: single_different:
-; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: cmp w0, #5
-; GISEL-NEXT: ccmp w1, #17, #0, gt
-; GISEL-NEXT: b.eq LBB1_2
-; GISEL-NEXT: ; %bb.1: ; %if.then
-; GISEL-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; GISEL-NEXT: bl _foo
-; GISEL-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
-; GISEL-NEXT: LBB1_2: ; %if.end
-; GISEL-NEXT: mov w0, #7 ; =0x7
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: single_different:
+; CHECK-GI: ; %bb.0: ; %entry
+; CHECK-GI-NEXT: cmp w0, #5
+; CHECK-GI-NEXT: ccmp w1, #17, #0, gt
+; CHECK-GI-NEXT: b.eq LBB1_2
+; CHECK-GI-NEXT: ; %bb.1: ; %if.then
+; CHECK-GI-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-GI-NEXT: bl _foo
+; CHECK-GI-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; CHECK-GI-NEXT: LBB1_2: ; %if.end
+; CHECK-GI-NEXT: mov w0, #7 ; =0x7
+; CHECK-GI-NEXT: ret
entry:
%cmp = icmp sle i32 %a, 5
%cmp1 = icmp ne i32 %b, 17
@@ -73,41 +73,41 @@ if.end:
; Second block clobbers the flags, can't convert (easily).
define i32 @single_flagclobber(i32 %a, i32 %b) nounwind ssp {
-; SDISEL-LABEL: single_flagclobber:
-; SDISEL: ; %bb.0: ; %entry
-; SDISEL-NEXT: cmp w0, #5
-; SDISEL-NEXT: b.eq LBB2_2
-; SDISEL-NEXT: ; %bb.1: ; %lor.lhs.false
-; SDISEL-NEXT: lsl w8, w1, #1
-; SDISEL-NEXT: cmp w1, #7
-; SDISEL-NEXT: csinc w8, w8, w1, lt
-; SDISEL-NEXT: cmp w8, #16
-; SDISEL-NEXT: b.gt LBB2_3
-; SDISEL-NEXT: LBB2_2: ; %if.then
-; SDISEL-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; SDISEL-NEXT: bl _foo
-; SDISEL-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
-; SDISEL-NEXT: LBB2_3: ; %if.end
-; SDISEL-NEXT: mov w0, #7 ; =0x7
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: single_flagclobber:
+; CHECK-SD: ; %bb.0: ; %entry
+; CHECK-SD-NEXT: cmp w0, #5
+; CHECK-SD-NEXT: b.eq LBB2_2
+; CHECK-SD-NEXT: ; %bb.1: ; %lor.lhs.false
+; CHECK-SD-NEXT: lsl w8, w1, #1
+; CHECK-SD-NEXT: cmp w1, #7
+; CHECK-SD-NEXT: csinc w8, w8, w1, lt
+; CHECK-SD-NEXT: cmp w8, #16
+; CHECK-SD-NEXT: b.gt LBB2_3
+; CHECK-SD-NEXT: LBB2_2: ; %if.then
+; CHECK-SD-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-SD-NEXT: bl _foo
+; CHECK-SD-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; CHECK-SD-NEXT: LBB2_3: ; %if.end
+; CHECK-SD-NEXT: mov w0, #7 ; =0x7
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: single_flagclobber:
-; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: cmp w0, #5
-; GISEL-NEXT: b.eq LBB2_2
-; GISEL-NEXT: ; %bb.1: ; %lor.lhs.false
-; GISEL-NEXT: lsl w8, w1, #1
-; GISEL-NEXT: cmp w1, #7
-; GISEL-NEXT: csinc w8, w8, w1, lt
-; GISEL-NEXT: cmp w8, #17
-; GISEL-NEXT: b.ge LBB2_3
-; GISEL-NEXT: LBB2_2: ; %if.then
-; GISEL-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; GISEL-NEXT: bl _foo
-; GISEL-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
-; GISEL-NEXT: LBB2_3: ; %if.end
-; GISEL-NEXT: mov w0, #7 ; =0x7
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: single_flagclobber:
+; CHECK-GI: ; %bb.0: ; %entry
+; CHECK-GI-NEXT: cmp w0, #5
+; CHECK-GI-NEXT: b.eq LBB2_2
+; CHECK-GI-NEXT: ; %bb.1: ; %lor.lhs.false
+; CHECK-GI-NEXT: lsl w8, w1, #1
+; CHECK-GI-NEXT: cmp w1, #7
+; CHECK-GI-NEXT: csinc w8, w8, w1, lt
+; CHECK-GI-NEXT: cmp w8, #17
+; CHECK-GI-NEXT: b.ge LBB2_3
+; CHECK-GI-NEXT: LBB2_2: ; %if.then
+; CHECK-GI-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-GI-NEXT: bl _foo
+; CHECK-GI-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; CHECK-GI-NEXT: LBB2_3: ; %if.end
+; CHECK-GI-NEXT: mov w0, #7 ; =0x7
+; CHECK-GI-NEXT: ret
entry:
%cmp = icmp eq i32 %a, 5
br i1 %cmp, label %if.then, label %lor.lhs.false
@@ -171,37 +171,37 @@ if.end: ; preds = %if.then, %lor.lhs.f
; The sdiv/udiv instructions do not trap when the divisor is zero, so they are
; safe to speculate.
define i32 @speculate_division(i32 %a, i32 %b) nounwind ssp {
-; SDISEL-LABEL: speculate_division:
-; SDISEL: ; %bb.0: ; %entry
-; SDISEL-NEXT: cmp w0, #1
-; SDISEL-NEXT: sdiv w8, w1, w0
-; SDISEL-NEXT: ccmp w8, #16, #0, ge
-; SDISEL-NEXT: b.le LBB4_2
-; SDISEL-NEXT: ; %bb.1: ; %if.end
-; SDISEL-NEXT: mov w0, #7 ; =0x7
-; SDISEL-NEXT: ret
-; SDISEL-NEXT: LBB4_2: ; %if.then
-; SDISEL-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; SDISEL-NEXT: bl _foo
-; SDISEL-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
-; SDISEL-NEXT: mov w0, #7 ; =0x7
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: speculate_division:
+; CHECK-SD: ; %bb.0: ; %entry
+; CHECK-SD-NEXT: cmp w0, #1
+; CHECK-SD-NEXT: sdiv w8, w1, w0
+; CHECK-SD-NEXT: ccmp w8, #16, #0, ge
+; CHECK-SD-NEXT: b.le LBB4_2
+; CHECK-SD-NEXT: ; %bb.1: ; %if.end
+; CHECK-SD-NEXT: mov w0, #7 ; =0x7
+; CHECK-SD-NEXT: ret
+; CHECK-SD-NEXT: LBB4_2: ; %if.then
+; CHECK-SD-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-SD-NEXT: bl _foo
+; CHECK-SD-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; CHECK-SD-NEXT: mov w0, #7 ; =0x7
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: speculate_division:
-; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: cmp w0, #0
-; GISEL-NEXT: sdiv w8, w1, w0
-; GISEL-NEXT: ccmp w8, #17, #0, gt
-; GISEL-NEXT: b.lt LBB4_2
-; GISEL-NEXT: ; %bb.1: ; %if.end
-; GISEL-NEXT: mov w0, #7 ; =0x7
-; GISEL-NEXT: ret
-; GISEL-NEXT: LBB4_2: ; %if.then
-; GISEL-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; GISEL-NEXT: bl _foo
-; GISEL-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
-; GISEL-NEXT: mov w0, #7 ; =0x7
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: speculate_division:
+; CHECK-GI: ; %bb.0: ; %entry
+; CHECK-GI-NEXT: cmp w0, #0
+; CHECK-GI-NEXT: sdiv w8, w1, w0
+; CHECK-GI-NEXT: ccmp w8, #17, #0, gt
+; CHECK-GI-NEXT: b.lt LBB4_2
+; CHECK-GI-NEXT: ; %bb.1: ; %if.end
+; CHECK-GI-NEXT: mov w0, #7 ; =0x7
+; CHECK-GI-NEXT: ret
+; CHECK-GI-NEXT: LBB4_2: ; %if.then
+; CHECK-GI-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-GI-NEXT: bl _foo
+; CHECK-GI-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; CHECK-GI-NEXT: mov w0, #7 ; =0x7
+; CHECK-GI-NEXT: ret
entry:
%cmp = icmp sgt i32 %a, 0
br i1 %cmp, label %land.lhs.true, label %if.end
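; A sketch of why the speculation above is sound, assuming standard AArch64
; semantics: UDIV/SDIV by zero return 0 and never trap, so the divide may
; issue before the guarding compare resolves, and the ccmp only folds the
; divide's result into the flags when the guard held:
;   cmp  w0, #1            ; flags for the guard a >= 1
;   sdiv w8, w1, w0        ; speculated; safe even when w0 == 0
;   ccmp w8, #16, #0, ge   ; compares w8 only if the guard held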
@@ -221,41 +221,41 @@ if.end:
; Floating point compare.
define i32 @single_fcmp(i32 %a, float %b) nounwind ssp {
-; SDISEL-LABEL: single_fcmp:
-; SDISEL: ; %bb.0: ; %entry
-; SDISEL-NEXT: cmp w0, #1
-; SDISEL-NEXT: scvtf s1, w0
-; SDISEL-NEXT: fdiv s0, s0, s1
-; SDISEL-NEXT: fmov s1, #17.00000000
-; SDISEL-NEXT: fccmp s0, s1, #8, ge
-; SDISEL-NEXT: b.ge LBB5_2
-; SDISEL-NEXT: ; %bb.1: ; %if.end
-; SDISEL-NEXT: mov w0, #7 ; =0x7
-; SDISEL-NEXT: ret
-; SDISEL-NEXT: LBB5_2: ; %if.then
-; SDISEL-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; SDISEL-NEXT: bl _foo
-; SDISEL-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
-; SDISEL-NEXT: mov w0, #7 ; =0x7
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: single_fcmp:
+; CHECK-SD: ; %bb.0: ; %entry
+; CHECK-SD-NEXT: cmp w0, #1
+; CHECK-SD-NEXT: scvtf s1, w0
+; CHECK-SD-NEXT: fdiv s0, s0, s1
+; CHECK-SD-NEXT: fmov s1, #17.00000000
+; CHECK-SD-NEXT: fccmp s0, s1, #8, ge
+; CHECK-SD-NEXT: b.ge LBB5_2
+; CHECK-SD-NEXT: ; %bb.1: ; %if.end
+; CHECK-SD-NEXT: mov w0, #7 ; =0x7
+; CHECK-SD-NEXT: ret
+; CHECK-SD-NEXT: LBB5_2: ; %if.then
+; CHECK-SD-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-SD-NEXT: bl _foo
+; CHECK-SD-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; CHECK-SD-NEXT: mov w0, #7 ; =0x7
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: single_fcmp:
-; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: cmp w0, #0
-; GISEL-NEXT: scvtf s1, w0
-; GISEL-NEXT: fdiv s0, s0, s1
-; GISEL-NEXT: fmov s1, #17.00000000
-; GISEL-NEXT: fccmp s0, s1, #8, gt
-; GISEL-NEXT: b.ge LBB5_2
-; GISEL-NEXT: ; %bb.1: ; %if.end
-; GISEL-NEXT: mov w0, #7 ; =0x7
-; GISEL-NEXT: ret
-; GISEL-NEXT: LBB5_2: ; %if.then
-; GISEL-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
-; GISEL-NEXT: bl _foo
-; GISEL-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
-; GISEL-NEXT: mov w0, #7 ; =0x7
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: single_fcmp:
+; CHECK-GI: ; %bb.0: ; %entry
+; CHECK-GI-NEXT: cmp w0, #0
+; CHECK-GI-NEXT: scvtf s1, w0
+; CHECK-GI-NEXT: fdiv s0, s0, s1
+; CHECK-GI-NEXT: fmov s1, #17.00000000
+; CHECK-GI-NEXT: fccmp s0, s1, #8, gt
+; CHECK-GI-NEXT: b.ge LBB5_2
+; CHECK-GI-NEXT: ; %bb.1: ; %if.end
+; CHECK-GI-NEXT: mov w0, #7 ; =0x7
+; CHECK-GI-NEXT: ret
+; CHECK-GI-NEXT: LBB5_2: ; %if.then
+; CHECK-GI-NEXT: stp x29, x30, [sp, #-16]! ; 16-byte Folded Spill
+; CHECK-GI-NEXT: bl _foo
+; CHECK-GI-NEXT: ldp x29, x30, [sp], #16 ; 16-byte Folded Reload
+; CHECK-GI-NEXT: mov w0, #7 ; =0x7
+; CHECK-GI-NEXT: ret
entry:
%cmp = icmp sgt i32 %a, 0
br i1 %cmp, label %land.lhs.true, label %if.end
@@ -499,28 +499,28 @@ define float @select_or_float(i32 %w0, i32 %w1, float %x2, float %x3) {
}
define i64 @gccbug(i64 %x0, i64 %x1) {
-; SDISEL-LABEL: gccbug:
-; SDISEL: ; %bb.0:
-; SDISEL-NEXT: cmp x0, #2
-; SDISEL-NEXT: ccmp x0, #4, #4, ne
-; SDISEL-NEXT: ccmp x1, #0, #0, eq
-; SDISEL-NEXT: mov w8, #1 ; =0x1
-; SDISEL-NEXT: cinc x0, x8, eq
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: gccbug:
+; CHECK-SD: ; %bb.0:
+; CHECK-SD-NEXT: cmp x0, #2
+; CHECK-SD-NEXT: ccmp x0, #4, #4, ne
+; CHECK-SD-NEXT: ccmp x1, #0, #0, eq
+; CHECK-SD-NEXT: mov w8, #1 ; =0x1
+; CHECK-SD-NEXT: cinc x0, x8, eq
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: gccbug:
-; GISEL: ; %bb.0:
-; GISEL-NEXT: cmp x1, #0
-; GISEL-NEXT: cset w8, eq
-; GISEL-NEXT: cmp x0, #2
-; GISEL-NEXT: cset w9, eq
-; GISEL-NEXT: cmp x0, #4
-; GISEL-NEXT: cset w10, eq
-; GISEL-NEXT: orr w9, w10, w9
-; GISEL-NEXT: and w8, w9, w8
-; GISEL-NEXT: and x8, x8, #0x1
-; GISEL-NEXT: add x0, x8, #1
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: gccbug:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: cmp x1, #0
+; CHECK-GI-NEXT: cset w8, eq
+; CHECK-GI-NEXT: cmp x0, #2
+; CHECK-GI-NEXT: cset w9, eq
+; CHECK-GI-NEXT: cmp x0, #4
+; CHECK-GI-NEXT: cset w10, eq
+; CHECK-GI-NEXT: orr w9, w10, w9
+; CHECK-GI-NEXT: and w8, w9, w8
+; CHECK-GI-NEXT: and x8, x8, #0x1
+; CHECK-GI-NEXT: add x0, x8, #1
+; CHECK-GI-NEXT: ret
%cmp0 = icmp eq i64 %x1, 0
%cmp1 = icmp eq i64 %x0, 2
%cmp2 = icmp eq i64 %x0, 4
@@ -570,23 +570,23 @@ define i32 @select_andor(i32 %v1, i32 %v2, i32 %v3) {
}
define i32 @select_andor32(i32 %v1, i32 %v2, i32 %v3) {
-; SDISEL-LABEL: select_andor32:
-; SDISEL: ; %bb.0:
-; SDISEL-NEXT: cmp w1, w2
-; SDISEL-NEXT: mov w8, #32 ; =0x20
-; SDISEL-NEXT: ccmp w0, w8, #4, lt
-; SDISEL-NEXT: ccmp w0, w1, #0, eq
-; SDISEL-NEXT: csel w0, w0, w1, eq
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: select_andor32:
+; CHECK-SD: ; %bb.0:
+; CHECK-SD-NEXT: cmp w1, w2
+; CHECK-SD-NEXT: mov w8, #32 ; =0x20
+; CHECK-SD-NEXT: ccmp w0, w8, #4, lt
+; CHECK-SD-NEXT: ccmp w0, w1, #0, eq
+; CHECK-SD-NEXT: csel w0, w0, w1, eq
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: select_andor32:
-; GISEL: ; %bb.0:
-; GISEL-NEXT: mov w8, #32 ; =0x20
-; GISEL-NEXT: cmp w1, w2
-; GISEL-NEXT: ccmp w0, w8, #4, lt
-; GISEL-NEXT: ccmp w0, w1, #0, eq
-; GISEL-NEXT: csel w0, w0, w1, eq
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: select_andor32:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: mov w8, #32 ; =0x20
+; CHECK-GI-NEXT: cmp w1, w2
+; CHECK-GI-NEXT: ccmp w0, w8, #4, lt
+; CHECK-GI-NEXT: ccmp w0, w1, #0, eq
+; CHECK-GI-NEXT: csel w0, w0, w1, eq
+; CHECK-GI-NEXT: ret
%c0 = icmp eq i32 %v1, %v2
%c1 = icmp sge i32 %v2, %v3
%c2 = icmp eq i32 %v1, 32
@@ -597,22 +597,22 @@ define i32 @select_andor32(i32 %v1, i32 %v2, i32 %v3) {
}
define i64 @select_noccmp1(i64 %v1, i64 %v2, i64 %v3, i64 %r) {
-; SDISEL-LABEL: select_noccmp1:
-; SDISEL: ; %bb.0:
-; SDISEL-NEXT: cmp x0, #0
-; SDISEL-NEXT: ccmp x0, #13, #4, lt
-; SDISEL-NEXT: cset w8, gt
-; SDISEL-NEXT: cmp x2, #2
-; SDISEL-NEXT: ccmp x2, #4, #4, lt
-; SDISEL-NEXT: csinc w8, w8, wzr, le
-; SDISEL-NEXT: cmp w8, #0
-; SDISEL-NEXT: csel x0, xzr, x3, ne
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: select_noccmp1:
+; CHECK-SD: ; %bb.0:
+; CHECK-SD-NEXT: cmp x0, #0
+; CHECK-SD-NEXT: ccmp x0, #13, #4, lt
+; CHECK-SD-NEXT: cset w8, gt
+; CHECK-SD-NEXT: cmp x2, #2
+; CHECK-SD-NEXT: ccmp x2, #4, #4, lt
+; CHECK-SD-NEXT: csinc w8, w8, wzr, le
+; CHECK-SD-NEXT: cmp w8, #0
+; CHECK-SD-NEXT: csel x0, xzr, x3, ne
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: select_noccmp1:
-; GISEL: ; %bb.0:
-; GISEL-NEXT: mov x0, x3
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: select_noccmp1:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: mov x0, x3
+; CHECK-GI-NEXT: ret
%c0 = icmp slt i64 %v1, 0
%c1 = icmp sgt i64 %v1, 13
%c2 = icmp slt i64 %v3, 2
@@ -627,28 +627,28 @@ define i64 @select_noccmp1(i64 %v1, i64 %v2, i64 %v3, i64 %r) {
@g = global i32 0
define i64 @select_noccmp2(i64 %v1, i64 %v2, i64 %v3, i64 %r) {
-; SDISEL-LABEL: select_noccmp2:
-; SDISEL: ; %bb.0:
-; SDISEL-NEXT: cmp x0, #0
-; SDISEL-NEXT: ccmp x0, #13, #0, ge
-; SDISEL-NEXT: cset w8, gt
-; SDISEL-NEXT: cmp w8, #0
-; SDISEL-NEXT: csel x0, xzr, x3, ne
-; SDISEL-NEXT: sbfx w8, w8, #0, #1
-; SDISEL-NEXT: adrp x9, _g@PAGE
-; SDISEL-NEXT: str w8, [x9, _g@PAGEOFF]
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: select_noccmp2:
+; CHECK-SD: ; %bb.0:
+; CHECK-SD-NEXT: cmp x0, #0
+; CHECK-SD-NEXT: ccmp x0, #13, #0, ge
+; CHECK-SD-NEXT: cset w8, gt
+; CHECK-SD-NEXT: cmp w8, #0
+; CHECK-SD-NEXT: csel x0, xzr, x3, ne
+; CHECK-SD-NEXT: sbfx w8, w8, #0, #1
+; CHECK-SD-NEXT: adrp x9, _g@PAGE
+; CHECK-SD-NEXT: str w8, [x9, _g@PAGEOFF]
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: select_noccmp2:
-; GISEL: ; %bb.0:
-; GISEL-NEXT: cmp x0, #14
-; GISEL-NEXT: cset w8, hs
-; GISEL-NEXT: tst w8, #0x1
-; GISEL-NEXT: csel x0, xzr, x3, ne
-; GISEL-NEXT: sbfx w8, w8, #0, #1
-; GISEL-NEXT: adrp x9, _g@PAGE
-; GISEL-NEXT: str w8, [x9, _g@PAGEOFF]
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: select_noccmp2:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: cmp x0, #14
+; CHECK-GI-NEXT: cset w8, hs
+; CHECK-GI-NEXT: tst w8, #0x1
+; CHECK-GI-NEXT: csel x0, xzr, x3, ne
+; CHECK-GI-NEXT: sbfx w8, w8, #0, #1
+; CHECK-GI-NEXT: adrp x9, _g@PAGE
+; CHECK-GI-NEXT: str w8, [x9, _g@PAGEOFF]
+; CHECK-GI-NEXT: ret
%c0 = icmp slt i64 %v1, 0
%c1 = icmp sgt i64 %v1, 13
%or = or i1 %c0, %c1
@@ -661,33 +661,33 @@ define i64 @select_noccmp2(i64 %v1, i64 %v2, i64 %v3, i64 %r) {
; The following is not possible to implement with a single cmp;ccmp;csel
; sequence.
define i32 @select_noccmp3(i32 %v0, i32 %v1, i32 %v2) {
-; SDISEL-LABEL: select_noccmp3:
-; SDISEL: ; %bb.0:
-; SDISEL-NEXT: cmp w0, #0
-; SDISEL-NEXT: ccmp w0, #13, #0, ge
-; SDISEL-NEXT: cset w8, gt
-; SDISEL-NEXT: cmp w0, #22
-; SDISEL-NEXT: mov w9, #44 ; =0x2c
-; SDISEL-NEXT: ccmp w0, w9, #0, ge
-; SDISEL-NEXT: csel w8, wzr, w8, le
-; SDISEL-NEXT: cmp w0, #99
-; SDISEL-NEXT: mov w9, #77 ; =0x4d
-; SDISEL-NEXT: ccmp w0, w9, #4, ne
-; SDISEL-NEXT: cset w9, eq
-; SDISEL-NEXT: tst w8, w9
-; SDISEL-NEXT: csel w0, w1, w2, ne
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: select_noccmp3:
+; CHECK-SD: ; %bb.0:
+; CHECK-SD-NEXT: cmp w0, #0
+; CHECK-SD-NEXT: ccmp w0, #13, #0, ge
+; CHECK-SD-NEXT: cset w8, gt
+; CHECK-SD-NEXT: cmp w0, #22
+; CHECK-SD-NEXT: mov w9, #44 ; =0x2c
+; CHECK-SD-NEXT: ccmp w0, w9, #0, ge
+; CHECK-SD-NEXT: csel w8, wzr, w8, le
+; CHECK-SD-NEXT: cmp w0, #99
+; CHECK-SD-NEXT: mov w9, #77 ; =0x4d
+; CHECK-SD-NEXT: ccmp w0, w9, #4, ne
+; CHECK-SD-NEXT: cset w9, eq
+; CHECK-SD-NEXT: tst w8, w9
+; CHECK-SD-NEXT: csel w0, w1, w2, ne
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: select_noccmp3:
-; GISEL: ; %bb.0:
-; GISEL-NEXT: mov w8, #99 ; =0x63
-; GISEL-NEXT: sub w9, w0, #45
-; GISEL-NEXT: cmp w0, #77
-; GISEL-NEXT: ccmp w0, w8, #4, ne
-; GISEL-NEXT: ccmn w9, #23, #2, eq
-; GISEL-NEXT: ccmp w0, #14, #0, lo
-; GISEL-NEXT: csel w0, w1, w2, hs
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: select_noccmp3:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: mov w8, #99 ; =0x63
+; CHECK-GI-NEXT: sub w9, w0, #45
+; CHECK-GI-NEXT: cmp w0, #77
+; CHECK-GI-NEXT: ccmp w0, w8, #4, ne
+; CHECK-GI-NEXT: ccmn w9, #23, #2, eq
+; CHECK-GI-NEXT: ccmp w0, #14, #0, lo
+; CHECK-GI-NEXT: csel w0, w1, w2, hs
+; CHECK-GI-NEXT: ret
%c0 = icmp slt i32 %v0, 0
%c1 = icmp sgt i32 %v0, 13
%c2 = icmp slt i32 %v0, 22
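; Why no single chain exists here (a sketch): a cmp;ccmp chain threads one
; flag state through a linear and/or expression, so when two independent
; predicate groups must both be live at once, at least one of them has to
; be materialized in a register; hence the two cset results combined by
; the final tst in the CHECK-SD output above.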
@@ -864,27 +864,27 @@ define i32 @select_or_olt_ueq_ogt(double %v0, double %v1, double %v2, double %v3
; Verify that we correctly promote f16.
define i32 @half_select_and_olt_oge(half %v0, half %v1, half %v2, half %v3, i32 %a, i32 %b) #0 {
-; SDISEL-LABEL: half_select_and_olt_oge:
-; SDISEL: ; %bb.0:
-; SDISEL-NEXT: fcvt s1, h1
-; SDISEL-NEXT: fcvt s0, h0
-; SDISEL-NEXT: fcmp s0, s1
-; SDISEL-NEXT: fcvt s0, h3
-; SDISEL-NEXT: fcvt s1, h2
-; SDISEL-NEXT: fccmp s1, s0, #8, mi
-; SDISEL-NEXT: csel w0, w0, w1, ge
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: half_select_and_olt_oge:
+; CHECK-SD: ; %bb.0:
+; CHECK-SD-NEXT: fcvt s1, h1
+; CHECK-SD-NEXT: fcvt s0, h0
+; CHECK-SD-NEXT: fcmp s0, s1
+; CHECK-SD-NEXT: fcvt s0, h3
+; CHECK-SD-NEXT: fcvt s1, h2
+; CHECK-SD-NEXT: fccmp s1, s0, #8, mi
+; CHECK-SD-NEXT: csel w0, w0, w1, ge
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: half_select_and_olt_oge:
-; GISEL: ; %bb.0:
-; GISEL-NEXT: fcvt s0, h0
-; GISEL-NEXT: fcvt s1, h1
-; GISEL-NEXT: fcvt s2, h2
-; GISEL-NEXT: fcvt s3, h3
-; GISEL-NEXT: fcmp s0, s1
-; GISEL-NEXT: fccmp s2, s3, #8, mi
-; GISEL-NEXT: csel w0, w0, w1, ge
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: half_select_and_olt_oge:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: fcvt s0, h0
+; CHECK-GI-NEXT: fcvt s1, h1
+; CHECK-GI-NEXT: fcvt s2, h2
+; CHECK-GI-NEXT: fcvt s3, h3
+; CHECK-GI-NEXT: fcmp s0, s1
+; CHECK-GI-NEXT: fccmp s2, s3, #8, mi
+; CHECK-GI-NEXT: csel w0, w0, w1, ge
+; CHECK-GI-NEXT: ret
%c0 = fcmp olt half %v0, %v1
%c1 = fcmp oge half %v2, %v3
%cr = and i1 %c1, %c0
@@ -893,29 +893,29 @@ define i32 @half_select_and_olt_oge(half %v0, half %v1, half %v2, half %v3, i32
}
define i32 @half_select_and_olt_one(half %v0, half %v1, half %v2, half %v3, i32 %a, i32 %b) #0 {
-; SDISEL-LABEL: half_select_and_olt_one:
-; SDISEL: ; %bb.0:
-; SDISEL-NEXT: fcvt s1, h1
-; SDISEL-NEXT: fcvt s0, h0
-; SDISEL-NEXT: fcmp s0, s1
-; SDISEL-NEXT: fcvt s0, h3
-; SDISEL-NEXT: fcvt s1, h2
-; SDISEL-NEXT: fccmp s1, s0, #4, mi
-; SDISEL-NEXT: fccmp s1, s0, #1, ne
-; SDISEL-NEXT: csel w0, w0, w1, vc
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: half_select_and_olt_one:
+; CHECK-SD: ; %bb.0:
+; CHECK-SD-NEXT: fcvt s1, h1
+; CHECK-SD-NEXT: fcvt s0, h0
+; CHECK-SD-NEXT: fcmp s0, s1
+; CHECK-SD-NEXT: fcvt s0, h3
+; CHECK-SD-NEXT: fcvt s1, h2
+; CHECK-SD-NEXT: fccmp s1, s0, #4, mi
+; CHECK-SD-NEXT: fccmp s1, s0, #1, ne
+; CHECK-SD-NEXT: csel w0, w0, w1, vc
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: half_select_and_olt_one:
-; GISEL: ; %bb.0:
-; GISEL-NEXT: fcvt s0, h0
-; GISEL-NEXT: fcvt s1, h1
-; GISEL-NEXT: fcvt s2, h2
-; GISEL-NEXT: fcvt s3, h3
-; GISEL-NEXT: fcmp s0, s1
-; GISEL-NEXT: fccmp s2, s3, #4, mi
-; GISEL-NEXT: fccmp s2, s3, #1, ne
-; GISEL-NEXT: csel w0, w0, w1, vc
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: half_select_and_olt_one:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: fcvt s0, h0
+; CHECK-GI-NEXT: fcvt s1, h1
+; CHECK-GI-NEXT: fcvt s2, h2
+; CHECK-GI-NEXT: fcvt s3, h3
+; CHECK-GI-NEXT: fcmp s0, s1
+; CHECK-GI-NEXT: fccmp s2, s3, #4, mi
+; CHECK-GI-NEXT: fccmp s2, s3, #1, ne
+; CHECK-GI-NEXT: csel w0, w0, w1, vc
+; CHECK-GI-NEXT: ret
%c0 = fcmp olt half %v0, %v1
%c1 = fcmp one half %v2, %v3
%cr = and i1 %c1, %c0
@@ -926,51 +926,51 @@ define i32 @half_select_and_olt_one(half %v0, half %v1, half %v2, half %v3, i32
; Also verify that we don't try to generate FCCMPs for f128; runtime
; library calls are used instead.
define i32 @f128_select_and_olt_oge(fp128 %v0, fp128 %v1, fp128 %v2, fp128 %v3, i32 %a, i32 %b) #0 {
-; SDISEL-LABEL: f128_select_and_olt_oge:
-; SDISEL: ; %bb.0:
-; SDISEL-NEXT: sub sp, sp, #80
-; SDISEL-NEXT: stp x22, x21, [sp, #32] ; 16-byte Folded Spill
-; SDISEL-NEXT: stp x20, x19, [sp, #48] ; 16-byte Folded Spill
-; SDISEL-NEXT: stp x29, x30, [sp, #64] ; 16-byte Folded Spill
-; SDISEL-NEXT: mov x19, x1
-; SDISEL-NEXT: mov x20, x0
-; SDISEL-NEXT: stp q2, q3, [sp] ; 32-byte Folded Spill
-; SDISEL-NEXT: bl ___lttf2
-; SDISEL-NEXT: cmp w0, #0
-; SDISEL-NEXT: cset w21, lt
-; SDISEL-NEXT: ldp q0, q1, [sp] ; 32-byte Folded Reload
-; SDISEL-NEXT: bl ___getf2
-; SDISEL-NEXT: cmp w0, #0
-; SDISEL-NEXT: cset w8, ge
-; SDISEL-NEXT: tst w8, w21
-; SDISEL-NEXT: csel w0, w20, w19, ne
-; SDISEL-NEXT: ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
-; SDISEL-NEXT: ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
-; SDISEL-NEXT: ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
-; SDISEL-NEXT: add sp, sp, #80
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: f128_select_and_olt_oge:
+; CHECK-SD: ; %bb.0:
+; CHECK-SD-NEXT: sub sp, sp, #80
+; CHECK-SD-NEXT: stp x22, x21, [sp, #32] ; 16-byte Folded Spill
+; CHECK-SD-NEXT: stp x20, x19, [sp, #48] ; 16-byte Folded Spill
+; CHECK-SD-NEXT: stp x29, x30, [sp, #64] ; 16-byte Folded Spill
+; CHECK-SD-NEXT: mov x19, x1
+; CHECK-SD-NEXT: mov x20, x0
+; CHECK-SD-NEXT: stp q2, q3, [sp] ; 32-byte Folded Spill
+; CHECK-SD-NEXT: bl ___lttf2
+; CHECK-SD-NEXT: cmp w0, #0
+; CHECK-SD-NEXT: cset w21, lt
+; CHECK-SD-NEXT: ldp q0, q1, [sp] ; 32-byte Folded Reload
+; CHECK-SD-NEXT: bl ___getf2
+; CHECK-SD-NEXT: cmp w0, #0
+; CHECK-SD-NEXT: cset w8, ge
+; CHECK-SD-NEXT: tst w8, w21
+; CHECK-SD-NEXT: csel w0, w20, w19, ne
+; CHECK-SD-NEXT: ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
+; CHECK-SD-NEXT: ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
+; CHECK-SD-NEXT: ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
+; CHECK-SD-NEXT: add sp, sp, #80
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: f128_select_and_olt_oge:
-; GISEL: ; %bb.0:
-; GISEL-NEXT: sub sp, sp, #80
-; GISEL-NEXT: stp x22, x21, [sp, #32] ; 16-byte Folded Spill
-; GISEL-NEXT: stp x20, x19, [sp, #48] ; 16-byte Folded Spill
-; GISEL-NEXT: stp x29, x30, [sp, #64] ; 16-byte Folded Spill
-; GISEL-NEXT: stp q3, q2, [sp] ; 32-byte Folded Spill
-; GISEL-NEXT: mov x19, x0
-; GISEL-NEXT: mov x20, x1
-; GISEL-NEXT: bl ___lttf2
-; GISEL-NEXT: mov x21, x0
-; GISEL-NEXT: ldp q1, q0, [sp] ; 32-byte Folded Reload
-; GISEL-NEXT: bl ___getf2
-; GISEL-NEXT: cmp w21, #0
-; GISEL-NEXT: ccmp w0, #0, #8, lt
-; GISEL-NEXT: csel w0, w19, w20, ge
-; GISEL-NEXT: ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
-; GISEL-NEXT: ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
-; GISEL-NEXT: ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
-; GISEL-NEXT: add sp, sp, #80
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: f128_select_and_olt_oge:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: sub sp, sp, #80
+; CHECK-GI-NEXT: stp x22, x21, [sp, #32] ; 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #48] ; 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x29, x30, [sp, #64] ; 16-byte Folded Spill
+; CHECK-GI-NEXT: stp q3, q2, [sp] ; 32-byte Folded Spill
+; CHECK-GI-NEXT: mov x19, x0
+; CHECK-GI-NEXT: mov x20, x1
+; CHECK-GI-NEXT: bl ___lttf2
+; CHECK-GI-NEXT: mov x21, x0
+; CHECK-GI-NEXT: ldp q1, q0, [sp] ; 32-byte Folded Reload
+; CHECK-GI-NEXT: bl ___getf2
+; CHECK-GI-NEXT: cmp w21, #0
+; CHECK-GI-NEXT: ccmp w0, #0, #8, lt
+; CHECK-GI-NEXT: csel w0, w19, w20, ge
+; CHECK-GI-NEXT: ldp x29, x30, [sp, #64] ; 16-byte Folded Reload
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #48] ; 16-byte Folded Reload
+; CHECK-GI-NEXT: ldp x22, x21, [sp, #32] ; 16-byte Folded Reload
+; CHECK-GI-NEXT: add sp, sp, #80
+; CHECK-GI-NEXT: ret
%c0 = fcmp olt fp128 %v0, %v1
%c1 = fcmp oge fp128 %v2, %v3
%cr = and i1 %c1, %c0
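; For reference, the soft-float comparison libcalls above follow the
; libgcc/compiler-rt contract: __lttf2(a, b) returns a value less than
; zero iff a < b (ordered), and __getf2(a, b) returns a value greater than
; or equal to zero iff a >= b (ordered), which is why each call is
; followed by a compare of w0 against #0.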
@@ -1048,46 +1048,46 @@ define i32 @deep_or2(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %x, i32 %y) {
; This test checks that multiple ccmps do not get created in a way that
; would give them multiple uses. Currently they do not.
define i32 @multiccmp(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %x, i32 %y) #0 {
-; SDISEL-LABEL: multiccmp:
-; SDISEL: ; %bb.0: ; %entry
-; SDISEL-NEXT: stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
-; SDISEL-NEXT: stp x20, x19, [sp, #16] ; 16-byte Folded Spill
-; SDISEL-NEXT: stp x29, x30, [sp, #32] ; 16-byte Folded Spill
-; SDISEL-NEXT: mov x19, x5
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: cset w20, gt
-; SDISEL-NEXT: cmp w2, w3
-; SDISEL-NEXT: cset w21, ne
-; SDISEL-NEXT: tst w20, w21
-; SDISEL-NEXT: csel w0, w5, w4, ne
-; SDISEL-NEXT: bl _callee
-; SDISEL-NEXT: tst w20, w21
-; SDISEL-NEXT: csel w0, w0, w19, ne
-; SDISEL-NEXT: bl _callee
-; SDISEL-NEXT: ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
-; SDISEL-NEXT: ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
-; SDISEL-NEXT: ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: multiccmp:
+; CHECK-SD: ; %bb.0: ; %entry
+; CHECK-SD-NEXT: stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
+; CHECK-SD-NEXT: stp x20, x19, [sp, #16] ; 16-byte Folded Spill
+; CHECK-SD-NEXT: stp x29, x30, [sp, #32] ; 16-byte Folded Spill
+; CHECK-SD-NEXT: mov x19, x5
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: cset w20, gt
+; CHECK-SD-NEXT: cmp w2, w3
+; CHECK-SD-NEXT: cset w21, ne
+; CHECK-SD-NEXT: tst w20, w21
+; CHECK-SD-NEXT: csel w0, w5, w4, ne
+; CHECK-SD-NEXT: bl _callee
+; CHECK-SD-NEXT: tst w20, w21
+; CHECK-SD-NEXT: csel w0, w0, w19, ne
+; CHECK-SD-NEXT: bl _callee
+; CHECK-SD-NEXT: ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
+; CHECK-SD-NEXT: ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
+; CHECK-SD-NEXT: ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: multiccmp:
-; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
-; GISEL-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
-; GISEL-NEXT: mov x19, x5
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, gt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ne
-; GISEL-NEXT: and w20, w8, w9
-; GISEL-NEXT: tst w20, #0x1
-; GISEL-NEXT: csel w0, w5, w4, ne
-; GISEL-NEXT: bl _callee
-; GISEL-NEXT: tst w20, #0x1
-; GISEL-NEXT: csel w0, w0, w19, ne
-; GISEL-NEXT: bl _callee
-; GISEL-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
-; GISEL-NEXT: ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: multiccmp:
+; CHECK-GI: ; %bb.0: ; %entry
+; CHECK-GI-NEXT: stp x20, x19, [sp, #-32]! ; 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x29, x30, [sp, #16] ; 16-byte Folded Spill
+; CHECK-GI-NEXT: mov x19, x5
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, gt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ne
+; CHECK-GI-NEXT: and w20, w8, w9
+; CHECK-GI-NEXT: tst w20, #0x1
+; CHECK-GI-NEXT: csel w0, w5, w4, ne
+; CHECK-GI-NEXT: bl _callee
+; CHECK-GI-NEXT: tst w20, #0x1
+; CHECK-GI-NEXT: csel w0, w0, w19, ne
+; CHECK-GI-NEXT: bl _callee
+; CHECK-GI-NEXT: ldp x29, x30, [sp, #16] ; 16-byte Folded Reload
+; CHECK-GI-NEXT: ldp x20, x19, [sp], #32 ; 16-byte Folded Reload
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sgt i32 %s0, %s1
%c1 = icmp ne i32 %s2, %s3
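; In both outputs the combined predicate survives calls that clobber NZCV,
; so it is materialized into callee-saved registers and re-tested with tst
; after each bl rather than being kept in the flags.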
@@ -1100,57 +1100,57 @@ entry:
}
define i32 @multiccmp2(i32 %s0, i32 %s1, i32 %s2, i32 %s3, i32 %x, i32 %y) #0 {
-; SDISEL-LABEL: multiccmp2:
-; SDISEL: ; %bb.0: ; %entry
-; SDISEL-NEXT: stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
-; SDISEL-NEXT: stp x20, x19, [sp, #16] ; 16-byte Folded Spill
-; SDISEL-NEXT: stp x29, x30, [sp, #32] ; 16-byte Folded Spill
-; SDISEL-NEXT: mov x19, x5
-; SDISEL-NEXT: mov x20, x3
-; SDISEL-NEXT: mov x21, x0
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: cset w8, gt
-; SDISEL-NEXT: cmp w2, w3
-; SDISEL-NEXT: cset w22, ne
-; SDISEL-NEXT: tst w8, w22
-; SDISEL-NEXT: csel w0, w5, w4, ne
-; SDISEL-NEXT: bl _callee
-; SDISEL-NEXT: cmp w21, w20
-; SDISEL-NEXT: cset w8, eq
-; SDISEL-NEXT: tst w22, w8
-; SDISEL-NEXT: csel w0, w0, w19, ne
-; SDISEL-NEXT: bl _callee
-; SDISEL-NEXT: ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
-; SDISEL-NEXT: ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
-; SDISEL-NEXT: ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: multiccmp2:
+; CHECK-SD: ; %bb.0: ; %entry
+; CHECK-SD-NEXT: stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
+; CHECK-SD-NEXT: stp x20, x19, [sp, #16] ; 16-byte Folded Spill
+; CHECK-SD-NEXT: stp x29, x30, [sp, #32] ; 16-byte Folded Spill
+; CHECK-SD-NEXT: mov x19, x5
+; CHECK-SD-NEXT: mov x20, x3
+; CHECK-SD-NEXT: mov x21, x0
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: cset w8, gt
+; CHECK-SD-NEXT: cmp w2, w3
+; CHECK-SD-NEXT: cset w22, ne
+; CHECK-SD-NEXT: tst w8, w22
+; CHECK-SD-NEXT: csel w0, w5, w4, ne
+; CHECK-SD-NEXT: bl _callee
+; CHECK-SD-NEXT: cmp w21, w20
+; CHECK-SD-NEXT: cset w8, eq
+; CHECK-SD-NEXT: tst w22, w8
+; CHECK-SD-NEXT: csel w0, w0, w19, ne
+; CHECK-SD-NEXT: bl _callee
+; CHECK-SD-NEXT: ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
+; CHECK-SD-NEXT: ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
+; CHECK-SD-NEXT: ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: multiccmp2:
-; GISEL: ; %bb.0: ; %entry
-; GISEL-NEXT: stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
-; GISEL-NEXT: stp x20, x19, [sp, #16] ; 16-byte Folded Spill
-; GISEL-NEXT: stp x29, x30, [sp, #32] ; 16-byte Folded Spill
-; GISEL-NEXT: mov x19, x0
-; GISEL-NEXT: mov x20, x3
-; GISEL-NEXT: mov x21, x5
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, gt
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w22, ne
-; GISEL-NEXT: and w8, w8, w22
-; GISEL-NEXT: tst w8, #0x1
-; GISEL-NEXT: csel w0, w5, w4, ne
-; GISEL-NEXT: bl _callee
-; GISEL-NEXT: cmp w19, w20
-; GISEL-NEXT: cset w8, eq
-; GISEL-NEXT: and w8, w22, w8
-; GISEL-NEXT: tst w8, #0x1
-; GISEL-NEXT: csel w0, w0, w21, ne
-; GISEL-NEXT: bl _callee
-; GISEL-NEXT: ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
-; GISEL-NEXT: ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
-; GISEL-NEXT: ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: multiccmp2:
+; CHECK-GI: ; %bb.0: ; %entry
+; CHECK-GI-NEXT: stp x22, x21, [sp, #-48]! ; 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x20, x19, [sp, #16] ; 16-byte Folded Spill
+; CHECK-GI-NEXT: stp x29, x30, [sp, #32] ; 16-byte Folded Spill
+; CHECK-GI-NEXT: mov x19, x0
+; CHECK-GI-NEXT: mov x20, x3
+; CHECK-GI-NEXT: mov x21, x5
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, gt
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w22, ne
+; CHECK-GI-NEXT: and w8, w8, w22
+; CHECK-GI-NEXT: tst w8, #0x1
+; CHECK-GI-NEXT: csel w0, w5, w4, ne
+; CHECK-GI-NEXT: bl _callee
+; CHECK-GI-NEXT: cmp w19, w20
+; CHECK-GI-NEXT: cset w8, eq
+; CHECK-GI-NEXT: and w8, w22, w8
+; CHECK-GI-NEXT: tst w8, #0x1
+; CHECK-GI-NEXT: csel w0, w0, w21, ne
+; CHECK-GI-NEXT: bl _callee
+; CHECK-GI-NEXT: ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
+; CHECK-GI-NEXT: ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
+; CHECK-GI-NEXT: ldp x22, x21, [sp], #48 ; 16-byte Folded Reload
+; CHECK-GI-NEXT: ret
entry:
%c0 = icmp sgt i32 %s0, %s1
%c1 = icmp ne i32 %s2, %s3
@@ -1168,21 +1168,21 @@ entry:
declare i32 @callee(i32)
define i1 @cmp_and_negative_const(i32 %0, i32 %1) {
-; SDISEL-LABEL: cmp_and_negative_const:
-; SDISEL: ; %bb.0:
-; SDISEL-NEXT: cmn w0, #1
-; SDISEL-NEXT: ccmn w1, #2, #0, eq
-; SDISEL-NEXT: cset w0, eq
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: cmp_and_negative_const:
+; CHECK-SD: ; %bb.0:
+; CHECK-SD-NEXT: cmn w0, #1
+; CHECK-SD-NEXT: ccmn w1, #2, #0, eq
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: cmp_and_negative_const:
-; GISEL: ; %bb.0:
-; GISEL-NEXT: cmn w0, #1
-; GISEL-NEXT: cset w8, eq
-; GISEL-NEXT: cmn w1, #2
-; GISEL-NEXT: cset w9, eq
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: cmp_and_negative_const:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: cmn w0, #1
+; CHECK-GI-NEXT: cset w8, eq
+; CHECK-GI-NEXT: cmn w1, #2
+; CHECK-GI-NEXT: cset w9, eq
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
%3 = icmp eq i32 %0, -1
%4 = icmp eq i32 %1, -2
%5 = and i1 %3, %4
@@ -1190,21 +1190,21 @@ define i1 @cmp_and_negative_const(i32 %0, i32 %1) {
}
define i1 @cmp_or_negative_const(i32 %a, i32 %b) {
-; SDISEL-LABEL: cmp_or_negative_const:
-; SDISEL: ; %bb.0:
-; SDISEL-NEXT: cmn w0, #1
-; SDISEL-NEXT: ccmn w1, #2, #4, ne
-; SDISEL-NEXT: cset w0, eq
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: cmp_or_negative_const:
+; CHECK-SD: ; %bb.0:
+; CHECK-SD-NEXT: cmn w0, #1
+; CHECK-SD-NEXT: ccmn w1, #2, #4, ne
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: cmp_or_negative_const:
-; GISEL: ; %bb.0:
-; GISEL-NEXT: cmn w0, #1
-; GISEL-NEXT: cset w8, eq
-; GISEL-NEXT: cmn w1, #2
-; GISEL-NEXT: cset w9, eq
-; GISEL-NEXT: orr w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: cmp_or_negative_const:
+; CHECK-GI: ; %bb.0:
+; CHECK-GI-NEXT: cmn w0, #1
+; CHECK-GI-NEXT: cset w8, eq
+; CHECK-GI-NEXT: cmn w1, #2
+; CHECK-GI-NEXT: cset w9, eq
+; CHECK-GI-NEXT: orr w0, w8, w9
+; CHECK-GI-NEXT: ret
%cmp = icmp eq i32 %a, -1
%cmp1 = icmp eq i32 %b, -2
%or.cond = or i1 %cmp, %cmp1
diff --git a/llvm/test/CodeGen/AArch64/cmp-chains.ll b/llvm/test/CodeGen/AArch64/cmp-chains.ll
index 4b816df..3620444 100644
--- a/llvm/test/CodeGen/AArch64/cmp-chains.ll
+++ b/llvm/test/CodeGen/AArch64/cmp-chains.ll
@@ -1,26 +1,26 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,SDISEL
-; RUN: llc < %s -mtriple=aarch64-- -global-isel | FileCheck %s --check-prefixes=CHECK,GISEL
+; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple=aarch64-- -global-isel | FileCheck %s --check-prefixes=CHECK,CHECK-GI
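; Note on the prefixes: passing --check-prefixes=CHECK,CHECK-SD activates
; both prefixes in one FileCheck run, so output common to both instruction
; selectors is checked under CHECK while divergent output lands under
; CHECK-SD or CHECK-GI. The assertions can be regenerated with, e.g.
; (exact path assumed):
;   llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/AArch64/cmp-chains.ll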
; Ensure chains of comparisons produce chains of `ccmp`
; (x0 < x1) && (x2 > x3)
define i32 @cmp_and2(i32 %0, i32 %1, i32 %2, i32 %3) {
-; SDISEL-LABEL: cmp_and2:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, lo
-; SDISEL-NEXT: cset w0, hi
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: cmp_and2:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, lo
+; CHECK-SD-NEXT: cset w0, hi
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: cmp_and2:
-; GISEL: // %bb.0:
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lo
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hi
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: cmp_and2:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hi
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
%5 = icmp ult i32 %0, %1
%6 = icmp ugt i32 %2, %3
%7 = select i1 %5, i1 %6, i1 false
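; How the chain above encodes the short-circuit AND, as a worked example
; (the NZCV immediate weights follow the Arm encoding: N=8, Z=4, C=2, V=1):
;   cmp  w0, w1          ; flags for x0 <u x1
;   ccmp w2, w3, #0, lo  ; if lo held, compare w2 with w3; otherwise force
;                        ; NZCV to #0, so the final 'hi' (C && !Z) fails
;   cset w0, hi          ; 1 iff both comparisons succeeded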
@@ -30,25 +30,25 @@ define i32 @cmp_and2(i32 %0, i32 %1, i32 %2, i32 %3) {
; (x0 < x1) && (x2 > x3) && (x4 != x5)
define i32 @cmp_and3(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5) {
-; SDISEL-LABEL: cmp_and3:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, lo
-; SDISEL-NEXT: ccmp w4, w5, #4, hi
-; SDISEL-NEXT: cset w0, ne
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: cmp_and3:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, lo
+; CHECK-SD-NEXT: ccmp w4, w5, #4, hi
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: cmp_and3:
-; GISEL: // %bb.0:
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lo
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hi
-; GISEL-NEXT: cmp w4, w5
-; GISEL-NEXT: and w8, w8, w9
-; GISEL-NEXT: cset w9, ne
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: cmp_and3:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hi
+; CHECK-GI-NEXT: cmp w4, w5
+; CHECK-GI-NEXT: and w8, w8, w9
+; CHECK-GI-NEXT: cset w9, ne
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
%7 = icmp ult i32 %0, %1
%8 = icmp ugt i32 %2, %3
%9 = select i1 %7, i1 %8, i1 false
@@ -60,29 +60,29 @@ define i32 @cmp_and3(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5) {
; (x0 < x1) && (x2 > x3) && (x4 != x5) && (x6 == x7)
define i32 @cmp_and4(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7) {
-; SDISEL-LABEL: cmp_and4:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: cmp w2, w3
-; SDISEL-NEXT: ccmp w0, w1, #2, hi
-; SDISEL-NEXT: ccmp w4, w5, #4, lo
-; SDISEL-NEXT: ccmp w6, w7, #0, ne
-; SDISEL-NEXT: cset w0, eq
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: cmp_and4:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w2, w3
+; CHECK-SD-NEXT: ccmp w0, w1, #2, hi
+; CHECK-SD-NEXT: ccmp w4, w5, #4, lo
+; CHECK-SD-NEXT: ccmp w6, w7, #0, ne
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: cmp_and4:
-; GISEL: // %bb.0:
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w8, hi
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w9, lo
-; GISEL-NEXT: cmp w4, w5
-; GISEL-NEXT: cset w10, ne
-; GISEL-NEXT: cmp w6, w7
-; GISEL-NEXT: and w8, w8, w9
-; GISEL-NEXT: cset w11, eq
-; GISEL-NEXT: and w9, w10, w11
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: cmp_and4:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w8, hi
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w9, lo
+; CHECK-GI-NEXT: cmp w4, w5
+; CHECK-GI-NEXT: cset w10, ne
+; CHECK-GI-NEXT: cmp w6, w7
+; CHECK-GI-NEXT: and w8, w8, w9
+; CHECK-GI-NEXT: cset w11, eq
+; CHECK-GI-NEXT: and w9, w10, w11
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
%9 = icmp ugt i32 %2, %3
%10 = icmp ult i32 %0, %1
%11 = select i1 %9, i1 %10, i1 false
@@ -96,22 +96,22 @@ define i32 @cmp_and4(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32
; (x0 < x1) || (x2 > x3)
define i32 @cmp_or2(i32 %0, i32 %1, i32 %2, i32 %3) {
-; SDISEL-LABEL: cmp_or2:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #0, hs
-; SDISEL-NEXT: cset w0, ne
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: cmp_or2:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #0, hs
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: cmp_or2:
-; GISEL: // %bb.0:
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lo
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, ne
-; GISEL-NEXT: orr w8, w8, w9
-; GISEL-NEXT: and w0, w8, #0x1
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: cmp_or2:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, ne
+; CHECK-GI-NEXT: orr w8, w8, w9
+; CHECK-GI-NEXT: and w0, w8, #0x1
+; CHECK-GI-NEXT: ret
%5 = icmp ult i32 %0, %1
%6 = icmp ne i32 %2, %3
%7 = select i1 %5, i1 true, i1 %6
@@ -121,26 +121,26 @@ define i32 @cmp_or2(i32 %0, i32 %1, i32 %2, i32 %3) {
; (x0 < x1) || (x2 > x3) || (x4 != x5)
define i32 @cmp_or3(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5) {
-; SDISEL-LABEL: cmp_or3:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, hs
-; SDISEL-NEXT: ccmp w4, w5, #0, ls
-; SDISEL-NEXT: cset w0, ne
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: cmp_or3:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, hs
+; CHECK-SD-NEXT: ccmp w4, w5, #0, ls
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: cmp_or3:
-; GISEL: // %bb.0:
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lo
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hi
-; GISEL-NEXT: cmp w4, w5
-; GISEL-NEXT: orr w8, w8, w9
-; GISEL-NEXT: cset w9, ne
-; GISEL-NEXT: orr w8, w8, w9
-; GISEL-NEXT: and w0, w8, #0x1
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: cmp_or3:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hi
+; CHECK-GI-NEXT: cmp w4, w5
+; CHECK-GI-NEXT: orr w8, w8, w9
+; CHECK-GI-NEXT: cset w9, ne
+; CHECK-GI-NEXT: orr w8, w8, w9
+; CHECK-GI-NEXT: and w0, w8, #0x1
+; CHECK-GI-NEXT: ret
%7 = icmp ult i32 %0, %1
%8 = icmp ugt i32 %2, %3
%9 = select i1 %7, i1 true, i1 %8
@@ -152,30 +152,30 @@ define i32 @cmp_or3(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5) {
; (x0 < x1) || (x2 > x3) || (x4 != x5) || (x6 == x7)
define i32 @cmp_or4(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7) {
-; SDISEL-LABEL: cmp_or4:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: cmp w0, w1
-; SDISEL-NEXT: ccmp w2, w3, #2, hs
-; SDISEL-NEXT: ccmp w4, w5, #0, ls
-; SDISEL-NEXT: ccmp w6, w7, #4, eq
-; SDISEL-NEXT: cset w0, eq
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: cmp_or4:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, w1
+; CHECK-SD-NEXT: ccmp w2, w3, #2, hs
+; CHECK-SD-NEXT: ccmp w4, w5, #0, ls
+; CHECK-SD-NEXT: ccmp w6, w7, #4, eq
+; CHECK-SD-NEXT: cset w0, eq
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: cmp_or4:
-; GISEL: // %bb.0:
-; GISEL-NEXT: cmp w0, w1
-; GISEL-NEXT: cset w8, lo
-; GISEL-NEXT: cmp w2, w3
-; GISEL-NEXT: cset w9, hi
-; GISEL-NEXT: cmp w4, w5
-; GISEL-NEXT: cset w10, ne
-; GISEL-NEXT: cmp w6, w7
-; GISEL-NEXT: orr w8, w8, w9
-; GISEL-NEXT: cset w11, eq
-; GISEL-NEXT: orr w9, w10, w11
-; GISEL-NEXT: orr w8, w8, w9
-; GISEL-NEXT: and w0, w8, #0x1
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: cmp_or4:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp w0, w1
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: cmp w2, w3
+; CHECK-GI-NEXT: cset w9, hi
+; CHECK-GI-NEXT: cmp w4, w5
+; CHECK-GI-NEXT: cset w10, ne
+; CHECK-GI-NEXT: cmp w6, w7
+; CHECK-GI-NEXT: orr w8, w8, w9
+; CHECK-GI-NEXT: cset w11, eq
+; CHECK-GI-NEXT: orr w9, w10, w11
+; CHECK-GI-NEXT: orr w8, w8, w9
+; CHECK-GI-NEXT: and w0, w8, #0x1
+; CHECK-GI-NEXT: ret
%9 = icmp ult i32 %0, %1
%10 = icmp ugt i32 %2, %3
%11 = select i1 %9, i1 true, i1 %10
@@ -189,22 +189,22 @@ define i32 @cmp_or4(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32
; (x0 != 0) || (x1 != 0)
define i32 @true_or2(i32 %0, i32 %1) {
-; SDISEL-LABEL: true_or2:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: orr w8, w0, w1
-; SDISEL-NEXT: cmp w8, #0
-; SDISEL-NEXT: cset w0, ne
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: true_or2:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: orr w8, w0, w1
+; CHECK-SD-NEXT: cmp w8, #0
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: true_or2:
-; GISEL: // %bb.0:
-; GISEL-NEXT: cmp w0, #0
-; GISEL-NEXT: cset w8, ne
-; GISEL-NEXT: cmp w1, #0
-; GISEL-NEXT: cset w9, ne
-; GISEL-NEXT: orr w8, w8, w9
-; GISEL-NEXT: and w0, w8, #0x1
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: true_or2:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp w0, #0
+; CHECK-GI-NEXT: cset w8, ne
+; CHECK-GI-NEXT: cmp w1, #0
+; CHECK-GI-NEXT: cset w9, ne
+; CHECK-GI-NEXT: orr w8, w8, w9
+; CHECK-GI-NEXT: and w0, w8, #0x1
+; CHECK-GI-NEXT: ret
%3 = icmp ne i32 %0, 0
%4 = icmp ne i32 %1, 0
%5 = select i1 %3, i1 true, i1 %4
@@ -214,26 +214,26 @@ define i32 @true_or2(i32 %0, i32 %1) {
; (x0 != 0) || (x1 != 0) || (x2 != 0)
define i32 @true_or3(i32 %0, i32 %1, i32 %2) {
-; SDISEL-LABEL: true_or3:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: orr w8, w0, w1
-; SDISEL-NEXT: orr w8, w8, w2
-; SDISEL-NEXT: cmp w8, #0
-; SDISEL-NEXT: cset w0, ne
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: true_or3:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: orr w8, w0, w1
+; CHECK-SD-NEXT: orr w8, w8, w2
+; CHECK-SD-NEXT: cmp w8, #0
+; CHECK-SD-NEXT: cset w0, ne
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: true_or3:
-; GISEL: // %bb.0:
-; GISEL-NEXT: cmp w0, #0
-; GISEL-NEXT: cset w8, ne
-; GISEL-NEXT: cmp w1, #0
-; GISEL-NEXT: cset w9, ne
-; GISEL-NEXT: cmp w2, #0
-; GISEL-NEXT: orr w8, w8, w9
-; GISEL-NEXT: cset w9, ne
-; GISEL-NEXT: orr w8, w8, w9
-; GISEL-NEXT: and w0, w8, #0x1
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: true_or3:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp w0, #0
+; CHECK-GI-NEXT: cset w8, ne
+; CHECK-GI-NEXT: cmp w1, #0
+; CHECK-GI-NEXT: cset w9, ne
+; CHECK-GI-NEXT: cmp w2, #0
+; CHECK-GI-NEXT: orr w8, w8, w9
+; CHECK-GI-NEXT: cset w9, ne
+; CHECK-GI-NEXT: orr w8, w8, w9
+; CHECK-GI-NEXT: and w0, w8, #0x1
+; CHECK-GI-NEXT: ret
%4 = icmp ne i32 %0, 0
%5 = icmp ne i32 %1, 0
%6 = select i1 %4, i1 true, i1 %5
@@ -260,22 +260,22 @@ define i32 @neg_range_int(i32 %a, i32 %b, i32 %c) {
; (b > -(d | 1) && a < c)
define i32 @neg_range_int_comp(i32 %a, i32 %b, i32 %c, i32 %d) {
-; SDISEL-LABEL: neg_range_int_comp:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: orr w8, w3, #0x1
-; SDISEL-NEXT: cmp w0, w2
-; SDISEL-NEXT: ccmn w1, w8, #4, lt
-; SDISEL-NEXT: csel w0, w1, w0, gt
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: neg_range_int_comp:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: orr w8, w3, #0x1
+; CHECK-SD-NEXT: cmp w0, w2
+; CHECK-SD-NEXT: ccmn w1, w8, #4, lt
+; CHECK-SD-NEXT: csel w0, w1, w0, gt
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: neg_range_int_comp:
-; GISEL: // %bb.0:
-; GISEL-NEXT: orr w8, w3, #0x1
-; GISEL-NEXT: cmp w0, w2
-; GISEL-NEXT: neg w8, w8
-; GISEL-NEXT: ccmp w1, w8, #4, lt
-; GISEL-NEXT: csel w0, w1, w0, gt
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: neg_range_int_comp:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: orr w8, w3, #0x1
+; CHECK-GI-NEXT: cmp w0, w2
+; CHECK-GI-NEXT: neg w8, w8
+; CHECK-GI-NEXT: ccmp w1, w8, #4, lt
+; CHECK-GI-NEXT: csel w0, w1, w0, gt
+; CHECK-GI-NEXT: ret
%dor = or i32 %d, 1
%negd = sub i32 0, %dor
%cmp = icmp sgt i32 %b, %negd
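; ccmn is the conditional compare-negative form: it sets flags for
; w1 + w8, i.e. a compare of w1 against -w8. SelectionDAG folds the
; sub-from-zero into the ccmn, while GlobalISel keeps an explicit neg
; followed by a plain ccmp; both sequences test b against -(d | 1) here.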
@@ -287,22 +287,22 @@ define i32 @neg_range_int_comp(i32 %a, i32 %b, i32 %c, i32 %d) {
; (b >u -(d | 1) && a < c)
define i32 @neg_range_int_comp_u(i32 %a, i32 %b, i32 %c, i32 %d) {
-; SDISEL-LABEL: neg_range_int_comp_u:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: orr w8, w3, #0x1
-; SDISEL-NEXT: cmp w0, w2
-; SDISEL-NEXT: ccmn w1, w8, #0, lt
-; SDISEL-NEXT: csel w0, w1, w0, hi
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: neg_range_int_comp_u:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: orr w8, w3, #0x1
+; CHECK-SD-NEXT: cmp w0, w2
+; CHECK-SD-NEXT: ccmn w1, w8, #0, lt
+; CHECK-SD-NEXT: csel w0, w1, w0, hi
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: neg_range_int_comp_u:
-; GISEL: // %bb.0:
-; GISEL-NEXT: orr w8, w3, #0x1
-; GISEL-NEXT: cmp w0, w2
-; GISEL-NEXT: neg w8, w8
-; GISEL-NEXT: ccmp w1, w8, #0, lt
-; GISEL-NEXT: csel w0, w1, w0, hi
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: neg_range_int_comp_u:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: orr w8, w3, #0x1
+; CHECK-GI-NEXT: cmp w0, w2
+; CHECK-GI-NEXT: neg w8, w8
+; CHECK-GI-NEXT: ccmp w1, w8, #0, lt
+; CHECK-GI-NEXT: csel w0, w1, w0, hi
+; CHECK-GI-NEXT: ret
%dor = or i32 %d, 1
%negd = sub i32 0, %dor
%cmp = icmp ugt i32 %b, %negd
@@ -314,22 +314,22 @@ define i32 @neg_range_int_comp_u(i32 %a, i32 %b, i32 %c, i32 %d) {
; (b > -(d | 1) && a u < c)
define i32 @neg_range_int_comp_ua(i32 %a, i32 %b, i32 %c, i32 %d) {
-; SDISEL-LABEL: neg_range_int_comp_ua:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: orr w8, w3, #0x1
-; SDISEL-NEXT: cmp w0, w2
-; SDISEL-NEXT: ccmn w1, w8, #4, lo
-; SDISEL-NEXT: csel w0, w1, w0, gt
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: neg_range_int_comp_ua:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: orr w8, w3, #0x1
+; CHECK-SD-NEXT: cmp w0, w2
+; CHECK-SD-NEXT: ccmn w1, w8, #4, lo
+; CHECK-SD-NEXT: csel w0, w1, w0, gt
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: neg_range_int_comp_ua:
-; GISEL: // %bb.0:
-; GISEL-NEXT: orr w8, w3, #0x1
-; GISEL-NEXT: cmp w0, w2
-; GISEL-NEXT: neg w8, w8
-; GISEL-NEXT: ccmp w1, w8, #4, lo
-; GISEL-NEXT: csel w0, w1, w0, gt
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: neg_range_int_comp_ua:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: orr w8, w3, #0x1
+; CHECK-GI-NEXT: cmp w0, w2
+; CHECK-GI-NEXT: neg w8, w8
+; CHECK-GI-NEXT: ccmp w1, w8, #4, lo
+; CHECK-GI-NEXT: csel w0, w1, w0, gt
+; CHECK-GI-NEXT: ret
%dor = or i32 %d, 1
%negd = sub i32 0, %dor
%cmp = icmp sgt i32 %b, %negd
@@ -341,19 +341,19 @@ define i32 @neg_range_int_comp_ua(i32 %a, i32 %b, i32 %c, i32 %d) {
; (b <= -3 && a > c)
define i32 @neg_range_int_2(i32 %a, i32 %b, i32 %c) {
-; SDISEL-LABEL: neg_range_int_2:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: cmp w0, w2
-; SDISEL-NEXT: ccmn w1, #4, #4, gt
-; SDISEL-NEXT: csel w0, w1, w0, gt
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: neg_range_int_2:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, w2
+; CHECK-SD-NEXT: ccmn w1, #4, #4, gt
+; CHECK-SD-NEXT: csel w0, w1, w0, gt
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: neg_range_int_2:
-; GISEL: // %bb.0:
-; GISEL-NEXT: cmp w0, w2
-; GISEL-NEXT: ccmn w1, #3, #8, gt
-; GISEL-NEXT: csel w0, w1, w0, ge
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: neg_range_int_2:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp w0, w2
+; CHECK-GI-NEXT: ccmn w1, #3, #8, gt
+; CHECK-GI-NEXT: csel w0, w1, w0, ge
+; CHECK-GI-NEXT: ret
%cmp = icmp sge i32 %b, -3
%cmp1 = icmp sgt i32 %a, %c
%or.cond = and i1 %cmp, %cmp1
@@ -363,22 +363,22 @@ define i32 @neg_range_int_2(i32 %a, i32 %b, i32 %c) {
; (b < -(d | 1) && a >= c)
define i32 @neg_range_int_comp2(i32 %a, i32 %b, i32 %c, i32 %d) {
-; SDISEL-LABEL: neg_range_int_comp2:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: orr w8, w3, #0x1
-; SDISEL-NEXT: cmp w0, w2
-; SDISEL-NEXT: ccmn w1, w8, #0, ge
-; SDISEL-NEXT: csel w0, w1, w0, lt
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: neg_range_int_comp2:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: orr w8, w3, #0x1
+; CHECK-SD-NEXT: cmp w0, w2
+; CHECK-SD-NEXT: ccmn w1, w8, #0, ge
+; CHECK-SD-NEXT: csel w0, w1, w0, lt
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: neg_range_int_comp2:
-; GISEL: // %bb.0:
-; GISEL-NEXT: orr w8, w3, #0x1
-; GISEL-NEXT: cmp w0, w2
-; GISEL-NEXT: neg w8, w8
-; GISEL-NEXT: ccmp w1, w8, #0, ge
-; GISEL-NEXT: csel w0, w1, w0, lt
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: neg_range_int_comp2:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: orr w8, w3, #0x1
+; CHECK-GI-NEXT: cmp w0, w2
+; CHECK-GI-NEXT: neg w8, w8
+; CHECK-GI-NEXT: ccmp w1, w8, #0, ge
+; CHECK-GI-NEXT: csel w0, w1, w0, lt
+; CHECK-GI-NEXT: ret
%dor = or i32 %d, 1
%negd = sub i32 0, %dor
%cmp = icmp slt i32 %b, %negd
@@ -390,22 +390,22 @@ define i32 @neg_range_int_comp2(i32 %a, i32 %b, i32 %c, i32 %d) {
; (b <u -(d | 1) && a > c)
define i32 @neg_range_int_comp_u2(i32 %a, i32 %b, i32 %c, i32 %d) {
-; SDISEL-LABEL: neg_range_int_comp_u2:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: orr w8, w3, #0x1
-; SDISEL-NEXT: cmp w0, w2
-; SDISEL-NEXT: ccmn w1, w8, #2, gt
-; SDISEL-NEXT: csel w0, w1, w0, lo
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: neg_range_int_comp_u2:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: orr w8, w3, #0x1
+; CHECK-SD-NEXT: cmp w0, w2
+; CHECK-SD-NEXT: ccmn w1, w8, #2, gt
+; CHECK-SD-NEXT: csel w0, w1, w0, lo
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: neg_range_int_comp_u2:
-; GISEL: // %bb.0:
-; GISEL-NEXT: orr w8, w3, #0x1
-; GISEL-NEXT: cmp w0, w2
-; GISEL-NEXT: neg w8, w8
-; GISEL-NEXT: ccmp w1, w8, #2, gt
-; GISEL-NEXT: csel w0, w1, w0, lo
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: neg_range_int_comp_u2:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: orr w8, w3, #0x1
+; CHECK-GI-NEXT: cmp w0, w2
+; CHECK-GI-NEXT: neg w8, w8
+; CHECK-GI-NEXT: ccmp w1, w8, #2, gt
+; CHECK-GI-NEXT: csel w0, w1, w0, lo
+; CHECK-GI-NEXT: ret
%dor = or i32 %d, 1
%negd = sub i32 0, %dor
%cmp = icmp ult i32 %b, %negd
@@ -417,22 +417,22 @@ define i32 @neg_range_int_comp_u2(i32 %a, i32 %b, i32 %c, i32 %d) {
; (b > -(d | 1) && a u > c)
define i32 @neg_range_int_comp_ua2(i32 %a, i32 %b, i32 %c, i32 %d) {
-; SDISEL-LABEL: neg_range_int_comp_ua2:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: orr w8, w3, #0x1
-; SDISEL-NEXT: cmp w0, w2
-; SDISEL-NEXT: ccmn w1, w8, #4, hi
-; SDISEL-NEXT: csel w0, w1, w0, gt
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: neg_range_int_comp_ua2:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: orr w8, w3, #0x1
+; CHECK-SD-NEXT: cmp w0, w2
+; CHECK-SD-NEXT: ccmn w1, w8, #4, hi
+; CHECK-SD-NEXT: csel w0, w1, w0, gt
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: neg_range_int_comp_ua2:
-; GISEL: // %bb.0:
-; GISEL-NEXT: orr w8, w3, #0x1
-; GISEL-NEXT: cmp w0, w2
-; GISEL-NEXT: neg w8, w8
-; GISEL-NEXT: ccmp w1, w8, #4, hi
-; GISEL-NEXT: csel w0, w1, w0, gt
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: neg_range_int_comp_ua2:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: orr w8, w3, #0x1
+; CHECK-GI-NEXT: cmp w0, w2
+; CHECK-GI-NEXT: neg w8, w8
+; CHECK-GI-NEXT: ccmp w1, w8, #4, hi
+; CHECK-GI-NEXT: csel w0, w1, w0, gt
+; CHECK-GI-NEXT: ret
%dor = or i32 %d, 1
%negd = sub i32 0, %dor
%cmp = icmp sgt i32 %b, %negd
@@ -444,22 +444,22 @@ define i32 @neg_range_int_comp_ua2(i32 %a, i32 %b, i32 %c, i32 %d) {
; (b > -(d | 1) && a u == c)
define i32 @neg_range_int_comp_ua3(i32 %a, i32 %b, i32 %c, i32 %d) {
-; SDISEL-LABEL: neg_range_int_comp_ua3:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: orr w8, w3, #0x1
-; SDISEL-NEXT: cmp w0, w2
-; SDISEL-NEXT: ccmn w1, w8, #4, eq
-; SDISEL-NEXT: csel w0, w1, w0, gt
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: neg_range_int_comp_ua3:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: orr w8, w3, #0x1
+; CHECK-SD-NEXT: cmp w0, w2
+; CHECK-SD-NEXT: ccmn w1, w8, #4, eq
+; CHECK-SD-NEXT: csel w0, w1, w0, gt
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: neg_range_int_comp_ua3:
-; GISEL: // %bb.0:
-; GISEL-NEXT: orr w8, w3, #0x1
-; GISEL-NEXT: cmp w0, w2
-; GISEL-NEXT: neg w8, w8
-; GISEL-NEXT: ccmp w1, w8, #4, eq
-; GISEL-NEXT: csel w0, w1, w0, gt
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: neg_range_int_comp_ua3:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: orr w8, w3, #0x1
+; CHECK-GI-NEXT: cmp w0, w2
+; CHECK-GI-NEXT: neg w8, w8
+; CHECK-GI-NEXT: ccmp w1, w8, #4, eq
+; CHECK-GI-NEXT: csel w0, w1, w0, gt
+; CHECK-GI-NEXT: ret
%dor = or i32 %d, 1
%negd = sub i32 0, %dor
%cmp = icmp sgt i32 %b, %negd
@@ -471,26 +471,26 @@ define i32 @neg_range_int_comp_ua3(i32 %a, i32 %b, i32 %c, i32 %d) {
; -(a | 1) > (b | 3) && a < c
define i32 @neg_range_int_c(i32 %a, i32 %b, i32 %c) {
-; SDISEL-LABEL: neg_range_int_c:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: orr w8, w0, #0x1
-; SDISEL-NEXT: orr w9, w1, #0x3
-; SDISEL-NEXT: cmn w9, w8
-; SDISEL-NEXT: ccmp w2, w0, #2, lo
-; SDISEL-NEXT: cset w0, lo
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: neg_range_int_c:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: orr w8, w0, #0x1
+; CHECK-SD-NEXT: orr w9, w1, #0x3
+; CHECK-SD-NEXT: cmn w9, w8
+; CHECK-SD-NEXT: ccmp w2, w0, #2, lo
+; CHECK-SD-NEXT: cset w0, lo
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: neg_range_int_c:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: orr w8, w0, #0x1
-; GISEL-NEXT: orr w9, w1, #0x3
-; GISEL-NEXT: neg w8, w8
-; GISEL-NEXT: cmp w9, w8
-; GISEL-NEXT: cset w8, lo
-; GISEL-NEXT: cmp w2, w0
-; GISEL-NEXT: cset w9, lo
-; GISEL-NEXT: and w0, w8, w9
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: neg_range_int_c:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: orr w8, w0, #0x1
+; CHECK-GI-NEXT: orr w9, w1, #0x3
+; CHECK-GI-NEXT: neg w8, w8
+; CHECK-GI-NEXT: cmp w9, w8
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: cmp w2, w0
+; CHECK-GI-NEXT: cset w9, lo
+; CHECK-GI-NEXT: and w0, w8, w9
+; CHECK-GI-NEXT: ret
entry:
%or = or i32 %a, 1
%sub = sub i32 0, %or
diff --git a/llvm/test/CodeGen/AArch64/dag-combine-select.ll b/llvm/test/CodeGen/AArch64/dag-combine-select.ll
index 56208f1..02b0077 100644
--- a/llvm/test/CodeGen/AArch64/dag-combine-select.ll
+++ b/llvm/test/CodeGen/AArch64/dag-combine-select.ll
@@ -1,26 +1,26 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple arm64-none-eabi -o - %s | FileCheck %s --check-prefixes=CHECK,SDISEL
-; RUN: llc -mtriple arm64-none-eabi -global-isel -o - %s | FileCheck %s --check-prefixes=CHECK,GISEL
+; RUN: llc -mtriple arm64-none-eabi -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc -mtriple arm64-none-eabi -global-isel -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK-GI
@out = internal global i32 0, align 4
; Ensure that we transform select(C0, x, select(C1, x, y)) towards
; select(C0 | C1, x, y) so we can use CMP;CCMP for the implementation.
define i32 @test0(i32 %v0, i32 %v1, i32 %v2) {
-; SDISEL-LABEL: test0:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: cmp w0, #7
-; SDISEL-NEXT: ccmp w1, #0, #0, ne
-; SDISEL-NEXT: csel w0, w1, w2, gt
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: test0:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, #7
+; CHECK-SD-NEXT: ccmp w1, #0, #0, ne
+; CHECK-SD-NEXT: csel w0, w1, w2, gt
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: test0:
-; GISEL: // %bb.0:
-; GISEL-NEXT: cmp w0, #7
-; GISEL-NEXT: csel w8, w1, w2, eq
-; GISEL-NEXT: cmp w1, #0
-; GISEL-NEXT: csel w0, w1, w8, gt
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: test0:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp w0, #7
+; CHECK-GI-NEXT: csel w8, w1, w2, eq
+; CHECK-GI-NEXT: cmp w1, #0
+; CHECK-GI-NEXT: csel w0, w1, w8, gt
+; CHECK-GI-NEXT: ret
%cmp1 = icmp eq i32 %v0, 7
%cmp2 = icmp sgt i32 %v1, 0
%sel0 = select i1 %cmp1, i32 %v1, i32 %v2
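; The same transform in IR terms, as a sketch with %x reused in both arms:
;   %s1 = select i1 %c1, i32 %x, i32 %y
;   %s0 = select i1 %c0, i32 %x, i32 %s1
; becomes
;   %c  = or i1 %c0, %c1
;   %s0 = select i1 %c, i32 %x, i32 %y
; which maps directly onto the cmp;ccmp;csel in the CHECK-SD output.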
@@ -32,36 +32,36 @@ define i32 @test0(i32 %v0, i32 %v1, i32 %v2) {
; sequences. This case should be transformed to select(C0, select(C1, x, y), y)
; anyway to get CSE effects.
define void @test1(i32 %bitset, i32 %val0, i32 %val1) {
-; SDISEL-LABEL: test1:
-; SDISEL: // %bb.0:
-; SDISEL-NEXT: cmp w0, #7
-; SDISEL-NEXT: adrp x9, out
-; SDISEL-NEXT: csel w8, w1, w2, eq
-; SDISEL-NEXT: cmp w8, #13
-; SDISEL-NEXT: csel w8, w1, w2, lo
-; SDISEL-NEXT: cmp w0, #42
-; SDISEL-NEXT: csel w10, w1, w8, eq
-; SDISEL-NEXT: str w8, [x9, :lo12:out]
-; SDISEL-NEXT: str w10, [x9, :lo12:out]
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: test1:
+; CHECK-SD: // %bb.0:
+; CHECK-SD-NEXT: cmp w0, #7
+; CHECK-SD-NEXT: adrp x9, out
+; CHECK-SD-NEXT: csel w8, w1, w2, eq
+; CHECK-SD-NEXT: cmp w8, #13
+; CHECK-SD-NEXT: csel w8, w1, w2, lo
+; CHECK-SD-NEXT: cmp w0, #42
+; CHECK-SD-NEXT: csel w10, w1, w8, eq
+; CHECK-SD-NEXT: str w8, [x9, :lo12:out]
+; CHECK-SD-NEXT: str w10, [x9, :lo12:out]
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: test1:
-; GISEL: // %bb.0:
-; GISEL-NEXT: cmp w0, #7
-; GISEL-NEXT: csel w8, w1, w2, eq
-; GISEL-NEXT: cmp w8, #13
-; GISEL-NEXT: cset w8, lo
-; GISEL-NEXT: tst w8, #0x1
-; GISEL-NEXT: csel w9, w1, w2, ne
-; GISEL-NEXT: cmp w0, #42
-; GISEL-NEXT: cset w10, eq
-; GISEL-NEXT: orr w8, w10, w8
-; GISEL-NEXT: tst w8, #0x1
-; GISEL-NEXT: adrp x8, out
-; GISEL-NEXT: csel w10, w1, w2, ne
-; GISEL-NEXT: str w9, [x8, :lo12:out]
-; GISEL-NEXT: str w10, [x8, :lo12:out]
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: test1:
+; CHECK-GI: // %bb.0:
+; CHECK-GI-NEXT: cmp w0, #7
+; CHECK-GI-NEXT: csel w8, w1, w2, eq
+; CHECK-GI-NEXT: cmp w8, #13
+; CHECK-GI-NEXT: cset w8, lo
+; CHECK-GI-NEXT: tst w8, #0x1
+; CHECK-GI-NEXT: csel w9, w1, w2, ne
+; CHECK-GI-NEXT: cmp w0, #42
+; CHECK-GI-NEXT: cset w10, eq
+; CHECK-GI-NEXT: orr w8, w10, w8
+; CHECK-GI-NEXT: tst w8, #0x1
+; CHECK-GI-NEXT: adrp x8, out
+; CHECK-GI-NEXT: csel w10, w1, w2, ne
+; CHECK-GI-NEXT: str w9, [x8, :lo12:out]
+; CHECK-GI-NEXT: str w10, [x8, :lo12:out]
+; CHECK-GI-NEXT: ret
%cmp1 = icmp eq i32 %bitset, 7
%cond = select i1 %cmp1, i32 %val0, i32 %val1
%cmp5 = icmp ult i32 %cond, 13
diff --git a/llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_1op.ll b/llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_1op.ll
index 1b98954..b056460 100644
--- a/llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_1op.ll
+++ b/llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_1op.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64 -global-isel=0 -mattr=+v8.2a,+fullfp16 | FileCheck %s --check-prefixes=CHECK,SDISEL
-; RUN: llc < %s -mtriple=aarch64 -global-isel=1 -mattr=+v8.2a,+fullfp16 | FileCheck %s --check-prefixes=CHECK,GISEL
+; RUN: llc < %s -mtriple=aarch64 -global-isel=0 -mattr=+v8.2a,+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple=aarch64 -global-isel=1 -mattr=+v8.2a,+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
declare i64 @llvm.aarch64.neon.fcvtpu.i64.f16(half)
declare i32 @llvm.aarch64.neon.fcvtpu.i32.f16(half)
@@ -27,18 +27,18 @@ declare half @llvm.aarch64.neon.frecpx.f16(half)
declare half @llvm.aarch64.neon.frecpe.f16(half)
define dso_local i16 @t2(half %a) {
-; SDISEL-LABEL: t2:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: fcmp h0, #0.0
-; SDISEL-NEXT: csetm w0, eq
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: t2:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fcmp h0, #0.0
+; CHECK-SD-NEXT: csetm w0, eq
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: t2:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: fcmp h0, #0.0
-; GISEL-NEXT: cset w8, eq
-; GISEL-NEXT: sbfx w0, w8, #0, #1
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: t2:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fcmp h0, #0.0
+; CHECK-GI-NEXT: cset w8, eq
+; CHECK-GI-NEXT: sbfx w0, w8, #0, #1
+; CHECK-GI-NEXT: ret
entry:
%0 = fcmp oeq half %a, 0xH0000
%vceqz = sext i1 %0 to i16
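; Both forms materialize the sign-extended i1: csetm writes an all-ones
; mask directly when the condition holds, while the cset/sbfx pair first
; produces 0 or 1 and then sign-extends bit 0 (sbfx w0, w8, #0, #1), so
; the two outputs agree.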
@@ -46,18 +46,18 @@ entry:
}
define dso_local i16 @t3(half %a) {
-; SDISEL-LABEL: t3:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: fcmp h0, #0.0
-; SDISEL-NEXT: csetm w0, ge
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: t3:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fcmp h0, #0.0
+; CHECK-SD-NEXT: csetm w0, ge
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: t3:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: fcmp h0, #0.0
-; GISEL-NEXT: cset w8, ge
-; GISEL-NEXT: sbfx w0, w8, #0, #1
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: t3:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fcmp h0, #0.0
+; CHECK-GI-NEXT: cset w8, ge
+; CHECK-GI-NEXT: sbfx w0, w8, #0, #1
+; CHECK-GI-NEXT: ret
entry:
%0 = fcmp oge half %a, 0xH0000
%vcgez = sext i1 %0 to i16
@@ -65,18 +65,18 @@ entry:
}
define dso_local i16 @t4(half %a) {
-; SDISEL-LABEL: t4:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: fcmp h0, #0.0
-; SDISEL-NEXT: csetm w0, gt
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: t4:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fcmp h0, #0.0
+; CHECK-SD-NEXT: csetm w0, gt
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: t4:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: fcmp h0, #0.0
-; GISEL-NEXT: cset w8, gt
-; GISEL-NEXT: sbfx w0, w8, #0, #1
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: t4:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fcmp h0, #0.0
+; CHECK-GI-NEXT: cset w8, gt
+; CHECK-GI-NEXT: sbfx w0, w8, #0, #1
+; CHECK-GI-NEXT: ret
entry:
%0 = fcmp ogt half %a, 0xH0000
%vcgtz = sext i1 %0 to i16
@@ -84,18 +84,18 @@ entry:
}
define dso_local i16 @t5(half %a) {
-; SDISEL-LABEL: t5:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: fcmp h0, #0.0
-; SDISEL-NEXT: csetm w0, ls
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: t5:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fcmp h0, #0.0
+; CHECK-SD-NEXT: csetm w0, ls
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: t5:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: fcmp h0, #0.0
-; GISEL-NEXT: cset w8, ls
-; GISEL-NEXT: sbfx w0, w8, #0, #1
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: t5:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fcmp h0, #0.0
+; CHECK-GI-NEXT: cset w8, ls
+; CHECK-GI-NEXT: sbfx w0, w8, #0, #1
+; CHECK-GI-NEXT: ret
entry:
%0 = fcmp ole half %a, 0xH0000
%vclez = sext i1 %0 to i16
@@ -103,18 +103,18 @@ entry:
}
define dso_local i16 @t6(half %a) {
-; SDISEL-LABEL: t6:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: fcmp h0, #0.0
-; SDISEL-NEXT: csetm w0, mi
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: t6:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fcmp h0, #0.0
+; CHECK-SD-NEXT: csetm w0, mi
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: t6:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: fcmp h0, #0.0
-; GISEL-NEXT: cset w8, mi
-; GISEL-NEXT: sbfx w0, w8, #0, #1
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: t6:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fcmp h0, #0.0
+; CHECK-GI-NEXT: cset w8, mi
+; CHECK-GI-NEXT: sbfx w0, w8, #0, #1
+; CHECK-GI-NEXT: ret
entry:
%0 = fcmp olt half %a, 0xH0000
%vcltz = sext i1 %0 to i16
@@ -172,15 +172,15 @@ entry:
}
define dso_local i16 @t16(half %a) {
-; SDISEL-LABEL: t16:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: fcvtzs w0, h0
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: t16:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fcvtzs w0, h0
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: t16:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: fcvtzu w0, h0
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: t16:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fcvtzu w0, h0
+; CHECK-GI-NEXT: ret
entry:
%0 = fptoui half %a to i16
ret i16 %0
diff --git a/llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_2op.ll b/llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_2op.ll
index 5b08ef2..da70599 100644
--- a/llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_2op.ll
+++ b/llvm/test/CodeGen/AArch64/fp16_intrinsic_scalar_2op.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=aarch64 -global-isel=0 -mattr=+v8.2a,+fullfp16 | FileCheck %s --check-prefixes=CHECK,SDISEL
-; RUN: llc < %s -mtriple=aarch64 -global-isel=1 -mattr=+v8.2a,+fullfp16 | FileCheck %s --check-prefixes=CHECK,GISEL
+; RUN: llc < %s -mtriple=aarch64 -global-isel=0 -mattr=+v8.2a,+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-SD
+; RUN: llc < %s -mtriple=aarch64 -global-isel=1 -mattr=+v8.2a,+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-GI
declare half @llvm.aarch64.sisd.fabd.f16(half, half)
@@ -35,18 +35,18 @@ entry:
}
define dso_local i16 @t_vceqh_f16(half %a, half %b) {
-; SDISEL-LABEL: t_vceqh_f16:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: fcmp h0, h1
-; SDISEL-NEXT: csetm w0, eq
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: t_vceqh_f16:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fcmp h0, h1
+; CHECK-SD-NEXT: csetm w0, eq
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: t_vceqh_f16:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: fcmp h0, h1
-; GISEL-NEXT: cset w8, eq
-; GISEL-NEXT: sbfx w0, w8, #0, #1
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: t_vceqh_f16:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fcmp h0, h1
+; CHECK-GI-NEXT: cset w8, eq
+; CHECK-GI-NEXT: sbfx w0, w8, #0, #1
+; CHECK-GI-NEXT: ret
entry:
%0 = fcmp oeq half %a, %b
%vcmpd = sext i1 %0 to i16
@@ -54,18 +54,18 @@ entry:
}
define dso_local i16 @t_vcgeh_f16(half %a, half %b) {
-; SDISEL-LABEL: t_vcgeh_f16:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: fcmp h0, h1
-; SDISEL-NEXT: csetm w0, ge
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: t_vcgeh_f16:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fcmp h0, h1
+; CHECK-SD-NEXT: csetm w0, ge
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: t_vcgeh_f16:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: fcmp h0, h1
-; GISEL-NEXT: cset w8, ge
-; GISEL-NEXT: sbfx w0, w8, #0, #1
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: t_vcgeh_f16:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fcmp h0, h1
+; CHECK-GI-NEXT: cset w8, ge
+; CHECK-GI-NEXT: sbfx w0, w8, #0, #1
+; CHECK-GI-NEXT: ret
entry:
%0 = fcmp oge half %a, %b
%vcmpd = sext i1 %0 to i16
@@ -73,18 +73,18 @@ entry:
}
define dso_local i16 @t_vcgth_f16(half %a, half %b) {
-; SDISEL-LABEL: t_vcgth_f16:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: fcmp h0, h1
-; SDISEL-NEXT: csetm w0, gt
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: t_vcgth_f16:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fcmp h0, h1
+; CHECK-SD-NEXT: csetm w0, gt
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: t_vcgth_f16:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: fcmp h0, h1
-; GISEL-NEXT: cset w8, gt
-; GISEL-NEXT: sbfx w0, w8, #0, #1
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: t_vcgth_f16:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fcmp h0, h1
+; CHECK-GI-NEXT: cset w8, gt
+; CHECK-GI-NEXT: sbfx w0, w8, #0, #1
+; CHECK-GI-NEXT: ret
entry:
%0 = fcmp ogt half %a, %b
%vcmpd = sext i1 %0 to i16
@@ -92,18 +92,18 @@ entry:
}
define dso_local i16 @t_vcleh_f16(half %a, half %b) {
-; SDISEL-LABEL: t_vcleh_f16:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: fcmp h0, h1
-; SDISEL-NEXT: csetm w0, ls
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: t_vcleh_f16:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fcmp h0, h1
+; CHECK-SD-NEXT: csetm w0, ls
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: t_vcleh_f16:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: fcmp h0, h1
-; GISEL-NEXT: cset w8, ls
-; GISEL-NEXT: sbfx w0, w8, #0, #1
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: t_vcleh_f16:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fcmp h0, h1
+; CHECK-GI-NEXT: cset w8, ls
+; CHECK-GI-NEXT: sbfx w0, w8, #0, #1
+; CHECK-GI-NEXT: ret
entry:
%0 = fcmp ole half %a, %b
%vcmpd = sext i1 %0 to i16
@@ -111,18 +111,18 @@ entry:
}
define dso_local i16 @t_vclth_f16(half %a, half %b) {
-; SDISEL-LABEL: t_vclth_f16:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: fcmp h0, h1
-; SDISEL-NEXT: csetm w0, mi
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: t_vclth_f16:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fcmp h0, h1
+; CHECK-SD-NEXT: csetm w0, mi
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: t_vclth_f16:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: fcmp h0, h1
-; GISEL-NEXT: cset w8, mi
-; GISEL-NEXT: sbfx w0, w8, #0, #1
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: t_vclth_f16:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: fcmp h0, h1
+; CHECK-GI-NEXT: cset w8, mi
+; CHECK-GI-NEXT: sbfx w0, w8, #0, #1
+; CHECK-GI-NEXT: ret
entry:
%0 = fcmp olt half %a, %b
%vcmpd = sext i1 %0 to i16
@@ -187,18 +187,18 @@ declare half @llvm.aarch64.neon.vcvtfxu2fp.f16.i32(i32, i32) #1
declare i32 @llvm.aarch64.neon.vcvtfp2fxu.i32.f16(half, i32) #1
define dso_local half @test_vcvth_n_f16_s16_1(i16 %a) {
-; SDISEL-LABEL: test_vcvth_n_f16_s16_1:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: fmov s0, w0
-; SDISEL-NEXT: scvtf h0, h0, #1
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: test_vcvth_n_f16_s16_1:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: scvtf h0, h0, #1
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: test_vcvth_n_f16_s16_1:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: sxth w8, w0
-; GISEL-NEXT: fmov s0, w8
-; GISEL-NEXT: scvtf h0, h0, #1
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: test_vcvth_n_f16_s16_1:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sxth w8, w0
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: scvtf h0, h0, #1
+; CHECK-GI-NEXT: ret
entry:
%sext = sext i16 %a to i32
%fcvth_n = tail call half @llvm.aarch64.neon.vcvtfxs2fp.f16.i32(i32 %sext, i32 1)
@@ -206,18 +206,18 @@ entry:
}
define dso_local half @test_vcvth_n_f16_s16_16(i16 %a) {
-; SDISEL-LABEL: test_vcvth_n_f16_s16_16:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: fmov s0, w0
-; SDISEL-NEXT: scvtf h0, h0, #16
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: test_vcvth_n_f16_s16_16:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: scvtf h0, h0, #16
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: test_vcvth_n_f16_s16_16:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: sxth w8, w0
-; GISEL-NEXT: fmov s0, w8
-; GISEL-NEXT: scvtf h0, h0, #16
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: test_vcvth_n_f16_s16_16:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: sxth w8, w0
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: scvtf h0, h0, #16
+; CHECK-GI-NEXT: ret
entry:
%sext = sext i16 %a to i32
%fcvth_n = tail call half @llvm.aarch64.neon.vcvtfxs2fp.f16.i32(i32 %sext, i32 16)
@@ -315,18 +315,18 @@ entry:
}
define dso_local half @test_vcvth_n_f16_u16_1(i16 %a) {
-; SDISEL-LABEL: test_vcvth_n_f16_u16_1:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: fmov s0, w0
-; SDISEL-NEXT: ucvtf h0, h0, #1
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: test_vcvth_n_f16_u16_1:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: ucvtf h0, h0, #1
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: test_vcvth_n_f16_u16_1:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: and w8, w0, #0xffff
-; GISEL-NEXT: fmov s0, w8
-; GISEL-NEXT: ucvtf h0, h0, #1
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: test_vcvth_n_f16_u16_1:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: and w8, w0, #0xffff
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: ucvtf h0, h0, #1
+; CHECK-GI-NEXT: ret
entry:
%0 = zext i16 %a to i32
%fcvth_n = tail call half @llvm.aarch64.neon.vcvtfxu2fp.f16.i32(i32 %0, i32 1)
@@ -334,18 +334,18 @@ entry:
}
define dso_local half @test_vcvth_n_f16_u16_16(i16 %a) {
-; SDISEL-LABEL: test_vcvth_n_f16_u16_16:
-; SDISEL: // %bb.0: // %entry
-; SDISEL-NEXT: fmov s0, w0
-; SDISEL-NEXT: ucvtf h0, h0, #16
-; SDISEL-NEXT: ret
+; CHECK-SD-LABEL: test_vcvth_n_f16_u16_16:
+; CHECK-SD: // %bb.0: // %entry
+; CHECK-SD-NEXT: fmov s0, w0
+; CHECK-SD-NEXT: ucvtf h0, h0, #16
+; CHECK-SD-NEXT: ret
;
-; GISEL-LABEL: test_vcvth_n_f16_u16_16:
-; GISEL: // %bb.0: // %entry
-; GISEL-NEXT: and w8, w0, #0xffff
-; GISEL-NEXT: fmov s0, w8
-; GISEL-NEXT: ucvtf h0, h0, #16
-; GISEL-NEXT: ret
+; CHECK-GI-LABEL: test_vcvth_n_f16_u16_16:
+; CHECK-GI: // %bb.0: // %entry
+; CHECK-GI-NEXT: and w8, w0, #0xffff
+; CHECK-GI-NEXT: fmov s0, w8
+; CHECK-GI-NEXT: ucvtf h0, h0, #16
+; CHECK-GI-NEXT: ret
entry:
%0 = zext i16 %a to i32
%fcvth_n = tail call half @llvm.aarch64.neon.vcvtfxu2fp.f16.i32(i32 %0, i32 16)
diff --git a/llvm/test/CodeGen/AMDGPU/gfx1250-scratch-scope-se.ll b/llvm/test/CodeGen/AMDGPU/gfx1250-scratch-scope-se.ll
new file mode 100644
index 0000000..d1e82a0
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/gfx1250-scratch-scope-se.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,GCN-SDAG %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1250 < %s | FileCheck -check-prefixes=GCN,GCN-GISEL %s
+
+; Test that stores that may hit scratch are correctly promoted to SCOPE_SE.
+
+define void @test_scratch_store(ptr addrspace(5) %ptr, i32 %val) {
+; GCN-LABEL: test_scratch_store:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: scratch_store_b32 v0, v1, off scope:SCOPE_SE
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ store i32 %val, ptr addrspace(5) %ptr
+ ret void
+}
+
+define void @test_unknown_flat_store(ptr %ptr, i32 %val) {
+; GCN-LABEL: test_unknown_flat_store:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: flat_store_b32 v[0:1], v2 scope:SCOPE_SE
+; GCN-NEXT: s_wait_dscnt 0x0
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ store i32 %val, ptr %ptr
+ ret void
+}
+
+define void @test_flat_store_no_scratch_alloc(ptr %ptr, i32 %val) #0 {
+; GCN-LABEL: test_flat_store_no_scratch_alloc:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: flat_store_b32 v[0:1], v2
+; GCN-NEXT: s_wait_dscnt 0x0
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ store i32 %val, ptr %ptr
+ ret void
+}
+
+; TODO: use the !noalias.addrspace metadata to avoid promoting this store.
+define void @test_flat_store_noalias_addrspace(ptr %ptr, i32 %val) {
+; GCN-LABEL: test_flat_store_noalias_addrspace:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-NEXT: s_wait_kmcnt 0x0
+; GCN-NEXT: flat_store_b32 v[0:1], v2 scope:SCOPE_SE
+; GCN-NEXT: s_wait_dscnt 0x0
+; GCN-NEXT: s_set_pc_i64 s[30:31]
+ store i32 %val, ptr %ptr, !noalias.addrspace !{i32 5, i32 6}
+ ret void
+}
+
+; TODO: it would be nice to handle this case too.
+define void @test_flat_store_select(ptr addrspace(1) %a, ptr addrspace(3) %b, i1 %cond, i32 %val) {
+; GCN-SDAG-LABEL: test_flat_store_select:
+; GCN-SDAG: ; %bb.0:
+; GCN-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-SDAG-NEXT: s_wait_kmcnt 0x0
+; GCN-SDAG-NEXT: v_cmp_ne_u32_e32 vcc_lo, -1, v2
+; GCN-SDAG-NEXT: v_and_b32_e32 v3, 1, v3
+; GCN-SDAG-NEXT: s_mov_b64 s[0:1], src_shared_base
+; GCN-SDAG-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc_lo
+; GCN-SDAG-NEXT: v_cndmask_b32_e64 v5, 0, s1, vcc_lo
+; GCN-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GCN-SDAG-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v3
+; GCN-SDAG-NEXT: v_dual_cndmask_b32 v1, v5, v1 :: v_dual_cndmask_b32 v0, v2, v0
+; GCN-SDAG-NEXT: flat_store_b32 v[0:1], v4 scope:SCOPE_SE
+; GCN-SDAG-NEXT: s_wait_dscnt 0x0
+; GCN-SDAG-NEXT: s_set_pc_i64 s[30:31]
+;
+; GCN-GISEL-LABEL: test_flat_store_select:
+; GCN-GISEL: ; %bb.0:
+; GCN-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
+; GCN-GISEL-NEXT: s_wait_kmcnt 0x0
+; GCN-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, -1, v2
+; GCN-GISEL-NEXT: v_and_b32_e32 v3, 1, v3
+; GCN-GISEL-NEXT: s_mov_b64 s[0:1], src_shared_base
+; GCN-GISEL-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc_lo
+; GCN-GISEL-NEXT: v_cndmask_b32_e64 v5, 0, s1, vcc_lo
+; GCN-GISEL-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GCN-GISEL-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v3
+; GCN-GISEL-NEXT: v_dual_cndmask_b32 v0, v2, v0 :: v_dual_cndmask_b32 v1, v5, v1
+; GCN-GISEL-NEXT: flat_store_b32 v[0:1], v4 scope:SCOPE_SE
+; GCN-GISEL-NEXT: s_wait_dscnt 0x0
+; GCN-GISEL-NEXT: s_set_pc_i64 s[30:31]
+ %a.ascast = addrspacecast ptr addrspace(1) %a to ptr
+ %b.ascast = addrspacecast ptr addrspace(3) %b to ptr
+ %ptr = select i1 %cond, ptr %a.ascast, ptr %b.ascast
+ store i32 %val, ptr %ptr
+ ret void
+}
+
+attributes #0 = { "amdgpu-no-flat-scratch-init" }
diff --git a/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll b/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll
index fd644a3..3a898a9 100644
--- a/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll
+++ b/llvm/test/CodeGen/AMDGPU/global-load-xcnt.ll
@@ -124,27 +124,27 @@ define i32 @test_v64i32_load_store(ptr addrspace(1) %ptr, i32 %idx, ptr addrspac
; GCN-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-SDAG-NEXT: s_wait_kmcnt 0x0
; GCN-SDAG-NEXT: s_clause 0xd
-; GCN-SDAG-NEXT: scratch_store_b32 off, v40, s32 offset:52
-; GCN-SDAG-NEXT: scratch_store_b32 off, v41, s32 offset:48
-; GCN-SDAG-NEXT: scratch_store_b32 off, v42, s32 offset:44
-; GCN-SDAG-NEXT: scratch_store_b32 off, v43, s32 offset:40
-; GCN-SDAG-NEXT: scratch_store_b32 off, v44, s32 offset:36
-; GCN-SDAG-NEXT: scratch_store_b32 off, v45, s32 offset:32
-; GCN-SDAG-NEXT: scratch_store_b32 off, v56, s32 offset:28
-; GCN-SDAG-NEXT: scratch_store_b32 off, v57, s32 offset:24
-; GCN-SDAG-NEXT: scratch_store_b32 off, v58, s32 offset:20
-; GCN-SDAG-NEXT: scratch_store_b32 off, v59, s32 offset:16
-; GCN-SDAG-NEXT: scratch_store_b32 off, v60, s32 offset:12
-; GCN-SDAG-NEXT: scratch_store_b32 off, v61, s32 offset:8
-; GCN-SDAG-NEXT: scratch_store_b32 off, v62, s32 offset:4
-; GCN-SDAG-NEXT: scratch_store_b32 off, v63, s32
+; GCN-SDAG-NEXT: scratch_store_b32 off, v40, s32 offset:52 scope:SCOPE_SE
+; GCN-SDAG-NEXT: scratch_store_b32 off, v41, s32 offset:48 scope:SCOPE_SE
+; GCN-SDAG-NEXT: scratch_store_b32 off, v42, s32 offset:44 scope:SCOPE_SE
+; GCN-SDAG-NEXT: scratch_store_b32 off, v43, s32 offset:40 scope:SCOPE_SE
+; GCN-SDAG-NEXT: scratch_store_b32 off, v44, s32 offset:36 scope:SCOPE_SE
+; GCN-SDAG-NEXT: scratch_store_b32 off, v45, s32 offset:32 scope:SCOPE_SE
+; GCN-SDAG-NEXT: scratch_store_b32 off, v56, s32 offset:28 scope:SCOPE_SE
+; GCN-SDAG-NEXT: scratch_store_b32 off, v57, s32 offset:24 scope:SCOPE_SE
+; GCN-SDAG-NEXT: scratch_store_b32 off, v58, s32 offset:20 scope:SCOPE_SE
+; GCN-SDAG-NEXT: scratch_store_b32 off, v59, s32 offset:16 scope:SCOPE_SE
+; GCN-SDAG-NEXT: scratch_store_b32 off, v60, s32 offset:12 scope:SCOPE_SE
+; GCN-SDAG-NEXT: scratch_store_b32 off, v61, s32 offset:8 scope:SCOPE_SE
+; GCN-SDAG-NEXT: scratch_store_b32 off, v62, s32 offset:4 scope:SCOPE_SE
+; GCN-SDAG-NEXT: scratch_store_b32 off, v63, s32 scope:SCOPE_SE
; GCN-SDAG-NEXT: global_load_b128 v[6:9], v[0:1], off offset:224
; GCN-SDAG-NEXT: v_dual_mov_b32 v5, v4 :: v_dual_mov_b32 v4, v3
; GCN-SDAG-NEXT: s_wait_loadcnt 0x0
-; GCN-SDAG-NEXT: scratch_store_b128 off, v[6:9], s32 offset:56 ; 16-byte Folded Spill
+; GCN-SDAG-NEXT: scratch_store_b128 off, v[6:9], s32 offset:56 scope:SCOPE_SE ; 16-byte Folded Spill
; GCN-SDAG-NEXT: global_load_b128 v[6:9], v[0:1], off offset:240
; GCN-SDAG-NEXT: s_wait_loadcnt 0x0
-; GCN-SDAG-NEXT: scratch_store_b128 off, v[6:9], s32 offset:72 ; 16-byte Folded Spill
+; GCN-SDAG-NEXT: scratch_store_b128 off, v[6:9], s32 offset:72 scope:SCOPE_SE ; 16-byte Folded Spill
; GCN-SDAG-NEXT: s_clause 0xd
; GCN-SDAG-NEXT: global_load_b128 v[10:13], v[0:1], off offset:192
; GCN-SDAG-NEXT: global_load_b128 v[14:17], v[0:1], off offset:208
@@ -206,27 +206,27 @@ define i32 @test_v64i32_load_store(ptr addrspace(1) %ptr, i32 %idx, ptr addrspac
; GCN-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-GISEL-NEXT: s_wait_kmcnt 0x0
; GCN-GISEL-NEXT: s_clause 0xf
-; GCN-GISEL-NEXT: scratch_store_b32 off, v40, s32 offset:60
-; GCN-GISEL-NEXT: scratch_store_b32 off, v41, s32 offset:56
-; GCN-GISEL-NEXT: scratch_store_b32 off, v42, s32 offset:52
-; GCN-GISEL-NEXT: scratch_store_b32 off, v43, s32 offset:48
-; GCN-GISEL-NEXT: scratch_store_b32 off, v44, s32 offset:44
-; GCN-GISEL-NEXT: scratch_store_b32 off, v45, s32 offset:40
-; GCN-GISEL-NEXT: scratch_store_b32 off, v46, s32 offset:36
-; GCN-GISEL-NEXT: scratch_store_b32 off, v47, s32 offset:32
-; GCN-GISEL-NEXT: scratch_store_b32 off, v56, s32 offset:28
-; GCN-GISEL-NEXT: scratch_store_b32 off, v57, s32 offset:24
-; GCN-GISEL-NEXT: scratch_store_b32 off, v58, s32 offset:20
-; GCN-GISEL-NEXT: scratch_store_b32 off, v59, s32 offset:16
-; GCN-GISEL-NEXT: scratch_store_b32 off, v60, s32 offset:12
-; GCN-GISEL-NEXT: scratch_store_b32 off, v61, s32 offset:8
-; GCN-GISEL-NEXT: scratch_store_b32 off, v62, s32 offset:4
-; GCN-GISEL-NEXT: scratch_store_b32 off, v63, s32
+; GCN-GISEL-NEXT: scratch_store_b32 off, v40, s32 offset:60 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_store_b32 off, v41, s32 offset:56 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_store_b32 off, v42, s32 offset:52 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_store_b32 off, v43, s32 offset:48 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_store_b32 off, v44, s32 offset:44 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_store_b32 off, v45, s32 offset:40 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_store_b32 off, v46, s32 offset:36 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_store_b32 off, v47, s32 offset:32 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_store_b32 off, v56, s32 offset:28 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_store_b32 off, v57, s32 offset:24 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_store_b32 off, v58, s32 offset:20 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_store_b32 off, v59, s32 offset:16 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_store_b32 off, v60, s32 offset:12 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_store_b32 off, v61, s32 offset:8 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_store_b32 off, v62, s32 offset:4 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_store_b32 off, v63, s32 scope:SCOPE_SE
; GCN-GISEL-NEXT: s_wait_xcnt 0x8
; GCN-GISEL-NEXT: v_dual_mov_b32 v46, v3 :: v_dual_mov_b32 v47, v4
; GCN-GISEL-NEXT: global_load_b128 v[2:5], v[0:1], off offset:32
; GCN-GISEL-NEXT: s_wait_loadcnt 0x0
-; GCN-GISEL-NEXT: scratch_store_b128 off, v[2:5], s32 offset:80 ; 16-byte Folded Spill
+; GCN-GISEL-NEXT: scratch_store_b128 off, v[2:5], s32 offset:80 scope:SCOPE_SE ; 16-byte Folded Spill
; GCN-GISEL-NEXT: s_clause 0xe
; GCN-GISEL-NEXT: global_load_b128 v[6:9], v[0:1], off offset:48
; GCN-GISEL-NEXT: global_load_b128 v[10:13], v[0:1], off offset:64
@@ -244,7 +244,7 @@ define i32 @test_v64i32_load_store(ptr addrspace(1) %ptr, i32 %idx, ptr addrspac
; GCN-GISEL-NEXT: global_load_b128 v[60:63], v[0:1], off offset:16
; GCN-GISEL-NEXT: global_load_b128 v[0:3], v[0:1], off offset:240
; GCN-GISEL-NEXT: s_wait_loadcnt 0x0
-; GCN-GISEL-NEXT: scratch_store_b128 off, v[0:3], s32 offset:64 ; 16-byte Folded Spill
+; GCN-GISEL-NEXT: scratch_store_b128 off, v[0:3], s32 offset:64 scope:SCOPE_SE ; 16-byte Folded Spill
; GCN-GISEL-NEXT: scratch_load_b128 v[0:3], off, s32 offset:80 th:TH_LOAD_LU ; 16-byte Folded Reload
; GCN-GISEL-NEXT: s_wait_loadcnt 0x0
; GCN-GISEL-NEXT: s_clause 0xe
@@ -299,10 +299,10 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt
; GCN-SDAG-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-SDAG-NEXT: s_wait_kmcnt 0x0
; GCN-SDAG-NEXT: s_clause 0x3
-; GCN-SDAG-NEXT: scratch_store_b32 off, v40, s32 offset:12
-; GCN-SDAG-NEXT: scratch_store_b32 off, v41, s32 offset:8
-; GCN-SDAG-NEXT: scratch_store_b32 off, v42, s32 offset:4
-; GCN-SDAG-NEXT: scratch_store_b32 off, v43, s32
+; GCN-SDAG-NEXT: scratch_store_b32 off, v40, s32 offset:12 scope:SCOPE_SE
+; GCN-SDAG-NEXT: scratch_store_b32 off, v41, s32 offset:8 scope:SCOPE_SE
+; GCN-SDAG-NEXT: scratch_store_b32 off, v42, s32 offset:4 scope:SCOPE_SE
+; GCN-SDAG-NEXT: scratch_store_b32 off, v43, s32 scope:SCOPE_SE
; GCN-SDAG-NEXT: s_clause 0x7
; GCN-SDAG-NEXT: global_load_b128 v[10:13], v[0:1], off offset:112
; GCN-SDAG-NEXT: global_load_b128 v[18:21], v[0:1], off offset:96
@@ -385,12 +385,12 @@ define i64 @test_v16i64_load_store(ptr addrspace(1) %ptr_a, ptr addrspace(1) %pt
; GCN-GISEL-NEXT: s_wait_loadcnt_dscnt 0x0
; GCN-GISEL-NEXT: s_wait_kmcnt 0x0
; GCN-GISEL-NEXT: s_clause 0x5
-; GCN-GISEL-NEXT: scratch_store_b32 off, v40, s32 offset:20
-; GCN-GISEL-NEXT: scratch_store_b32 off, v41, s32 offset:16
-; GCN-GISEL-NEXT: scratch_store_b32 off, v42, s32 offset:12
-; GCN-GISEL-NEXT: scratch_store_b32 off, v43, s32 offset:8
-; GCN-GISEL-NEXT: scratch_store_b32 off, v44, s32 offset:4
-; GCN-GISEL-NEXT: scratch_store_b32 off, v45, s32
+; GCN-GISEL-NEXT: scratch_store_b32 off, v40, s32 offset:20 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_store_b32 off, v41, s32 offset:16 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_store_b32 off, v42, s32 offset:12 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_store_b32 off, v43, s32 offset:8 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_store_b32 off, v44, s32 offset:4 scope:SCOPE_SE
+; GCN-GISEL-NEXT: scratch_store_b32 off, v45, s32 scope:SCOPE_SE
; GCN-GISEL-NEXT: s_clause 0x7
; GCN-GISEL-NEXT: global_load_b128 v[6:9], v[0:1], off offset:80
; GCN-GISEL-NEXT: global_load_b128 v[10:13], v[0:1], off
diff --git a/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll b/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll
index c3164b8..f54a383 100644
--- a/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll
+++ b/llvm/test/CodeGen/AMDGPU/partial-regcopy-and-spill-missed-at-regalloc.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-;RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 --stop-after=greedy,1 < %s | FileCheck -check-prefix=REGALLOC-GFX908 %s
+;RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 --stop-after=greedy,2 < %s | FileCheck -check-prefix=REGALLOC-GFX908 %s
;RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 --stop-after=prologepilog < %s | FileCheck -check-prefix=PEI-GFX908 %s
-;RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a --stop-after=greedy,1 < %s | FileCheck -check-prefix=REGALLOC-GFX90A %s
+;RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a --stop-after=greedy,2 < %s | FileCheck -check-prefix=REGALLOC-GFX90A %s
;RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a --stop-after=prologepilog < %s | FileCheck -check-prefix=PEI-GFX90A %s
; Partial reg copy and spill missed during regalloc are handled later at frame lowering.
@@ -12,17 +12,21 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 {
; REGALLOC-GFX908-NEXT: liveins: $sgpr4_sgpr5
; REGALLOC-GFX908-NEXT: {{ $}}
; REGALLOC-GFX908-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2162697 /* reguse:AGPR_32 */, undef %6:agpr_32
- ; REGALLOC-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6094858 /* regdef:VReg_128 */, def %7
- ; REGALLOC-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3538954 /* regdef:VReg_64 */, def %8
- ; REGALLOC-GFX908-NEXT: GLOBAL_STORE_DWORDX4 undef %15:vreg_64, %7, 0, 0, implicit $exec :: (volatile store (s128) into `ptr addrspace(1) poison`, addrspace 1)
+ ; REGALLOC-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6094858 /* regdef:VReg_128 */, def %25
+ ; REGALLOC-GFX908-NEXT: [[COPY:%[0-9]+]]:av_128 = COPY %25
+ ; REGALLOC-GFX908-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3538954 /* regdef:VReg_64 */, def %27
+ ; REGALLOC-GFX908-NEXT: SI_SPILL_AV64_SAVE %27, %stack.0, $sgpr32, 0, implicit $exec :: (store (s64) into %stack.0, align 4, addrspace 5)
+ ; REGALLOC-GFX908-NEXT: [[COPY1:%[0-9]+]]:vreg_128 = COPY [[COPY]]
+ ; REGALLOC-GFX908-NEXT: GLOBAL_STORE_DWORDX4 undef %15:vreg_64, [[COPY1]], 0, 0, implicit $exec :: (volatile store (s128) into `ptr addrspace(1) poison`, addrspace 1)
; REGALLOC-GFX908-NEXT: renamable $sgpr0_sgpr1_sgpr2_sgpr3 = S_LOAD_DWORDX4_IMM killed renamable $sgpr4_sgpr5, 0, 0 :: (dereferenceable invariant load (s128) from %ir.arg.kernarg.offset1, addrspace 4)
- ; REGALLOC-GFX908-NEXT: [[COPY:%[0-9]+]]:areg_128 = COPY killed renamable $sgpr0_sgpr1_sgpr2_sgpr3
+ ; REGALLOC-GFX908-NEXT: [[COPY2:%[0-9]+]]:areg_128 = COPY killed renamable $sgpr0_sgpr1_sgpr2_sgpr3
; REGALLOC-GFX908-NEXT: [[AV_MOV_:%[0-9]+]]:vgpr_32 = AV_MOV_B32_IMM_PSEUDO 1, implicit $exec
; REGALLOC-GFX908-NEXT: [[AV_MOV_1:%[0-9]+]]:vgpr_32 = AV_MOV_B32_IMM_PSEUDO 2, implicit $exec
- ; REGALLOC-GFX908-NEXT: [[V_MFMA_I32_4X4X4I8_e64_:%[0-9]+]]:areg_128 = V_MFMA_I32_4X4X4I8_e64 [[AV_MOV_]], [[AV_MOV_1]], [[COPY]], 0, 0, 0, implicit $mode, implicit $exec
- ; REGALLOC-GFX908-NEXT: GLOBAL_STORE_DWORDX2 undef %17:vreg_64, %8, 0, 0, implicit $exec :: (volatile store (s64) into `ptr addrspace(1) poison`, addrspace 1)
- ; REGALLOC-GFX908-NEXT: [[COPY1:%[0-9]+]]:vreg_128 = COPY [[V_MFMA_I32_4X4X4I8_e64_]]
- ; REGALLOC-GFX908-NEXT: GLOBAL_STORE_DWORDX4 undef %19:vreg_64, [[COPY1]], 0, 0, implicit $exec :: (volatile store (s128) into `ptr addrspace(1) poison`, addrspace 1)
+ ; REGALLOC-GFX908-NEXT: [[V_MFMA_I32_4X4X4I8_e64_:%[0-9]+]]:areg_128 = V_MFMA_I32_4X4X4I8_e64 [[AV_MOV_]], [[AV_MOV_1]], [[COPY2]], 0, 0, 0, implicit $mode, implicit $exec
+ ; REGALLOC-GFX908-NEXT: [[SI_SPILL_AV64_RESTORE:%[0-9]+]]:vreg_64 = SI_SPILL_AV64_RESTORE %stack.0, $sgpr32, 0, implicit $exec :: (load (s64) from %stack.0, align 4, addrspace 5)
+ ; REGALLOC-GFX908-NEXT: GLOBAL_STORE_DWORDX2 undef %17:vreg_64, [[SI_SPILL_AV64_RESTORE]], 0, 0, implicit $exec :: (volatile store (s64) into `ptr addrspace(1) poison`, addrspace 1)
+ ; REGALLOC-GFX908-NEXT: [[COPY3:%[0-9]+]]:vreg_128 = COPY [[V_MFMA_I32_4X4X4I8_e64_]]
+ ; REGALLOC-GFX908-NEXT: GLOBAL_STORE_DWORDX4 undef %19:vreg_64, [[COPY3]], 0, 0, implicit $exec :: (volatile store (s128) into `ptr addrspace(1) poison`, addrspace 1)
; REGALLOC-GFX908-NEXT: S_ENDPGM 0
;
; PEI-GFX908-LABEL: name: partial_copy
@@ -57,15 +61,17 @@ define amdgpu_kernel void @partial_copy(<4 x i32> %arg) #0 {
; REGALLOC-GFX90A-NEXT: liveins: $sgpr4_sgpr5
; REGALLOC-GFX90A-NEXT: {{ $}}
; REGALLOC-GFX90A-NEXT: INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 2162697 /* reguse:AGPR_32 */, undef %6:agpr_32
- ; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6422538 /* regdef:VReg_128_Align2 */, def %7
- ; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3866634 /* regdef:VReg_64_Align2 */, def %8
- ; REGALLOC-GFX90A-NEXT: GLOBAL_STORE_DWORDX4 undef %15:vreg_64_align2, %7, 0, 0, implicit $exec :: (volatile store (s128) into `ptr addrspace(1) poison`, addrspace 1)
+ ; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 6422538 /* regdef:VReg_128_Align2 */, def %24
+ ; REGALLOC-GFX90A-NEXT: [[COPY:%[0-9]+]]:av_128_align2 = COPY %24
+ ; REGALLOC-GFX90A-NEXT: INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 3866634 /* regdef:VReg_64_Align2 */, def %22
+ ; REGALLOC-GFX90A-NEXT: [[COPY1:%[0-9]+]]:av_64_align2 = COPY %22
+ ; REGALLOC-GFX90A-NEXT: GLOBAL_STORE_DWORDX4 undef %15:vreg_64_align2, [[COPY]], 0, 0, implicit $exec :: (volatile store (s128) into `ptr addrspace(1) poison`, addrspace 1)
; REGALLOC-GFX90A-NEXT: renamable $sgpr0_sgpr1_sgpr2_sgpr3 = S_LOAD_DWORDX4_IMM killed renamable $sgpr4_sgpr5, 0, 0 :: (dereferenceable invariant load (s128) from %ir.arg.kernarg.offset1, addrspace 4)
- ; REGALLOC-GFX90A-NEXT: [[COPY:%[0-9]+]]:areg_128_align2 = COPY killed renamable $sgpr0_sgpr1_sgpr2_sgpr3
+ ; REGALLOC-GFX90A-NEXT: [[COPY2:%[0-9]+]]:areg_128_align2 = COPY killed renamable $sgpr0_sgpr1_sgpr2_sgpr3
; REGALLOC-GFX90A-NEXT: [[AV_MOV_:%[0-9]+]]:vgpr_32 = AV_MOV_B32_IMM_PSEUDO 1, implicit $exec
; REGALLOC-GFX90A-NEXT: [[AV_MOV_1:%[0-9]+]]:vgpr_32 = AV_MOV_B32_IMM_PSEUDO 2, implicit $exec
- ; REGALLOC-GFX90A-NEXT: [[V_MFMA_I32_4X4X4I8_e64_:%[0-9]+]]:areg_128_align2 = V_MFMA_I32_4X4X4I8_e64 [[AV_MOV_]], [[AV_MOV_1]], [[COPY]], 0, 0, 0, implicit $mode, implicit $exec
- ; REGALLOC-GFX90A-NEXT: GLOBAL_STORE_DWORDX2 undef %17:vreg_64_align2, %8, 0, 0, implicit $exec :: (volatile store (s64) into `ptr addrspace(1) poison`, addrspace 1)
+ ; REGALLOC-GFX90A-NEXT: [[V_MFMA_I32_4X4X4I8_e64_:%[0-9]+]]:areg_128_align2 = V_MFMA_I32_4X4X4I8_e64 [[AV_MOV_]], [[AV_MOV_1]], [[COPY2]], 0, 0, 0, implicit $mode, implicit $exec
+ ; REGALLOC-GFX90A-NEXT: GLOBAL_STORE_DWORDX2 undef %17:vreg_64_align2, [[COPY1]], 0, 0, implicit $exec :: (volatile store (s64) into `ptr addrspace(1) poison`, addrspace 1)
; REGALLOC-GFX90A-NEXT: GLOBAL_STORE_DWORDX4 undef %19:vreg_64_align2, [[V_MFMA_I32_4X4X4I8_e64_]], 0, 0, implicit $exec :: (volatile store (s128) into `ptr addrspace(1) poison`, addrspace 1)
; REGALLOC-GFX90A-NEXT: S_ENDPGM 0
;
diff --git a/llvm/test/CodeGen/AMDGPU/wwm-regalloc-error.ll b/llvm/test/CodeGen/AMDGPU/wwm-regalloc-error.ll
index 145f1e4..ff18b32 100644
--- a/llvm/test/CodeGen/AMDGPU/wwm-regalloc-error.ll
+++ b/llvm/test/CodeGen/AMDGPU/wwm-regalloc-error.ll
@@ -2,7 +2,7 @@
; A negative test to capture the expected error when the VGPRs are insufficient for wwm-regalloc.
-; CHECK: error: can't find enough VGPRs for wwm-regalloc
+; CHECK: error: cannot find enough VGPRs for wwm-regalloc
define amdgpu_kernel void @test(i32 %in) {
entry:
diff --git a/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll b/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll
index 5130865..c18c637 100644
--- a/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll
+++ b/llvm/test/CodeGen/LoongArch/lasx/build-vector.ll
@@ -436,49 +436,47 @@ entry:
define void @buildvector_v32i8_with_constant(ptr %dst, i8 %a0, i8 %a1, i8 %a2, i8 %a5, i8 %a8, i8 %a9, i8 %a15, i8 %a17, i8 %a18, i8 %a20, i8 %a22, i8 %a23, i8 %a27, i8 %a28, i8 %a31) nounwind {
; CHECK-LABEL: buildvector_v32i8_with_constant:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi.d $sp, $sp, -96
-; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
-; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
-; CHECK-NEXT: addi.d $fp, $sp, 96
-; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
-; CHECK-NEXT: ld.b $t0, $fp, 0
-; CHECK-NEXT: ld.b $t1, $fp, 8
-; CHECK-NEXT: ld.b $t2, $fp, 16
-; CHECK-NEXT: ld.b $t3, $fp, 24
-; CHECK-NEXT: ld.b $t4, $fp, 56
-; CHECK-NEXT: ld.b $t5, $fp, 32
-; CHECK-NEXT: ld.b $t6, $fp, 48
-; CHECK-NEXT: ld.b $t7, $fp, 40
-; CHECK-NEXT: st.b $t4, $sp, 63
-; CHECK-NEXT: st.b $zero, $sp, 61
-; CHECK-NEXT: st.b $t6, $sp, 60
-; CHECK-NEXT: st.b $t7, $sp, 59
-; CHECK-NEXT: st.b $zero, $sp, 56
-; CHECK-NEXT: st.b $t5, $sp, 55
-; CHECK-NEXT: st.b $t3, $sp, 54
-; CHECK-NEXT: st.b $zero, $sp, 53
-; CHECK-NEXT: st.b $t2, $sp, 52
-; CHECK-NEXT: st.b $zero, $sp, 51
-; CHECK-NEXT: st.b $t1, $sp, 50
-; CHECK-NEXT: st.b $t0, $sp, 49
-; CHECK-NEXT: st.b $zero, $sp, 48
-; CHECK-NEXT: st.b $a7, $sp, 47
-; CHECK-NEXT: st.h $zero, $sp, 44
-; CHECK-NEXT: st.b $zero, $sp, 42
-; CHECK-NEXT: st.b $a6, $sp, 41
-; CHECK-NEXT: st.b $a5, $sp, 40
-; CHECK-NEXT: st.b $zero, $sp, 39
-; CHECK-NEXT: st.b $a4, $sp, 37
-; CHECK-NEXT: st.h $zero, $sp, 35
-; CHECK-NEXT: st.b $a3, $sp, 34
-; CHECK-NEXT: st.b $a2, $sp, 33
-; CHECK-NEXT: st.b $a1, $sp, 32
-; CHECK-NEXT: xvld $xr0, $sp, 32
-; CHECK-NEXT: xvst $xr0, $a0, 0
-; CHECK-NEXT: addi.d $sp, $fp, -96
-; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 96
+; CHECK-NEXT: ld.b $t0, $sp, 56
+; CHECK-NEXT: ld.b $t1, $sp, 48
+; CHECK-NEXT: ld.b $t2, $sp, 40
+; CHECK-NEXT: ld.b $t3, $sp, 32
+; CHECK-NEXT: ld.b $t4, $sp, 24
+; CHECK-NEXT: ld.b $t5, $sp, 16
+; CHECK-NEXT: ld.b $t6, $sp, 8
+; CHECK-NEXT: ld.b $t7, $sp, 0
+; CHECK-NEXT: xvrepli.b $xr0, 0
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a1, 0
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a2, 1
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a3, 2
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a4, 5
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a5, 8
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a6, 9
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a7, 15
+; CHECK-NEXT: xvpermi.d $xr1, $xr0, 14
+; CHECK-NEXT: vinsgr2vr.b $vr1, $t7, 1
+; CHECK-NEXT: xvpermi.q $xr0, $xr1, 2
+; CHECK-NEXT: xvpermi.d $xr1, $xr0, 14
+; CHECK-NEXT: vinsgr2vr.b $vr1, $t6, 2
+; CHECK-NEXT: xvpermi.q $xr0, $xr1, 2
+; CHECK-NEXT: xvpermi.d $xr1, $xr0, 14
+; CHECK-NEXT: vinsgr2vr.b $vr1, $t5, 4
+; CHECK-NEXT: xvpermi.q $xr0, $xr1, 2
+; CHECK-NEXT: xvpermi.d $xr1, $xr0, 14
+; CHECK-NEXT: vinsgr2vr.b $vr1, $t4, 6
+; CHECK-NEXT: xvpermi.q $xr0, $xr1, 2
+; CHECK-NEXT: xvpermi.d $xr1, $xr0, 14
+; CHECK-NEXT: vinsgr2vr.b $vr1, $t3, 7
+; CHECK-NEXT: xvpermi.q $xr0, $xr1, 2
+; CHECK-NEXT: xvpermi.d $xr1, $xr0, 14
+; CHECK-NEXT: vinsgr2vr.b $vr1, $t2, 11
+; CHECK-NEXT: xvpermi.q $xr0, $xr1, 2
+; CHECK-NEXT: xvpermi.d $xr1, $xr0, 14
+; CHECK-NEXT: vinsgr2vr.b $vr1, $t1, 12
+; CHECK-NEXT: xvpermi.q $xr0, $xr1, 2
+; CHECK-NEXT: xvpermi.d $xr1, $xr0, 14
+; CHECK-NEXT: vinsgr2vr.b $vr1, $t0, 15
+; CHECK-NEXT: xvpermi.q $xr0, $xr1, 2
+; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
%ins0 = insertelement <32 x i8> undef, i8 %a0, i32 0
@@ -624,32 +622,19 @@ entry:
define void @buildvector_v16i16_with_constant(ptr %dst, i16 %a2, i16 %a3, i16 %a5, i16 %a6, i16 %a7, i16 %a12, i16 %a13) nounwind {
; CHECK-LABEL: buildvector_v16i16_with_constant:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi.d $sp, $sp, -96
-; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
-; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
-; CHECK-NEXT: addi.d $fp, $sp, 96
-; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
-; CHECK-NEXT: st.h $a7, $sp, 58
-; CHECK-NEXT: st.h $a6, $sp, 56
-; CHECK-NEXT: st.h $a5, $sp, 46
-; CHECK-NEXT: st.h $a4, $sp, 44
-; CHECK-NEXT: st.h $a3, $sp, 42
-; CHECK-NEXT: ori $a3, $zero, 2
-; CHECK-NEXT: st.h $a3, $sp, 40
-; CHECK-NEXT: st.h $a2, $sp, 38
-; CHECK-NEXT: st.h $a1, $sp, 36
-; CHECK-NEXT: lu12i.w $a1, 32
-; CHECK-NEXT: ori $a1, $a1, 2
-; CHECK-NEXT: st.w $a1, $sp, 60
-; CHECK-NEXT: st.w $a1, $sp, 32
-; CHECK-NEXT: lu32i.d $a1, 131074
-; CHECK-NEXT: st.d $a1, $sp, 48
-; CHECK-NEXT: xvld $xr0, $sp, 32
-; CHECK-NEXT: xvst $xr0, $a0, 0
-; CHECK-NEXT: addi.d $sp, $fp, -96
-; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 96
+; CHECK-NEXT: xvrepli.h $xr0, 2
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a1, 2
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a2, 3
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a3, 5
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a4, 6
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a5, 7
+; CHECK-NEXT: xvpermi.d $xr1, $xr0, 14
+; CHECK-NEXT: vinsgr2vr.h $vr1, $a6, 4
+; CHECK-NEXT: xvpermi.q $xr0, $xr1, 2
+; CHECK-NEXT: xvpermi.d $xr1, $xr0, 14
+; CHECK-NEXT: vinsgr2vr.h $vr1, $a7, 5
+; CHECK-NEXT: xvpermi.q $xr0, $xr1, 2
+; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
%ins0 = insertelement <16 x i16> undef, i16 2, i32 0
@@ -724,24 +709,12 @@ entry:
define void @buildvector_v8i32_with_constant(ptr %dst, i32 %a2, i32 %a4, i32 %a5, i32 %a6) nounwind {
; CHECK-LABEL: buildvector_v8i32_with_constant:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi.d $sp, $sp, -96
-; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
-; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
-; CHECK-NEXT: addi.d $fp, $sp, 96
-; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
-; CHECK-NEXT: st.w $zero, $sp, 60
-; CHECK-NEXT: st.w $a4, $sp, 56
-; CHECK-NEXT: st.w $a3, $sp, 52
-; CHECK-NEXT: st.w $a2, $sp, 48
-; CHECK-NEXT: st.w $zero, $sp, 44
-; CHECK-NEXT: st.w $a1, $sp, 40
-; CHECK-NEXT: st.d $zero, $sp, 32
-; CHECK-NEXT: xvld $xr0, $sp, 32
-; CHECK-NEXT: xvst $xr0, $a0, 0
-; CHECK-NEXT: addi.d $sp, $fp, -96
-; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 96
+; CHECK-NEXT: xvrepli.b $xr0, 0
+; CHECK-NEXT: xvinsgr2vr.w $xr0, $a1, 2
+; CHECK-NEXT: xvinsgr2vr.w $xr0, $a2, 4
+; CHECK-NEXT: xvinsgr2vr.w $xr0, $a3, 5
+; CHECK-NEXT: xvinsgr2vr.w $xr0, $a4, 6
+; CHECK-NEXT: xvst $xr0, $a0, 0
; CHECK-NEXT: ret
entry:
%ins0 = insertelement <8 x i32> undef, i32 0, i32 0
@@ -793,21 +766,10 @@ entry:
define void @buildvector_v4i64_with_constant(ptr %dst, i64 %a0, i64 %a2) nounwind {
; CHECK-LABEL: buildvector_v4i64_with_constant:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi.d $sp, $sp, -96
-; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
-; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
-; CHECK-NEXT: addi.d $fp, $sp, 96
-; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
-; CHECK-NEXT: st.d $zero, $sp, 56
-; CHECK-NEXT: st.d $a2, $sp, 48
-; CHECK-NEXT: st.d $zero, $sp, 40
-; CHECK-NEXT: st.d $a1, $sp, 32
-; CHECK-NEXT: xvld $xr0, $sp, 32
+; CHECK-NEXT: xvrepli.b $xr0, 0
+; CHECK-NEXT: xvinsgr2vr.d $xr0, $a1, 0
+; CHECK-NEXT: xvinsgr2vr.d $xr0, $a2, 2
; CHECK-NEXT: xvst $xr0, $a0, 0
-; CHECK-NEXT: addi.d $sp, $fp, -96
-; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 96
; CHECK-NEXT: ret
entry:
%ins0 = insertelement <4 x i64> undef, i64 %a0, i32 0
@@ -880,27 +842,17 @@ entry:
define void @buildvector_v8f32_with_constant(ptr %dst, float %a1, float %a2, float %a5, float %a7) nounwind {
; CHECK-LABEL: buildvector_v8f32_with_constant:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi.d $sp, $sp, -96
-; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
-; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
-; CHECK-NEXT: addi.d $fp, $sp, 96
-; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
-; CHECK-NEXT: fst.s $fa3, $sp, 60
-; CHECK-NEXT: fst.s $fa2, $sp, 52
-; CHECK-NEXT: fst.s $fa1, $sp, 40
-; CHECK-NEXT: fst.s $fa0, $sp, 36
-; CHECK-NEXT: vldi $vr0, -1280
-; CHECK-NEXT: fst.s $fa0, $sp, 56
+; CHECK-NEXT: # kill: def $f3 killed $f3 def $xr3
+; CHECK-NEXT: # kill: def $f2 killed $f2 def $xr2
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $xr1
+; CHECK-NEXT: # kill: def $f0 killed $f0 def $xr0
; CHECK-NEXT: lu12i.w $a1, 262144
-; CHECK-NEXT: lu52i.d $a1, $a1, 1024
-; CHECK-NEXT: st.d $a1, $sp, 44
-; CHECK-NEXT: fst.s $fa0, $sp, 32
-; CHECK-NEXT: xvld $xr0, $sp, 32
-; CHECK-NEXT: xvst $xr0, $a0, 0
-; CHECK-NEXT: addi.d $sp, $fp, -96
-; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 96
+; CHECK-NEXT: xvreplgr2vr.w $xr4, $a1
+; CHECK-NEXT: xvinsve0.w $xr4, $xr0, 1
+; CHECK-NEXT: xvinsve0.w $xr4, $xr1, 2
+; CHECK-NEXT: xvinsve0.w $xr4, $xr2, 5
+; CHECK-NEXT: xvinsve0.w $xr4, $xr3, 7
+; CHECK-NEXT: xvst $xr4, $a0, 0
; CHECK-NEXT: ret
entry:
%ins0 = insertelement <8 x float> undef, float 2.0, i32 0
@@ -956,21 +908,12 @@ entry:
define void @buildvector_v4f64_with_constant(ptr %dst, double %a0, double %a3) nounwind {
; CHECK-LABEL: buildvector_v4f64_with_constant:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi.d $sp, $sp, -96
-; CHECK-NEXT: st.d $ra, $sp, 88 # 8-byte Folded Spill
-; CHECK-NEXT: st.d $fp, $sp, 80 # 8-byte Folded Spill
-; CHECK-NEXT: addi.d $fp, $sp, 96
-; CHECK-NEXT: bstrins.d $sp, $zero, 4, 0
-; CHECK-NEXT: fst.d $fa1, $sp, 56
-; CHECK-NEXT: vrepli.b $vr1, 0
-; CHECK-NEXT: vst $vr1, $sp, 40
-; CHECK-NEXT: fst.d $fa0, $sp, 32
-; CHECK-NEXT: xvld $xr0, $sp, 32
-; CHECK-NEXT: xvst $xr0, $a0, 0
-; CHECK-NEXT: addi.d $sp, $fp, -96
-; CHECK-NEXT: ld.d $fp, $sp, 80 # 8-byte Folded Reload
-; CHECK-NEXT: ld.d $ra, $sp, 88 # 8-byte Folded Reload
-; CHECK-NEXT: addi.d $sp, $sp, 96
+; CHECK-NEXT: # kill: def $f1_64 killed $f1_64 def $xr1
+; CHECK-NEXT: # kill: def $f0_64 killed $f0_64 def $xr0
+; CHECK-NEXT: xvrepli.b $xr2, 0
+; CHECK-NEXT: xvinsve0.d $xr2, $xr0, 0
+; CHECK-NEXT: xvinsve0.d $xr2, $xr1, 3
+; CHECK-NEXT: xvst $xr2, $a0, 0
; CHECK-NEXT: ret
entry:
%ins0 = insertelement <4 x double> undef, double %a0, i32 0
diff --git a/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll b/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll
index 78588c5..9517558 100644
--- a/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll
+++ b/llvm/test/CodeGen/LoongArch/lsx/build-vector.ll
@@ -307,23 +307,15 @@ entry:
define void @buildvector_v16i8_with_constant(ptr %dst, i8 %a0, i8 %a4, i8 %a6, i8 %a8, i8 %a11, i8 %a12, i8 %a15) nounwind {
; CHECK-LABEL: buildvector_v16i8_with_constant:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi.d $sp, $sp, -16
-; CHECK-NEXT: st.b $a7, $sp, 15
-; CHECK-NEXT: st.h $zero, $sp, 13
-; CHECK-NEXT: st.b $a6, $sp, 12
-; CHECK-NEXT: st.b $a5, $sp, 11
-; CHECK-NEXT: st.h $zero, $sp, 9
-; CHECK-NEXT: st.b $a4, $sp, 8
-; CHECK-NEXT: st.b $zero, $sp, 7
-; CHECK-NEXT: st.b $a3, $sp, 6
-; CHECK-NEXT: st.b $zero, $sp, 5
-; CHECK-NEXT: st.b $a2, $sp, 4
-; CHECK-NEXT: st.b $zero, $sp, 3
-; CHECK-NEXT: st.h $zero, $sp, 1
-; CHECK-NEXT: st.b $a1, $sp, 0
-; CHECK-NEXT: vld $vr0, $sp, 0
-; CHECK-NEXT: vst $vr0, $a0, 0
-; CHECK-NEXT: addi.d $sp, $sp, 16
+; CHECK-NEXT: vrepli.b $vr0, 0
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a1, 0
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a2, 4
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a3, 6
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a4, 8
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a5, 11
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a6, 12
+; CHECK-NEXT: vinsgr2vr.b $vr0, $a7, 15
+; CHECK-NEXT: vst $vr0, $a0, 0
; CHECK-NEXT: ret
entry:
%ins0 = insertelement <16 x i8> undef, i8 %a0, i32 0
@@ -398,16 +390,12 @@ entry:
define void @buildvector_v8i16_with_constant(ptr %dst, i16 %a0, i16 %a3, i16 %a4, i16 %a5) nounwind {
; CHECK-LABEL: buildvector_v8i16_with_constant:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi.d $sp, $sp, -16
-; CHECK-NEXT: st.h $zero, $sp, 12
-; CHECK-NEXT: st.h $a4, $sp, 10
-; CHECK-NEXT: st.h $a3, $sp, 8
-; CHECK-NEXT: st.h $a2, $sp, 6
-; CHECK-NEXT: st.h $zero, $sp, 2
-; CHECK-NEXT: st.h $a1, $sp, 0
-; CHECK-NEXT: vld $vr0, $sp, 0
+; CHECK-NEXT: vrepli.b $vr0, 0
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a1, 0
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a2, 3
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a3, 4
+; CHECK-NEXT: vinsgr2vr.h $vr0, $a4, 5
; CHECK-NEXT: vst $vr0, $a0, 0
-; CHECK-NEXT: addi.d $sp, $sp, 16
; CHECK-NEXT: ret
entry:
%ins0 = insertelement <8 x i16> undef, i16 %a0, i32 0
@@ -459,15 +447,11 @@ entry:
define void @buildvector_v4i32_with_constant(ptr %dst, i32 %a0, i32 %a2, i32 %a3) nounwind {
; CHECK-LABEL: buildvector_v4i32_with_constant:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi.d $sp, $sp, -16
-; CHECK-NEXT: st.w $a3, $sp, 12
-; CHECK-NEXT: st.w $a2, $sp, 8
-; CHECK-NEXT: ori $a2, $zero, 2
-; CHECK-NEXT: st.w $a2, $sp, 4
-; CHECK-NEXT: st.w $a1, $sp, 0
-; CHECK-NEXT: vld $vr0, $sp, 0
+; CHECK-NEXT: vrepli.w $vr0, 2
+; CHECK-NEXT: vinsgr2vr.w $vr0, $a1, 0
+; CHECK-NEXT: vinsgr2vr.w $vr0, $a2, 2
+; CHECK-NEXT: vinsgr2vr.w $vr0, $a3, 3
; CHECK-NEXT: vst $vr0, $a0, 0
-; CHECK-NEXT: addi.d $sp, $sp, 16
; CHECK-NEXT: ret
entry:
%ins0 = insertelement <4 x i32> undef, i32 %a0, i32 0
@@ -508,9 +492,8 @@ entry:
define void @buildvector_v2i64_with_constant(ptr %dst, i64 %a1) nounwind {
; CHECK-LABEL: buildvector_v2i64_with_constant:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vinsgr2vr.d $vr0, $zero, 0
-; CHECK-NEXT: vinsgr2vr.d $vr1, $a1, 0
-; CHECK-NEXT: vpackev.d $vr0, $vr1, $vr0
+; CHECK-NEXT: vrepli.b $vr0, 0
+; CHECK-NEXT: vinsgr2vr.d $vr0, $a1, 1
; CHECK-NEXT: vst $vr0, $a0, 0
; CHECK-NEXT: ret
entry:
@@ -561,15 +544,14 @@ entry:
define void @buildvector_v4f32_with_constant(ptr %dst, float %a1, float %a2, float %a3) nounwind {
; CHECK-LABEL: buildvector_v4f32_with_constant:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi.d $sp, $sp, -16
-; CHECK-NEXT: fst.s $fa2, $sp, 12
-; CHECK-NEXT: fst.s $fa1, $sp, 8
-; CHECK-NEXT: fst.s $fa0, $sp, 4
-; CHECK-NEXT: movgr2fr.w $fa0, $zero
-; CHECK-NEXT: fst.s $fa0, $sp, 0
-; CHECK-NEXT: vld $vr0, $sp, 0
-; CHECK-NEXT: vst $vr0, $a0, 0
-; CHECK-NEXT: addi.d $sp, $sp, 16
+; CHECK-NEXT: # kill: def $f2 killed $f2 def $vr2
+; CHECK-NEXT: # kill: def $f1 killed $f1 def $vr1
+; CHECK-NEXT: # kill: def $f0 killed $f0 def $vr0
+; CHECK-NEXT: vrepli.b $vr3, 0
+; CHECK-NEXT: vextrins.w $vr3, $vr0, 16
+; CHECK-NEXT: vextrins.w $vr3, $vr1, 32
+; CHECK-NEXT: vextrins.w $vr3, $vr2, 48
+; CHECK-NEXT: vst $vr3, $a0, 0
; CHECK-NEXT: ret
entry:
%ins0 = insertelement <4 x float> undef, float 0.0, i32 0
diff --git a/llvm/test/CodeGen/Mips/abiflags-soft-float.ll b/llvm/test/CodeGen/Mips/abiflags-soft-float.ll
new file mode 100644
index 0000000..01821f2
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/abiflags-soft-float.ll
@@ -0,0 +1,12 @@
+; RUN: llc -filetype=obj -mtriple mipsel-unknown-linux -mcpu=mips32 %s -o tmp.o
+; RUN: llvm-readobj -A tmp.o | FileCheck %s -check-prefix=OBJ
+; RUN: llc -filetype=asm -mtriple mipsel-unknown-linux -mcpu=mips32 %s -o - | \
+; RUN: FileCheck %s -check-prefix=ASM
+
+; OBJ: FP ABI: Soft float
+; ASM: .module softfloat
+
+define dso_local void @asm_is_null() "use-soft-float"="true" {
+ call void asm sideeffect "", ""()
+ ret void
+}
diff --git a/llvm/test/CodeGen/Mips/nan_lowering.ll b/llvm/test/CodeGen/Mips/nan_lowering.ll
new file mode 100644
index 0000000..2a11278
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/nan_lowering.ll
@@ -0,0 +1,25 @@
+; RUN: llc -mtriple=mips-linux-gnu -mattr=-nan2008 < %s | FileCheck %s
+; RUN: llc -mtriple=mips-linux-gnu -mattr=+nan2008 < %s | FileCheck %s
+
+; Make sure that lowering does not corrupt NaN values, regardless of
+; which NaN mode is in effect.
+
+define float @test1() {
+; CHECK: .4byte 0x7fc00000
+ ret float bitcast (i32 u0x7fc00000 to float)
+}
+
+define float @test2() {
+; CHECK: .4byte 0x7fc00001
+ ret float bitcast (i32 u0x7fc00001 to float)
+}
+
+define float @test3() {
+; CHECK: .4byte 0x7f800000
+ ret float bitcast (i32 u0x7f800000 to float)
+}
+
+define float @test4() {
+; CHECK: .4byte 0x7f800001
+ ret float bitcast (i32 u0x7f800001 to float)
+}
diff --git a/llvm/test/CodeGen/Mips/qnan.ll b/llvm/test/CodeGen/Mips/qnan.ll
deleted file mode 100644
index e5b4aa1..0000000
--- a/llvm/test/CodeGen/Mips/qnan.ll
+++ /dev/null
@@ -1,14 +0,0 @@
-; RUN: llc -O3 -mcpu=mips32r2 -mtriple=mips-linux-gnu < %s -o - | FileCheck %s -check-prefixes=MIPS_Legacy
-; RUN: llc -O3 -mcpu=mips32r2 -mtriple=mips-linux-gnu -mattr=+nan2008 < %s -o - | FileCheck %s -check-prefixes=MIPS_NaN2008
-
-define dso_local float @nan(float noundef %a, float noundef %b) local_unnamed_addr #0 {
-; MIPS_Legacy: $CPI0_0:
-; MIPS_Legacy-NEXT: .4byte 0x7fa00000 # float NaN
-
-; MIPS_NaN2008: $CPI0_0:
-; MIPS_NaN2008-NEXT: .4byte 0x7fc00000 # float NaN
-
-entry:
- %0 = tail call float @llvm.minimum.f32(float %a, float %b)
- ret float %0
-}
diff --git a/llvm/test/CodeGen/PowerPC/froundeven-legalization.ll b/llvm/test/CodeGen/PowerPC/froundeven-legalization.ll
new file mode 100644
index 0000000..238e200
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/froundeven-legalization.ll
@@ -0,0 +1,111 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=powerpc64le < %s | FileCheck %s
+
+define void @test(ptr %p1, ptr %p2) nounwind {
+; CHECK-LABEL: test:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mflr 0
+; CHECK-NEXT: stdu 1, -224(1)
+; CHECK-NEXT: li 5, 48
+; CHECK-NEXT: std 0, 240(1)
+; CHECK-NEXT: std 27, 184(1) # 8-byte Folded Spill
+; CHECK-NEXT: li 27, 16
+; CHECK-NEXT: std 28, 192(1) # 8-byte Folded Spill
+; CHECK-NEXT: std 29, 200(1) # 8-byte Folded Spill
+; CHECK-NEXT: li 29, 32
+; CHECK-NEXT: li 28, 48
+; CHECK-NEXT: stxvd2x 56, 1, 5 # 16-byte Folded Spill
+; CHECK-NEXT: li 5, 64
+; CHECK-NEXT: std 30, 208(1) # 8-byte Folded Spill
+; CHECK-NEXT: mr 30, 4
+; CHECK-NEXT: stxvd2x 57, 1, 5 # 16-byte Folded Spill
+; CHECK-NEXT: li 5, 80
+; CHECK-NEXT: stxvd2x 58, 1, 5 # 16-byte Folded Spill
+; CHECK-NEXT: li 5, 96
+; CHECK-NEXT: lxvd2x 58, 0, 3
+; CHECK-NEXT: stxvd2x 59, 1, 5 # 16-byte Folded Spill
+; CHECK-NEXT: li 5, 112
+; CHECK-NEXT: lxvd2x 59, 3, 27
+; CHECK-NEXT: stxvd2x 60, 1, 5 # 16-byte Folded Spill
+; CHECK-NEXT: li 5, 128
+; CHECK-NEXT: stxvd2x 61, 1, 5 # 16-byte Folded Spill
+; CHECK-NEXT: li 5, 144
+; CHECK-NEXT: stxvd2x 62, 1, 5 # 16-byte Folded Spill
+; CHECK-NEXT: li 5, 160
+; CHECK-NEXT: lxvd2x 62, 3, 28
+; CHECK-NEXT: stxvd2x 63, 1, 5 # 16-byte Folded Spill
+; CHECK-NEXT: lxvd2x 63, 3, 29
+; CHECK-NEXT: xxswapd 57, 58
+; CHECK-NEXT: xxswapd 1, 59
+; CHECK-NEXT: xxswapd 60, 62
+; CHECK-NEXT: xxswapd 61, 63
+; CHECK-NEXT: bl roundeven
+; CHECK-NEXT: nop
+; CHECK-NEXT: xxswapd 56, 1
+; CHECK-NEXT: xxlor 1, 59, 59
+; CHECK-NEXT: bl roundeven
+; CHECK-NEXT: nop
+; CHECK-NEXT: xxswapd 0, 1
+; CHECK-NEXT: xxlor 1, 60, 60
+; CHECK-NEXT: xxmrgld 59, 0, 56
+; CHECK-NEXT: bl roundeven
+; CHECK-NEXT: nop
+; CHECK-NEXT: xxswapd 60, 1
+; CHECK-NEXT: xxlor 1, 62, 62
+; CHECK-NEXT: bl roundeven
+; CHECK-NEXT: nop
+; CHECK-NEXT: xxswapd 0, 1
+; CHECK-NEXT: xxlor 1, 61, 61
+; CHECK-NEXT: xxmrgld 62, 0, 60
+; CHECK-NEXT: bl roundeven
+; CHECK-NEXT: nop
+; CHECK-NEXT: xxswapd 61, 1
+; CHECK-NEXT: xxlor 1, 63, 63
+; CHECK-NEXT: bl roundeven
+; CHECK-NEXT: nop
+; CHECK-NEXT: xxswapd 0, 1
+; CHECK-NEXT: xxlor 1, 57, 57
+; CHECK-NEXT: xxmrgld 63, 0, 61
+; CHECK-NEXT: bl roundeven
+; CHECK-NEXT: nop
+; CHECK-NEXT: xxswapd 61, 1
+; CHECK-NEXT: xxlor 1, 58, 58
+; CHECK-NEXT: bl roundeven
+; CHECK-NEXT: nop
+; CHECK-NEXT: li 3, 160
+; CHECK-NEXT: stxvd2x 63, 30, 29
+; CHECK-NEXT: xxswapd 0, 1
+; CHECK-NEXT: stxvd2x 62, 30, 28
+; CHECK-NEXT: stxvd2x 59, 30, 27
+; CHECK-NEXT: ld 29, 200(1) # 8-byte Folded Reload
+; CHECK-NEXT: ld 28, 192(1) # 8-byte Folded Reload
+; CHECK-NEXT: ld 27, 184(1) # 8-byte Folded Reload
+; CHECK-NEXT: lxvd2x 63, 1, 3 # 16-byte Folded Reload
+; CHECK-NEXT: li 3, 144
+; CHECK-NEXT: xxmrgld 0, 0, 61
+; CHECK-NEXT: lxvd2x 62, 1, 3 # 16-byte Folded Reload
+; CHECK-NEXT: li 3, 128
+; CHECK-NEXT: stxvd2x 0, 0, 30
+; CHECK-NEXT: ld 30, 208(1) # 8-byte Folded Reload
+; CHECK-NEXT: lxvd2x 61, 1, 3 # 16-byte Folded Reload
+; CHECK-NEXT: li 3, 112
+; CHECK-NEXT: lxvd2x 60, 1, 3 # 16-byte Folded Reload
+; CHECK-NEXT: li 3, 96
+; CHECK-NEXT: lxvd2x 59, 1, 3 # 16-byte Folded Reload
+; CHECK-NEXT: li 3, 80
+; CHECK-NEXT: lxvd2x 58, 1, 3 # 16-byte Folded Reload
+; CHECK-NEXT: li 3, 64
+; CHECK-NEXT: lxvd2x 57, 1, 3 # 16-byte Folded Reload
+; CHECK-NEXT: li 3, 48
+; CHECK-NEXT: lxvd2x 56, 1, 3 # 16-byte Folded Reload
+; CHECK-NEXT: addi 1, 1, 224
+; CHECK-NEXT: ld 0, 16(1)
+; CHECK-NEXT: mtlr 0
+; CHECK-NEXT: blr
+ %v = load <8 x double>, ptr %p1, align 64
+ %res = call <8 x double> @llvm.roundeven.v8f64(<8 x double> %v)
+ store <8 x double> %res, ptr %p2, align 64
+ ret void
+}
+
+declare <8 x double> @llvm.roundeven.v8f64(<8 x double>)
diff --git a/llvm/test/CodeGen/X86/embed-bitcode.ll b/llvm/test/CodeGen/X86/embed-bitcode.ll
index 0d66ba8..d4af954 100644
--- a/llvm/test/CodeGen/X86/embed-bitcode.ll
+++ b/llvm/test/CodeGen/X86/embed-bitcode.ll
@@ -1,10 +1,23 @@
; RUN: llc -filetype=obj -mtriple=x86_64 %s -o %t
; RUN: llvm-readelf -S %t | FileCheck %s
+; RUN: llc -filetype=obj -mtriple=x86_64-pc-windows-msvc %s -o %t
+; RUN: llvm-readobj -S %t | FileCheck %s --check-prefix=COFF
; CHECK: .text PROGBITS 0000000000000000 [[#%x,OFF:]] 000000 00 AX 0
; CHECK-NEXT: .llvmbc PROGBITS 0000000000000000 [[#%x,OFF:]] 000004 00 0
; CHECK-NEXT: .llvmcmd PROGBITS 0000000000000000 [[#%x,OFF:]] 000005 00 0
+; COFF: Name: .llvmbc (2E 6C 6C 76 6D 62 63 00)
+; COFF: Characteristics [
+; COFF-NEXT: IMAGE_SCN_ALIGN_1BYTES
+; COFF-NEXT: IMAGE_SCN_MEM_DISCARDABLE
+; COFF-NEXT: ]
+; COFF: Name: .llvmcmd (2E 6C 6C 76 6D 63 6D 64)
+; COFF: Characteristics [
+; COFF-NEXT: IMAGE_SCN_ALIGN_1BYTES
+; COFF-NEXT: IMAGE_SCN_MEM_DISCARDABLE
+; COFF-NEXT: ]
+
@llvm.embedded.module = private constant [4 x i8] c"BC\C0\DE", section ".llvmbc", align 1
@llvm.cmdline = private constant [5 x i8] c"-cc1\00", section ".llvmcmd", align 1
@llvm.compiler.used = appending global [2 x ptr] [ptr @llvm.embedded.module, ptr @llvm.cmdline], section "llvm.metadata"
diff --git a/llvm/test/CodeGen/X86/late-tail-dup-computed-goto.mir b/llvm/test/CodeGen/X86/late-tail-dup-computed-goto.mir
new file mode 100644
index 0000000..e272e7e
--- /dev/null
+++ b/llvm/test/CodeGen/X86/late-tail-dup-computed-goto.mir
@@ -0,0 +1,128 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+# RUN: llc -mtriple=x86_64-unknown-linux-gnu -run-pass=tailduplication -tail-dup-pred-size=1 -tail-dup-succ-size=1 %s -o - | FileCheck %s
+#
+# Check that only computed gotos are duplicated aggressively.
+#
+--- |
+ @computed_goto.dispatch = constant [5 x ptr] [ptr null, ptr blockaddress(@computed_goto, %bb1), ptr blockaddress(@computed_goto, %bb2), ptr blockaddress(@computed_goto, %bb3), ptr blockaddress(@computed_goto, %bb4)]
+ declare i64 @f0()
+ declare i64 @f1()
+ declare i64 @f2()
+ declare i64 @f3()
+ declare i64 @f4()
+ declare i64 @f5()
+ define void @computed_goto() {
+ start:
+ ret void
+ bb1:
+ ret void
+ bb2:
+ ret void
+ bb3:
+ ret void
+ bb4:
+ ret void
+ }
+ define void @jump_table() { ret void }
+ define void @jump_table_pic() { ret void }
+...
+---
+name: computed_goto
+alignment: 1
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+failedISel: false
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: computed_goto
+ ; CHECK: bb.0:
+ ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f0, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64_nosp = COPY [[COPY]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64_nosp = COPY [[COPY1]]
+ ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY2]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.bb1 (ir-block-address-taken %ir-block.bb1):
+ ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f1, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64_nosp = COPY [[COPY3]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64_nosp = COPY [[COPY1]]
+ ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY2]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.bb2 (ir-block-address-taken %ir-block.bb2):
+ ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f2, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64_nosp = COPY [[COPY4]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64_nosp = COPY [[COPY1]]
+ ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY2]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3.bb3 (ir-block-address-taken %ir-block.bb3):
+ ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f3, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64_nosp = COPY [[COPY5]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64_nosp = COPY [[COPY1]]
+ ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY2]], @computed_goto.dispatch, $noreg
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4.bb4 (ir-block-address-taken %ir-block.bb4):
+ ; CHECK-NEXT: successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: CALL64pcrel32 target-flags(x86-plt) @f4, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:gr64 = COPY $rax
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64_nosp = COPY [[COPY6]]
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gr64_nosp = COPY [[COPY1]]
+ ; CHECK-NEXT: JMP64m $noreg, 8, [[COPY2]], @computed_goto.dispatch, $noreg
+ bb.0:
+ successors: %bb.5(0x80000000)
+
+ CALL64pcrel32 target-flags(x86-plt) @f0, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
+ %0:gr64 = COPY $rax
+ %6:gr64_nosp = COPY %0
+ JMP_1 %bb.5
+
+ bb.1.bb1 (ir-block-address-taken %ir-block.bb1):
+ successors: %bb.5(0x80000000)
+
+ CALL64pcrel32 target-flags(x86-plt) @f1, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
+ %1:gr64 = COPY $rax
+ %6:gr64_nosp = COPY %1
+ JMP_1 %bb.5
+
+ bb.2.bb2 (ir-block-address-taken %ir-block.bb2):
+ successors: %bb.5(0x80000000)
+
+ CALL64pcrel32 target-flags(x86-plt) @f2, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
+ %2:gr64 = COPY $rax
+ %6:gr64_nosp = COPY %2
+ JMP_1 %bb.5
+
+ bb.3.bb3 (ir-block-address-taken %ir-block.bb3):
+ successors: %bb.5(0x80000000)
+
+ CALL64pcrel32 target-flags(x86-plt) @f3, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
+ %3:gr64 = COPY $rax
+ %6:gr64_nosp = COPY %3
+ JMP_1 %bb.5
+
+ bb.4.bb4 (ir-block-address-taken %ir-block.bb4):
+ successors: %bb.5(0x80000000)
+
+ CALL64pcrel32 target-flags(x86-plt) @f4, csr_64, implicit $rsp, implicit $ssp, implicit-def $rsp, implicit-def $ssp, implicit-def $rax
+ %4:gr64 = COPY $rax
+ %6:gr64_nosp = COPY %4
+
+ bb.5:
+ successors: %bb.1(0x20000000), %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000)
+
+ %5:gr64_nosp = COPY %6
+ JMP64m $noreg, 8, %5, @computed_goto.dispatch, $noreg
+...
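For readers skimming the diff: the shape being exercised is the classic interpreter dispatch, where every handler funnels back into one shared block that ends in an indirect branch through a jump table. A minimal IR sketch of that pattern (hypothetical; @table, @interp, and the handler names are illustrative, not part of this commit):

@table = constant [2 x ptr] [ptr blockaddress(@interp, %op0), ptr blockaddress(@interp, %op1)]

define void @interp(ptr %pc) {
entry:
  br label %dispatch

dispatch:                                   ; shared dispatch block: every handler
  %opcode = load i64, ptr %pc               ; branches back here before duplication
  %slot = getelementptr inbounds [2 x ptr], ptr @table, i64 0, i64 %opcode
  %target = load ptr, ptr %slot
  indirectbr ptr %target, [label %op0, label %op1]

op0:                                        ; handler that continues the loop
  br label %dispatch

op1:                                        ; handler that terminates
  ret void
}

Tail-duplicating %dispatch into each handler, as the CHECK lines in the test above expect, gives every handler its own indirect jump instead of a shared merge point; better indirect-branch prediction is the usual motivation for duplicating computed gotos this aggressively.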
diff --git a/llvm/test/ThinLTO/X86/memprof_func_assign_fix.ll b/llvm/test/ThinLTO/X86/memprof_func_assign_fix.ll
deleted file mode 100644
index bd35850..0000000
--- a/llvm/test/ThinLTO/X86/memprof_func_assign_fix.ll
+++ /dev/null
@@ -1,145 +0,0 @@
-;; Make sure we assign the original callsite to a function clone (which will be
-;; the original function clone), even when we cannot update its caller (due to
-;; missing metadata e.g. from mismatched profiles). Otherwise we will try to use
-;; the original function for a different clone, leading to confusion later when
-;; rewriting the calls.
-
-;; -stats requires asserts
-; REQUIRES: asserts
-
-; RUN: opt -thinlto-bc %s >%t.o
-; RUN: llvm-lto2 run %t.o -enable-memprof-context-disambiguation \
-; RUN: -supports-hot-cold-new \
-; RUN: -r=%t.o,A,plx \
-; RUN: -r=%t.o,B,plx \
-; RUN: -r=%t.o,C,plx \
-; RUN: -r=%t.o,D,plx \
-; RUN: -r=%t.o,E,plx \
-; RUN: -r=%t.o,F,plx \
-; RUN: -r=%t.o,G,plx \
-; RUN: -r=%t.o,A1,plx \
-; RUN: -r=%t.o,B1,plx \
-; RUN: -r=%t.o,_Znwm, \
-; RUN: -memprof-verify-ccg -memprof-verify-nodes -debug-only=memprof-context-disambiguation \
-; RUN: -stats -pass-remarks=memprof-context-disambiguation -save-temps \
-; RUN: -o %t.out 2>&1 | FileCheck %s \
-; RUN: --implicit-check-not="Mismatch in call clone assignment" \
-; RUN: --implicit-check-not="Number of callsites assigned to call multiple non-matching clones"
-
-; RUN: llvm-dis %t.out.1.4.opt.bc -o - | FileCheck %s --check-prefix=IR
-
-; ModuleID = '<stdin>'
-source_filename = "reduced.ll"
-target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-grtev4-linux-gnu"
-
-; IR-LABEL: define dso_local void @A()
-define void @A() #0 {
- ; IR: call void @C()
- call void @C()
- ret void
-}
-
-; IR-LABEL: define dso_local void @B()
-define void @B() #0 {
- ; IR: call void @C.memprof.1()
- call void @C(), !callsite !1
- ret void
-}
-
-; IR-LABEL: define dso_local void @C()
-define void @C() #0 {
- ; IR: call void @F()
- call void @F(), !callsite !16
- ; IR: call void @D()
- call void @D(), !callsite !2
- ret void
-}
-
-; IR-LABEL: define dso_local void @D()
-define void @D() #0 {
- ; IR: call void @E()
- call void @E(), !callsite !3
- ; IR: call void @G()
- call void @G(), !callsite !17
- ret void
-}
-
-; IR-LABEL: define dso_local void @E()
-define void @E() #0 {
- ; IR: call ptr @_Znwm(i64 0) #[[NOTCOLD:[0-9]+]]
- %1 = call ptr @_Znwm(i64 0), !memprof !4, !callsite !9
- ret void
-}
-
-; IR-LABEL: define dso_local void @F()
-define void @F() #0 {
- ; IR: call void @G()
- call void @G(), !callsite !17
- ret void
-}
-
-; IR-LABEL: define dso_local void @G()
-define void @G() #0 {
- ; IR: call ptr @_Znwm(i64 0) #[[NOTCOLD]]
- %2 = call ptr @_Znwm(i64 0), !memprof !10, !callsite !15
- ret void
-}
-
-; IR-LABEL: define dso_local void @A1()
-define void @A1() #0 {
- ; IR: call void @C()
- call void @C(), !callsite !18
- ret void
-}
-
-; IR-LABEL: define dso_local void @B1()
-define void @B1() #0 {
- ; IR: call void @C.memprof.1()
- call void @C(), !callsite !19
- ret void
-}
-
-; IR-LABEL: define dso_local void @C.memprof.1()
- ; IR: call void @F.memprof.1()
- ; IR: call void @D.memprof.1()
-
-; IR-LABEL: define dso_local void @D.memprof.1()
- ; IR: call void @E.memprof.1()
- ; IR: call void @G()
-
-; IR-LABEL: define dso_local void @E.memprof.1()
- ; IR: call ptr @_Znwm(i64 0) #[[COLD:[0-9]+]]
-
-; IR-LABEL: define dso_local void @F.memprof.1()
- ; IR: call void @G.memprof.1()
-
-; IR-LABEL: define dso_local void @G.memprof.1()
- ; IR: call ptr @_Znwm(i64 0) #[[COLD]]
-
-declare ptr @_Znwm(i64)
-
-attributes #0 = { noinline optnone }
-; IR: attributes #[[NOTCOLD]] = { "memprof"="notcold" }
-; IR: attributes #[[COLD]] = { "memprof"="cold" }
-
-!0 = !{i64 123}
-!1 = !{i64 234}
-!2 = !{i64 345}
-!3 = !{i64 456}
-!4 = !{!5, !7}
-!5 = !{!6, !"notcold"}
-!6 = !{i64 567, i64 456, i64 345, i64 123}
-!7 = !{!8, !"cold"}
-!8 = !{i64 567, i64 456, i64 345, i64 234}
-!9 = !{i64 567}
-!10 = !{!11, !13}
-!11 = !{!12, !"notcold"}
-!12 = !{i64 678, i64 891, i64 789, i64 912}
-!13 = !{!14, !"cold"}
-!14 = !{i64 678, i64 891, i64 789, i64 812}
-!15 = !{i64 678}
-!16 = !{i64 789}
-!17 = !{i64 891}
-!18 = !{i64 912}
-!19 = !{i64 812}
diff --git a/llvm/test/Transforms/LoopFusion/sunk-phi-nodes.ll b/llvm/test/Transforms/LoopFusion/sunk-phi-nodes.ll
new file mode 100644
index 0000000..36c6bdd
--- /dev/null
+++ b/llvm/test/Transforms/LoopFusion/sunk-phi-nodes.ll
@@ -0,0 +1,65 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes=loop-fusion -S < %s 2>&1 | FileCheck %s
+define dso_local i32 @check_sunk_phi_nodes() {
+; CHECK-LABEL: define dso_local i32 @check_sunk_phi_nodes() {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[FOR_BODY:.*]]
+; CHECK: [[FOR_BODY]]:
+; CHECK-NEXT: [[SUM1_02:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ADD:%.*]], %[[FOR_INC6:.*]] ]
+; CHECK-NEXT: [[I_01:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[INC:%.*]], %[[FOR_INC6]] ]
+; CHECK-NEXT: [[I1_04:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[INC7:%.*]], %[[FOR_INC6]] ]
+; CHECK-NEXT: [[SUM2_03:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[ADD5:%.*]], %[[FOR_INC6]] ]
+; CHECK-NEXT: [[ADD]] = add nsw i32 [[SUM1_02]], [[I_01]]
+; CHECK-NEXT: br label %[[FOR_INC:.*]]
+; CHECK: [[FOR_INC]]:
+; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I1_04]], [[I1_04]]
+; CHECK-NEXT: [[ADD5]] = add nsw i32 [[SUM2_03]], [[MUL]]
+; CHECK-NEXT: br label %[[FOR_INC6]]
+; CHECK: [[FOR_INC6]]:
+; CHECK-NEXT: [[INC]] = add nsw i32 [[I_01]], 1
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[INC]], 10
+; CHECK-NEXT: [[INC7]] = add nsw i32 [[I1_04]], 1
+; CHECK-NEXT: [[CMP3:%.*]] = icmp slt i32 [[INC7]], 10
+; CHECK-NEXT: br i1 [[CMP3]], label %[[FOR_BODY]], label %[[FOR_END8:.*]]
+; CHECK: [[FOR_END8]]:
+; CHECK-NEXT: [[SUM2_0_LCSSA:%.*]] = phi i32 [ [[ADD5]], %[[FOR_INC6]] ]
+; CHECK-NEXT: [[SUM1_0_LCSSA:%.*]] = phi i32 [ [[ADD]], %[[FOR_INC6]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[SUM1_0_LCSSA]], [[SUM2_0_LCSSA]]
+; CHECK-NEXT: ret i32 [[TMP0]]
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.inc
+ %sum1.02 = phi i32 [ 0, %entry ], [ %add, %for.inc ]
+ %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.inc ]
+ %add = add nsw i32 %sum1.02, %i.01
+ br label %for.inc
+
+for.inc: ; preds = %for.body
+ %inc = add nsw i32 %i.01, 1
+ %cmp = icmp slt i32 %inc, 10
+ br i1 %cmp, label %for.body, label %for.end
+
+for.end: ; preds = %for.inc
+ %sum1.0.lcssa = phi i32 [ %add, %for.inc ]
+ br label %for.body4
+
+for.body4: ; preds = %for.end, %for.inc6
+ %i1.04 = phi i32 [ 0, %for.end ], [ %inc7, %for.inc6 ]
+ %sum2.03 = phi i32 [ 0, %for.end ], [ %add5, %for.inc6 ]
+ %mul = mul nsw i32 %i1.04, %i1.04
+ %add5 = add nsw i32 %sum2.03, %mul
+ br label %for.inc6
+
+for.inc6: ; preds = %for.body4
+ %inc7 = add nsw i32 %i1.04, 1
+ %cmp3 = icmp slt i32 %inc7, 10
+ br i1 %cmp3, label %for.body4, label %for.end8
+
+for.end8: ; preds = %for.inc6
+ %sum2.0.lcssa = phi i32 [ %add5, %for.inc6 ]
+ %0 = add i32 %sum1.0.lcssa, %sum2.0.lcssa
+ ret i32 %0
+}
+
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
index 32d17b9..5d032e6 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/evl-compatible-loops.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
-; RUN: opt -passes=loop-vectorize -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: opt -passes=loop-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s
; Make sure we do not vectorize a loop with a widened int induction.
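For context, a widened integer induction arises when the induction variable itself is used by a vectorized instruction, so the vectorizer would have to materialize a full vector of IV values each iteration. A minimal sketch of such a loop (hypothetical IR, not taken from this test file):

define void @store_iv(ptr %a, i64 %n) {
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep = getelementptr inbounds i64, ptr %a, i64 %iv
  store i64 %iv, ptr %gep                   ; the IV itself is stored, so it
  %iv.next = add nuw i64 %iv, 1             ; must be widened to a vector
  %done = icmp eq i64 %iv.next, %n
  br i1 %done, label %exit, label %loop

exit:
  ret void
}

Under the tail-folding configuration exercised by the RUN line above, this widening is the case the test expects to remain unvectorized.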
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
index f44cbf2..5b2d308 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/inloop-reduction.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -mtriple riscv64-linux-gnu -mattr=+v,+d -passes=loop-vectorize < %s -S -o - | FileCheck %s -check-prefix=OUTLOOP
; RUN: opt -mtriple riscv64-linux-gnu -mattr=+v,+d -passes=loop-vectorize -prefer-inloop-reductions < %s -S -o - | FileCheck %s -check-prefix=INLOOP
-; RUN: opt -passes=loop-vectorize -force-tail-folding-style=data-with-evl -prefer-predicate-over-epilogue=predicate-dont-vectorize -mtriple=riscv64 -mattr=+v -S < %s 2>&1 | FileCheck --check-prefix=IF-EVL-OUTLOOP %s
-; RUN: opt -passes=loop-vectorize -prefer-inloop-reductions -force-tail-folding-style=data-with-evl -prefer-predicate-over-epilogue=predicate-dont-vectorize -mtriple=riscv64 -mattr=+v -S < %s 2>&1 | FileCheck --check-prefix=IF-EVL-INLOOP %s
+; RUN: opt -passes=loop-vectorize -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -mtriple=riscv64 -mattr=+v -S < %s 2>&1 | FileCheck --check-prefix=IF-EVL-OUTLOOP %s
+; RUN: opt -passes=loop-vectorize -prefer-inloop-reductions -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -mtriple=riscv64 -mattr=+v -S < %s 2>&1 | FileCheck --check-prefix=IF-EVL-INLOOP %s
target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll
index dbe6f27..c35a6ab 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/interleaved-masked-access.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter-out-after "^scalar.ph:" --version 2
; RUN: opt -mtriple=riscv64-none-linux-gnu -S -passes=loop-vectorize,instcombine -mattr=+v -prefer-predicate-over-epilogue=scalar-epilogue %s 2>&1 | FileCheck %s -check-prefix=SCALAR_EPILOGUE
-; RUN: opt -mtriple=riscv64-none-linux-gnu -S -passes=loop-vectorize,instcombine -mattr=+v -prefer-predicate-over-epilogue=predicate-dont-vectorize -force-tail-folding-style=data %s 2>&1 | FileCheck %s -check-prefix=PREDICATED_DATA
-; RUN: opt -mtriple=riscv64-none-linux-gnu -S -passes=loop-vectorize,instcombine -mattr=+v -prefer-predicate-over-epilogue=predicate-dont-vectorize -force-tail-folding-style=data-with-evl %s 2>&1 | FileCheck %s -check-prefix=PREDICATED_DATA-WITH-EVL
+; RUN: opt -mtriple=riscv64-none-linux-gnu -S -passes=loop-vectorize,instcombine -mattr=+v -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -force-tail-folding-style=data %s 2>&1 | FileCheck %s -check-prefix=PREDICATED_DATA
+; RUN: opt -mtriple=riscv64-none-linux-gnu -S -passes=loop-vectorize,instcombine -mattr=+v -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue %s 2>&1 | FileCheck %s -check-prefix=PREDICATED_DATA-WITH-EVL
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll b/llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll
index 4f3b8d8..0afe04e 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/only-compute-cost-for-vplan-vfs.ll
@@ -1,5 +1,5 @@
-; RUN: opt -passes=loop-vectorize -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: opt -passes=loop-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S -debug %s 2>&1 | FileCheck %s
; REQUIRES: asserts
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/preserve-dbg-loc.ll b/llvm/test/Transforms/LoopVectorize/RISCV/preserve-dbg-loc.ll
index e4892db..f4817bb 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/preserve-dbg-loc.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/preserve-dbg-loc.ll
@@ -1,6 +1,5 @@
; RUN: opt -passes=debugify,loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -S < %s 2>&1 | FileCheck --check-prefix=DEBUGLOC %s
; Testing that the debug locations of the generated vector intrinsic are the same as
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
index 8d8ea5a..f6f8f6a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/scalable-tailfold.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=loop-vectorize -scalable-vectorization=on -prefer-predicate-over-epilogue=predicate-dont-vectorize -mtriple riscv64-linux-gnu -mattr=+v,+f -S 2>%t | FileCheck %s -check-prefix=CHECK
+; RUN: opt < %s -passes=loop-vectorize -scalable-vectorization=on -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -mtriple riscv64-linux-gnu -mattr=+v,+f -S 2>%t | FileCheck %s -check-prefix=CHECK
; Exercise tail folding on RISCV w/scalable vectors.
@@ -330,17 +330,44 @@ for.end:
define i64 @uniform_load(ptr noalias nocapture %a, ptr noalias nocapture %b, i64 %n) {
; CHECK-LABEL: @uniform_load(
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK: vector.body:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[V:%.*]] = load i64, ptr [[B:%.*]], align 8
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[IV]]
-; CHECK-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ]
+; CHECK-NEXT: br label [[FOR_BODY1:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
+; CHECK-NEXT: [[V1:%.*]] = load i64, ptr [[B]], align 8
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]]
+; CHECK-NEXT: store i64 [[V1]], ptr [[ARRAYIDX1]], align 8
+; CHECK-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1
+; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT1]], 1025
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: for.end:
-; CHECK-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V1]], [[FOR_BODY1]] ], [ [[V]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i64 [[V_LCSSA]]
;
entry:
@@ -389,7 +416,7 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[EVL_BASED_IV]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[FOR_END:%.*]]
; CHECK: scalar.ph:
@@ -403,7 +430,7 @@ define void @vector_add_trip1024(ptr noalias nocapture %a, i64 %v, i64 %n) {
; CHECK-NEXT: store i64 [[ADD]], ptr [[ARRAYIDX]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-bin-unary-ops-args.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll
index f90c450..f9395aa 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-bin-unary-ops-args.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-bin-unary-ops-args.ll
@@ -1,12 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s --check-prefix=IF-EVL
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s --check-prefix=NO-VP
@@ -69,9 +67,45 @@ define void @test_and(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-LABEL: define void @test_and(
; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0:[0-9]+]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16
+; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
+; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1
+; NO-VP-NEXT: [[TMP10:%.*]] = and <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -79,7 +113,7 @@ define void @test_and(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]]
; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1
; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100
-; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; NO-VP: [[FINISH_LOOPEXIT]]:
; NO-VP-NEXT: ret void
;
@@ -160,9 +194,45 @@ define void @test_or(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-LABEL: define void @test_or(
; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16
+; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
+; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1
+; NO-VP-NEXT: [[TMP10:%.*]] = or <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -170,7 +240,7 @@ define void @test_or(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]]
; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1
; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100
-; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
; NO-VP: [[FINISH_LOOPEXIT]]:
; NO-VP-NEXT: ret void
;
@@ -251,9 +321,45 @@ define void @test_xor(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-LABEL: define void @test_xor(
; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16
+; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
+; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1
+; NO-VP-NEXT: [[TMP10:%.*]] = xor <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -261,7 +367,7 @@ define void @test_xor(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]]
; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1
; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100
-; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
; NO-VP: [[FINISH_LOOPEXIT]]:
; NO-VP-NEXT: ret void
;
@@ -342,9 +448,45 @@ define void @test_shl(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-LABEL: define void @test_shl(
; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16
+; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
+; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1
+; NO-VP-NEXT: [[TMP10:%.*]] = shl <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -352,7 +494,7 @@ define void @test_shl(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]]
; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1
; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100
-; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
; NO-VP: [[FINISH_LOOPEXIT]]:
; NO-VP-NEXT: ret void
;
@@ -433,9 +575,45 @@ define void @test_lshr(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-LABEL: define void @test_lshr(
; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16
+; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
+; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1
+; NO-VP-NEXT: [[TMP10:%.*]] = lshr <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -443,7 +621,7 @@ define void @test_lshr(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]]
; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1
; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100
-; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP11:![0-9]+]]
; NO-VP: [[FINISH_LOOPEXIT]]:
; NO-VP-NEXT: ret void
;
@@ -524,9 +702,45 @@ define void @test_ashr(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-LABEL: define void @test_ashr(
; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16
+; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
+; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1
+; NO-VP-NEXT: [[TMP10:%.*]] = ashr <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -534,7 +748,7 @@ define void @test_ashr(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]]
; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1
; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100
-; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP13:![0-9]+]]
; NO-VP: [[FINISH_LOOPEXIT]]:
; NO-VP-NEXT: ret void
;
@@ -615,9 +829,45 @@ define void @test_add(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-LABEL: define void @test_add(
; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16
+; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
+; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1
+; NO-VP-NEXT: [[TMP10:%.*]] = add <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -625,7 +875,7 @@ define void @test_add(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]]
; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1
; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100
-; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP15:![0-9]+]]
; NO-VP: [[FINISH_LOOPEXIT]]:
; NO-VP-NEXT: ret void
;
@@ -706,9 +956,45 @@ define void @test_sub(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-LABEL: define void @test_sub(
; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16
+; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
+; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1
+; NO-VP-NEXT: [[TMP10:%.*]] = sub <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 1)
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -716,7 +1002,7 @@ define void @test_sub(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]]
; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1
; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100
-; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP17:![0-9]+]]
; NO-VP: [[FINISH_LOOPEXIT]]:
; NO-VP-NEXT: ret void
;
@@ -797,9 +1083,45 @@ define void @test_mul(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-LABEL: define void @test_mul(
; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16
+; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
+; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1
+; NO-VP-NEXT: [[TMP10:%.*]] = mul <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 3)
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -807,7 +1129,7 @@ define void @test_mul(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]]
; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1
; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100
-; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP19:![0-9]+]]
; NO-VP: [[FINISH_LOOPEXIT]]:
; NO-VP-NEXT: ret void
;
@@ -888,9 +1210,45 @@ define void @test_sdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-LABEL: define void @test_sdiv(
; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16
+; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
+; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1
+; NO-VP-NEXT: [[TMP10:%.*]] = sdiv <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 3)
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -898,7 +1256,7 @@ define void @test_sdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]]
; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1
; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100
-; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP21:![0-9]+]]
; NO-VP: [[FINISH_LOOPEXIT]]:
; NO-VP-NEXT: ret void
;
@@ -979,9 +1337,45 @@ define void @test_udiv(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-LABEL: define void @test_udiv(
; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16
+; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
+; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1
+; NO-VP-NEXT: [[TMP10:%.*]] = udiv <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 3)
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -989,7 +1383,7 @@ define void @test_udiv(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]]
; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1
; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100
-; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP23:![0-9]+]]
; NO-VP: [[FINISH_LOOPEXIT]]:
; NO-VP-NEXT: ret void
;
@@ -1070,9 +1464,45 @@ define void @test_srem(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-LABEL: define void @test_srem(
; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16
+; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
+; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1
+; NO-VP-NEXT: [[TMP10:%.*]] = srem <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 3)
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -1080,7 +1510,7 @@ define void @test_srem(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]]
; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1
; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100
-; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP25:![0-9]+]]
; NO-VP: [[FINISH_LOOPEXIT]]:
; NO-VP-NEXT: ret void
;
@@ -1161,9 +1591,45 @@ define void @test_urem(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-LABEL: define void @test_urem(
; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 16
+; NO-VP-NEXT: [[TMP14:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP14]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 16
+; NO-VP-NEXT: [[TMP4:%.*]] = sub i64 [[B1]], [[A2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP4]], [[TMP3]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 16
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP6]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 16
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 16 x i8>, ptr [[TMP9]], align 1
+; NO-VP-NEXT: [[TMP10:%.*]] = urem <vscale x 16 x i8> [[WIDE_LOAD]], splat (i8 3)
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 16 x i8> [[TMP10]], ptr [[TMP11]], align 1
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
+; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 [[LEN]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i8, ptr [[ARRAYIDX]], align 1
@@ -1171,7 +1637,7 @@ define void @test_urem(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 [[LEN]]
; NO-VP-NEXT: store i8 [[TMP]], ptr [[ARRAYIDX1]], align 1
; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100
-; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP27:![0-9]+]]
; NO-VP: [[FINISH_LOOPEXIT]]:
; NO-VP-NEXT: ret void
;
@@ -1255,9 +1721,46 @@ define void @test_fadd(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-LABEL: define void @test_fadd(
; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP15]], 4
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
+; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[B1]], [[A2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP8]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP11]], align 4
+; NO-VP-NEXT: [[TMP12:%.*]] = fadd fast <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
+; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[LEN]]
; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4
@@ -1265,7 +1768,7 @@ define void @test_fadd(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[LEN]]
; NO-VP-NEXT: store float [[TMP]], ptr [[ARRAYIDX1]], align 4
; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100
-; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP29:![0-9]+]]
; NO-VP: [[FINISH_LOOPEXIT]]:
; NO-VP-NEXT: ret void
;
@@ -1347,9 +1850,46 @@ define void @test_fsub(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-LABEL: define void @test_fsub(
; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP15]], 4
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
+; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[B1]], [[A2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP8]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP11]], align 4
+; NO-VP-NEXT: [[TMP12:%.*]] = fsub fast <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
+; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[LEN]]
; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4
@@ -1357,7 +1897,7 @@ define void @test_fsub(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[LEN]]
; NO-VP-NEXT: store float [[TMP]], ptr [[ARRAYIDX1]], align 4
; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100
-; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP31:![0-9]+]]
; NO-VP: [[FINISH_LOOPEXIT]]:
; NO-VP-NEXT: ret void
;
@@ -1439,9 +1979,46 @@ define void @test_fmul(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-LABEL: define void @test_fmul(
; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP15]], 4
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
+; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[B1]], [[A2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP8]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP11]], align 4
+; NO-VP-NEXT: [[TMP12:%.*]] = fmul fast <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
+; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[LEN]]
; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4
@@ -1449,7 +2026,7 @@ define void @test_fmul(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[LEN]]
; NO-VP-NEXT: store float [[TMP]], ptr [[ARRAYIDX1]], align 4
; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100
-; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP33:![0-9]+]]
; NO-VP: [[FINISH_LOOPEXIT]]:
; NO-VP-NEXT: ret void
;
@@ -1531,9 +2108,46 @@ define void @test_fdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-LABEL: define void @test_fdiv(
; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP15]], 4
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
+; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[B1]], [[A2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP8]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP11]], align 4
+; NO-VP-NEXT: [[TMP12:%.*]] = fdiv fast <vscale x 4 x float> [[WIDE_LOAD]], splat (float 3.000000e+00)
+; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[LEN]]
; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4
@@ -1541,7 +2155,7 @@ define void @test_fdiv(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[LEN]]
; NO-VP-NEXT: store float [[TMP]], ptr [[ARRAYIDX1]], align 4
; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100
-; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP35:![0-9]+]]
; NO-VP: [[FINISH_LOOPEXIT]]:
; NO-VP-NEXT: ret void
;
@@ -1676,9 +2290,46 @@ define void @test_fneg(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-LABEL: define void @test_fneg(
; NO-VP-SAME: ptr captures(none) [[A:%.*]], ptr readonly captures(none) [[B:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[A2:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[B1:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP15]], 4
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
+; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[B1]], [[A2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 100, [[TMP8]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 100, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[TMP11]], align 4
+; NO-VP-NEXT: [[TMP12:%.*]] = fneg fast <vscale x 4 x float> [[WIDE_LOAD]]
+; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
+; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[FINISH_LOOPEXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[LEN:%.*]] = phi i64 [ [[DEC:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[DEC]] = add nsw i64 [[LEN]], 1
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[LEN]]
; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4
@@ -1686,7 +2337,7 @@ define void @test_fneg(ptr nocapture %a, ptr nocapture readonly %b) {
; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[LEN]]
; NO-VP-NEXT: store float [[TMP]], ptr [[ARRAYIDX1]], align 4
; NO-VP-NEXT: [[DOTNOT:%.*]] = icmp eq i64 [[DEC]], 100
-; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DOTNOT]], label %[[FINISH_LOOPEXIT]], label %[[LOOP]], !llvm.loop [[LOOP37:![0-9]+]]
; NO-VP: [[FINISH_LOOPEXIT]]:
; NO-VP-NEXT: ret void
;
@@ -1748,3 +2399,42 @@ finish.loopexit:
; IF-EVL: [[LOOP37]] = distinct !{[[LOOP37]], [[META1]], [[META2]], [[META3]]}
; IF-EVL: [[LOOP38]] = distinct !{[[LOOP38]], [[META1]]}
;.
+; NO-VP: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; NO-VP: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; NO-VP: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; NO-VP: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
+; NO-VP: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]]}
+; NO-VP: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]]}
+; NO-VP: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]]}
+; NO-VP: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]]}
+; NO-VP: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]]}
+; NO-VP: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP15]] = distinct !{[[LOOP15]], [[META1]]}
+; NO-VP: [[LOOP16]] = distinct !{[[LOOP16]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]]}
+; NO-VP: [[LOOP18]] = distinct !{[[LOOP18]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP19]] = distinct !{[[LOOP19]], [[META1]]}
+; NO-VP: [[LOOP20]] = distinct !{[[LOOP20]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP21]] = distinct !{[[LOOP21]], [[META1]]}
+; NO-VP: [[LOOP22]] = distinct !{[[LOOP22]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP23]] = distinct !{[[LOOP23]], [[META1]]}
+; NO-VP: [[LOOP24]] = distinct !{[[LOOP24]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP25]] = distinct !{[[LOOP25]], [[META1]]}
+; NO-VP: [[LOOP26]] = distinct !{[[LOOP26]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP27]] = distinct !{[[LOOP27]], [[META1]]}
+; NO-VP: [[LOOP28]] = distinct !{[[LOOP28]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP29]] = distinct !{[[LOOP29]], [[META1]]}
+; NO-VP: [[LOOP30]] = distinct !{[[LOOP30]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP31]] = distinct !{[[LOOP31]], [[META1]]}
+; NO-VP: [[LOOP32]] = distinct !{[[LOOP32]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP33]] = distinct !{[[LOOP33]], [[META1]]}
+; NO-VP: [[LOOP34]] = distinct !{[[LOOP34]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP35]] = distinct !{[[LOOP35]], [[META1]]}
+; NO-VP: [[LOOP36]] = distinct !{[[LOOP36]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP37]] = distinct !{[[LOOP37]], [[META1]]}
+;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-call-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll
index dfa01a5..7d21c4d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-call-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-call-intrinsics.ll
@@ -1,12 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s --check-prefix=IF-EVL
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s --check-prefix=NO-VP
define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
@@ -78,9 +76,53 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-LABEL: define void @vp_smax(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
+; NO-VP-NEXT: [[C3:%.*]] = ptrtoint ptr [[C]] to i64
+; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 4
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP19]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
+; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
+; NO-VP-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP8:%.*]] = sub i64 [[A1]], [[C3]]
+; NO-VP-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
+; NO-VP-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
+; NO-VP-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP13]], align 4
+; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4
+; NO-VP-NEXT: [[TMP15:%.*]] = call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[WIDE_LOAD5]])
+; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP15]], ptr [[TMP16]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; NO-VP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4
; NO-VP-NEXT: [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
@@ -90,7 +132,7 @@ define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-NEXT: store i32 [[DOT]], ptr [[GEP11]], align 4
; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
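; A minimal sketch, not taken from the patch: for the three-pointer vp_*
; tests, the VECTOR_MEMCHECK block above compares A against B and A against
; C and ORs the two results into CONFLICT_RDX, so any overlap within one
; vector step sends control to the scalar loop. The function name and the
; %span operand are illustrative (standing in for 4 x vscale x 4 bytes):
define i1 @conflict_rdx(i64 %a1, i64 %b2, i64 %c3, i64 %span) {
entry:
  %diff.ab = sub i64 %a1, %b2
  %check.ab = icmp ult i64 %diff.ab, %span     ; A overlaps B within one step
  %diff.ac = sub i64 %a1, %c3
  %check.ac = icmp ult i64 %diff.ac, %span     ; A overlaps C within one step
  %conflict = or i1 %check.ab, %check.ac       ; either overlap forces scalar
  ret i1 %conflict
}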
@@ -184,9 +226,53 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-LABEL: define void @vp_smin(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
+; NO-VP-NEXT: [[C3:%.*]] = ptrtoint ptr [[C]] to i64
+; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 4
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP19]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
+; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
+; NO-VP-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP8:%.*]] = sub i64 [[A1]], [[C3]]
+; NO-VP-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
+; NO-VP-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
+; NO-VP-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP13]], align 4
+; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4
+; NO-VP-NEXT: [[TMP15:%.*]] = call <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[WIDE_LOAD5]])
+; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP15]], ptr [[TMP16]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; NO-VP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4
; NO-VP-NEXT: [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
@@ -196,7 +282,7 @@ define void @vp_smin(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-NEXT: store i32 [[DOT]], ptr [[GEP11]], align 4
; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -290,9 +376,53 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-LABEL: define void @vp_umax(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
+; NO-VP-NEXT: [[C3:%.*]] = ptrtoint ptr [[C]] to i64
+; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 4
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP19]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
+; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
+; NO-VP-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP8:%.*]] = sub i64 [[A1]], [[C3]]
+; NO-VP-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
+; NO-VP-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
+; NO-VP-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP13]], align 4
+; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4
+; NO-VP-NEXT: [[TMP15:%.*]] = call <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[WIDE_LOAD5]])
+; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP15]], ptr [[TMP16]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; NO-VP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4
; NO-VP-NEXT: [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
@@ -302,7 +432,7 @@ define void @vp_umax(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-NEXT: store i32 [[DOT]], ptr [[GEP11]], align 4
; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -396,9 +526,53 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-LABEL: define void @vp_umin(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], ptr [[C:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
+; NO-VP-NEXT: [[C3:%.*]] = ptrtoint ptr [[C]] to i64
+; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 4
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP19]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
+; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
+; NO-VP-NEXT: [[TMP7:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP8:%.*]] = sub i64 [[A1]], [[C3]]
+; NO-VP-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP8]], [[TMP7]]
+; NO-VP-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK4]]
+; NO-VP-NEXT: br i1 [[CONFLICT_RDX]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP10]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP13]], align 4
+; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD5:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4
+; NO-VP-NEXT: [[TMP15:%.*]] = call <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], <vscale x 4 x i32> [[WIDE_LOAD5]])
+; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP15]], ptr [[TMP16]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]]
+; NO-VP-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4
; NO-VP-NEXT: [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[C]], i64 [[IV]]
@@ -408,7 +582,7 @@ define void @vp_umin(ptr %a, ptr %b, ptr %c, i64 %N) {
; NO-VP-NEXT: store i32 [[DOT]], ptr [[GEP11]], align 4
; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -494,17 +668,54 @@ define void @vp_ctlz(ptr %a, ptr %b, i64 %N) {
; NO-VP-LABEL: define void @vp_ctlz(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
+; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP11]], 4
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP15]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
+; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.ctlz.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], i1 true)
+; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
-; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV1]]
+; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP1]], align 4
; NO-VP-NEXT: [[TMP1:%.*]] = tail call range(i32 0, 33) i32 @llvm.ctlz.i32(i32 [[TMP0]], i1 true)
-; NO-VP-NEXT: [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store i32 [[TMP1]], ptr [[GEP3]], align 4
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP11:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -587,17 +798,54 @@ define void @vp_cttz(ptr %a, ptr %b, i64 %N) {
; NO-VP-LABEL: define void @vp_cttz(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
+; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP11]], 4
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP15]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
+; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.cttz.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], i1 true)
+; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
-; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV1]]
+; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP1]], align 4
; NO-VP-NEXT: [[TMP1:%.*]] = tail call range(i32 0, 33) i32 @llvm.cttz.i32(i32 [[TMP0]], i1 true)
-; NO-VP-NEXT: [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: [[GEP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store i32 [[TMP1]], ptr [[GEP3]], align 4
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP13:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -684,19 +932,58 @@ define void @vp_lrint(ptr %a, ptr %b, i64 %N) {
; NO-VP-LABEL: define void @vp_lrint(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
+; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP11]], 4
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP17]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
+; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[TMP12:%.*]] = fpext <vscale x 4 x float> [[WIDE_LOAD]] to <vscale x 4 x double>
+; NO-VP-NEXT: [[TMP13:%.*]] = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f64(<vscale x 4 x double> [[TMP12]])
+; NO-VP-NEXT: [[TMP14:%.*]] = trunc <vscale x 4 x i64> [[TMP13]] to <vscale x 4 x i32>
+; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP14]], ptr [[TMP15]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV1]]
+; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP1]], align 4
; NO-VP-NEXT: [[CONV2:%.*]] = fpext float [[TMP0]] to double
; NO-VP-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.lrint.i64.f64(double [[CONV2]])
; NO-VP-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP1]] to i32
-; NO-VP-NEXT: [[GEP5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: [[GEP5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store i32 [[CONV3]], ptr [[GEP5]], align 4
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP15:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -785,19 +1072,58 @@ define void @vp_llrint(ptr %a, ptr %b, i64 %N) {
; NO-VP-LABEL: define void @vp_llrint(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
+; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP17:%.*]] = mul nuw i64 [[TMP11]], 4
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP17]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
+; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[TMP12:%.*]] = fpext <vscale x 4 x float> [[WIDE_LOAD]] to <vscale x 4 x double>
+; NO-VP-NEXT: [[TMP13:%.*]] = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f64(<vscale x 4 x double> [[TMP12]])
+; NO-VP-NEXT: [[TMP14:%.*]] = trunc <vscale x 4 x i64> [[TMP13]] to <vscale x 4 x i32>
+; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP14]], ptr [[TMP15]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV1]]
+; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP1]], align 4
; NO-VP-NEXT: [[CONV2:%.*]] = fpext float [[TMP0]] to double
; NO-VP-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.llrint.i64.f64(double [[CONV2]])
; NO-VP-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP1]] to i32
-; NO-VP-NEXT: [[GEP5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: [[GEP5:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store i32 [[CONV3]], ptr [[GEP5]], align 4
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP17:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -882,17 +1208,54 @@ define void @vp_abs(ptr %a, ptr %b, i64 %N) {
; NO-VP-LABEL: define void @vp_abs(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
+; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP11]], 4
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
+; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]], i1 true)
+; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
-; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV1]]
+; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP1]], align 4
; NO-VP-NEXT: [[COND:%.*]] = tail call i32 @llvm.abs.i32(i32 [[TMP0]], i1 true)
-; NO-VP-NEXT: [[GEP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: [[GEP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store i32 [[COND]], ptr [[GEP9]], align 4
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP19:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -937,17 +1300,44 @@ define void @log10(ptr %a, ptr %b, i64 %N) {
; NO-VP-LABEL: define void @log10(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
+; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 8
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP1:%.*]] = sub i64 [[A1]], [[B2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[TMP2:%.*]] = call <4 x float> @llvm.log10.v4f32(<4 x float> [[WIDE_LOAD]])
+; NO-VP-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: store <4 x float> [[TMP2]], ptr [[TMP3]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], 4
+; NO-VP-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV1]]
+; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP1]], align 4
; NO-VP-NEXT: [[COND:%.*]] = tail call float @llvm.log10.f32(float [[TMP0]])
-; NO-VP-NEXT: [[GEP9:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: [[GEP9:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store float [[COND]], ptr [[GEP9]], align 4
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP21:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -1003,3 +1393,26 @@ declare i32 @llvm.abs.i32(i32, i1 immarg)
; IF-EVL: [[LOOP19]] = distinct !{[[LOOP19]], [[META1]], [[META2]], [[META3]]}
; IF-EVL: [[LOOP20]] = distinct !{[[LOOP20]], [[META1]]}
;.
+; NO-VP: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; NO-VP: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; NO-VP: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; NO-VP: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
+; NO-VP: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]]}
+; NO-VP: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]]}
+; NO-VP: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]]}
+; NO-VP: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]]}
+; NO-VP: [[LOOP12]] = distinct !{[[LOOP12]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP13]] = distinct !{[[LOOP13]], [[META1]]}
+; NO-VP: [[LOOP14]] = distinct !{[[LOOP14]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP15]] = distinct !{[[LOOP15]], [[META1]]}
+; NO-VP: [[LOOP16]] = distinct !{[[LOOP16]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP17]] = distinct !{[[LOOP17]], [[META1]]}
+; NO-VP: [[LOOP18]] = distinct !{[[LOOP18]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP19]] = distinct !{[[LOOP19]], [[META1]]}
+; NO-VP: [[LOOP20]] = distinct !{[[LOOP20]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP21]] = distinct !{[[LOOP21]], [[META1]]}
+;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-cast-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
index 7139170..4b718dd 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-cast-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cast-intrinsics.ll
@@ -1,12 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s --check-prefix=IF-EVL
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s --check-prefix=NO-VP
define void @vp_sext(ptr %a, ptr %b, i64 %N) {
@@ -69,17 +67,54 @@ define void @vp_sext(ptr %a, ptr %b, i64 %N) {
; NO-VP-LABEL: define void @vp_sext(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP9]], 2
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 20, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = shl i64 [[N]], 3
+; NO-VP-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]]
+; NO-VP-NEXT: [[TMP4:%.*]] = shl i64 [[N]], 2
+; NO-VP-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP4]]
+; NO-VP-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
+; NO-VP-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
+; NO-VP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; NO-VP-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i32>, ptr [[GEP]], align 4, !alias.scope [[META0:![0-9]+]]
+; NO-VP-NEXT: [[TMP10:%.*]] = sext <vscale x 2 x i32> [[WIDE_LOAD]] to <vscale x 2 x i64>
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP10]], ptr [[TMP11]], align 8, !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
+; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
-; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
-; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV1]]
+; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP1]], align 4
; NO-VP-NEXT: [[CONV2:%.*]] = sext i32 [[TMP0]] to i64
-; NO-VP-NEXT: [[GEP4:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: [[GEP4:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store i64 [[CONV2]], ptr [[GEP4]], align 8
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP8:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -161,17 +196,54 @@ define void @vp_zext(ptr %a, ptr %b, i64 %N) {
; NO-VP-LABEL: define void @vp_zext(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP9]], 2
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 20, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = shl i64 [[N]], 3
+; NO-VP-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]]
+; NO-VP-NEXT: [[TMP4:%.*]] = shl i64 [[N]], 2
+; NO-VP-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP4]]
+; NO-VP-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
+; NO-VP-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
+; NO-VP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; NO-VP-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i32>, ptr [[GEP]], align 4, !alias.scope [[META9:![0-9]+]]
+; NO-VP-NEXT: [[TMP10:%.*]] = zext <vscale x 2 x i32> [[WIDE_LOAD]] to <vscale x 2 x i64>
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP10]], ptr [[TMP11]], align 8, !alias.scope [[META12:![0-9]+]], !noalias [[META9]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
+; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
-; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV1]]
+; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP1]], align 4
; NO-VP-NEXT: [[CONV:%.*]] = zext i32 [[TMP0]] to i64
-; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store i64 [[CONV]], ptr [[GEP2]], align 8
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP15:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -253,17 +325,54 @@ define void @vp_trunc(ptr %a, ptr %b, i64 %N) {
; NO-VP-LABEL: define void @vp_trunc(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP9]], 2
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 20, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = shl i64 [[N]], 2
+; NO-VP-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]]
+; NO-VP-NEXT: [[TMP4:%.*]] = shl i64 [[N]], 3
+; NO-VP-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP4]]
+; NO-VP-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
+; NO-VP-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
+; NO-VP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; NO-VP-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[GEP]], align 8, !alias.scope [[META16:![0-9]+]]
+; NO-VP-NEXT: [[TMP10:%.*]] = trunc <vscale x 2 x i64> [[WIDE_LOAD]] to <vscale x 2 x i32>
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: store <vscale x 2 x i32> [[TMP10]], ptr [[TMP11]], align 4, !alias.scope [[META19:![0-9]+]], !noalias [[META16]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
+; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
-; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[GEP]], align 8
+; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV1]]
+; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[GEP1]], align 8
; NO-VP-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
-; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store i32 [[CONV]], ptr [[GEP2]], align 4
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP22:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -345,17 +454,54 @@ define void @vp_fpext(ptr %a, ptr %b, i64 %N) {
; NO-VP-LABEL: define void @vp_fpext(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP9]], 2
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = shl i64 [[N]], 3
+; NO-VP-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]]
+; NO-VP-NEXT: [[TMP4:%.*]] = shl i64 [[N]], 2
+; NO-VP-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP4]]
+; NO-VP-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
+; NO-VP-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
+; NO-VP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; NO-VP-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x float>, ptr [[GEP]], align 4, !alias.scope [[META23:![0-9]+]]
+; NO-VP-NEXT: [[TMP10:%.*]] = fpext <vscale x 2 x float> [[WIDE_LOAD]] to <vscale x 2 x double>
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: store <vscale x 2 x double> [[TMP10]], ptr [[TMP11]], align 8, !alias.scope [[META26:![0-9]+]], !noalias [[META23]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
+; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV1]]
+; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP1]], align 4
; NO-VP-NEXT: [[CONV:%.*]] = fpext float [[TMP0]] to double
-; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store double [[CONV]], ptr [[GEP2]], align 8
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP29:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -437,17 +583,54 @@ define void @vp_fptrunc(ptr %a, ptr %b, i64 %N) {
; NO-VP-LABEL: define void @vp_fptrunc(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP9]], 2
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = shl i64 [[N]], 2
+; NO-VP-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]]
+; NO-VP-NEXT: [[TMP4:%.*]] = shl i64 [[N]], 3
+; NO-VP-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP4]]
+; NO-VP-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
+; NO-VP-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
+; NO-VP-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; NO-VP-NEXT: br i1 [[FOUND_CONFLICT]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], 2
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[IV]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x double>, ptr [[GEP]], align 8, !alias.scope [[META30:![0-9]+]]
+; NO-VP-NEXT: [[TMP10:%.*]] = fptrunc <vscale x 2 x double> [[WIDE_LOAD]] to <vscale x 2 x float>
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: store <vscale x 2 x float> [[TMP10]], ptr [[TMP11]], align 4, !alias.scope [[META33:![0-9]+]], !noalias [[META30]]
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP8]]
+; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[IV]]
-; NO-VP-NEXT: [[TMP0:%.*]] = load double, ptr [[GEP]], align 8
+; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds double, ptr [[B]], i64 [[IV1]]
+; NO-VP-NEXT: [[TMP0:%.*]] = load double, ptr [[GEP1]], align 8
; NO-VP-NEXT: [[CONV:%.*]] = fptrunc double [[TMP0]] to float
-; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store float [[CONV]], ptr [[GEP2]], align 4
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP36:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -529,17 +712,54 @@ define void @vp_sitofp(ptr %a, ptr %b, i64 %N) {
; NO-VP-LABEL: define void @vp_sitofp(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
+; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP11]], 4
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
+; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[TMP12:%.*]] = sitofp <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x float>
+; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
-; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV1]]
+; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP1]], align 4
; NO-VP-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float
-; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store float [[CONV]], ptr [[GEP2]], align 4
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP38:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -621,17 +841,54 @@ define void @vp_uitofp(ptr %a, ptr %b, i64 %N) {
; NO-VP-LABEL: define void @vp_uitofp(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
+; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP11]], 4
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
+; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[TMP12:%.*]] = uitofp <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x float>
+; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: store <vscale x 4 x float> [[TMP12]], ptr [[TMP13]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
-; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV1]]
+; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[GEP1]], align 4
; NO-VP-NEXT: [[CONV:%.*]] = uitofp i32 [[TMP0]] to float
-; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store float [[CONV]], ptr [[GEP2]], align 4
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP40:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -713,17 +970,54 @@ define void @vp_fptosi(ptr %a, ptr %b, i64 %N) {
; NO-VP-LABEL: define void @vp_fptosi(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
+; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP11]], 4
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
+; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[TMP12:%.*]] = fptosi <vscale x 4 x float> [[WIDE_LOAD]] to <vscale x 4 x i32>
+; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP41:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV1]]
+; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP1]], align 4
; NO-VP-NEXT: [[CONV:%.*]] = fptosi float [[TMP0]] to i32
-; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store i32 [[CONV]], ptr [[GEP2]], align 4
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP42:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -805,17 +1099,54 @@ define void @vp_fptoui(ptr %a, ptr %b, i64 %N) {
; NO-VP-LABEL: define void @vp_fptoui(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
+; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP11]], 4
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
+; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 4
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[TMP12:%.*]] = fptoui <vscale x 4 x float> [[WIDE_LOAD]] to <vscale x 4 x i32>
+; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: store <vscale x 4 x i32> [[TMP12]], ptr [[TMP13]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP43:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
-; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP]], align 4
+; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV1]]
+; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[GEP1]], align 4
; NO-VP-NEXT: [[CONV:%.*]] = fptoui float [[TMP0]] to i32
-; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store i32 [[CONV]], ptr [[GEP2]], align 4
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP44:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -897,17 +1228,54 @@ define void @vp_inttoptr(ptr %a, ptr %b, i64 %N) {
; NO-VP-LABEL: define void @vp_inttoptr(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
+; NO-VP-NEXT: [[B2:%.*]] = ptrtoint ptr [[B]] to i64
+; NO-VP-NEXT: [[A1:%.*]] = ptrtoint ptr [[A]] to i64
+; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP15:%.*]] = mul nuw i64 [[TMP11]], 2
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP15]])
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP2]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]]
+; NO-VP: [[VECTOR_MEMCHECK]]:
+; NO-VP-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 2
+; NO-VP-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 8
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[A1]], [[B2]]
+; NO-VP-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
+; NO-VP-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP8:%.*]] = mul nuw i64 [[TMP7]], 2
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP8]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP10:%.*]] = mul nuw i64 [[TMP9]], 2
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[GEP]], align 8
+; NO-VP-NEXT: [[TMP12:%.*]] = inttoptr <vscale x 2 x i64> [[WIDE_LOAD]] to <vscale x 2 x ptr>
+; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds ptr, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: store <vscale x 2 x ptr> [[TMP12]], ptr [[TMP13]], align 8
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP10]]
+; NO-VP-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP45:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
-; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV]]
-; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[GEP]], align 8
+; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; NO-VP-NEXT: [[GEP1:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[IV1]]
+; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[GEP1]], align 8
; NO-VP-NEXT: [[TMP1:%.*]] = inttoptr i64 [[TMP0]] to ptr
-; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds ptr, ptr [[A]], i64 [[IV]]
+; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds ptr, ptr [[A]], i64 [[IV1]]
; NO-VP-NEXT: store ptr [[TMP1]], ptr [[GEP2]], align 8
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP46:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -987,16 +1355,50 @@ define void @vp_ptrtoint(ptr %a, ptr %b, i64 %N) {
; NO-VP-LABEL: define void @vp_ptrtoint(
; NO-VP-SAME: ptr [[A:%.*]], ptr [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[ENTRY:.*]]:
+; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP13]], 2
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
+; NO-VP-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+; NO-VP-NEXT: [[TMP7:%.*]] = mul <vscale x 2 x i64> [[TMP6]], splat (i64 1)
+; NO-VP-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP7]]
+; NO-VP-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP5]]
+; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP8]], i64 0
+; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[B]], <vscale x 2 x i64> [[VEC_IND]]
+; NO-VP-NEXT: [[TMP10:%.*]] = ptrtoint <vscale x 2 x ptr> [[TMP9]] to <vscale x 2 x i64>
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP10]], ptr [[TMP11]], align 8
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; NO-VP-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]]
; NO-VP-NEXT: [[TMP0:%.*]] = ptrtoint ptr [[GEP]] to i64
; NO-VP-NEXT: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: store i64 [[TMP0]], ptr [[GEP2]], align 8
; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP48:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -1068,3 +1470,53 @@ exit:
; IF-EVL: [[LOOP48]] = distinct !{[[LOOP48]], [[META6]], [[META7]], [[META8]]}
; IF-EVL: [[LOOP49]] = distinct !{[[LOOP49]], [[META8]], [[META6]]}
;.
+; NO-VP: [[META0]] = !{[[META1:![0-9]+]]}
+; NO-VP: [[META1]] = distinct !{[[META1]], [[META2:![0-9]+]]}
+; NO-VP: [[META2]] = distinct !{[[META2]], !"LVerDomain"}
+; NO-VP: [[META3]] = !{[[META4:![0-9]+]]}
+; NO-VP: [[META4]] = distinct !{[[META4]], [[META2]]}
+; NO-VP: [[LOOP5]] = distinct !{[[LOOP5]], [[META6:![0-9]+]], [[META7:![0-9]+]]}
+; NO-VP: [[META6]] = !{!"llvm.loop.isvectorized", i32 1}
+; NO-VP: [[META7]] = !{!"llvm.loop.unroll.runtime.disable"}
+; NO-VP: [[LOOP8]] = distinct !{[[LOOP8]], [[META6]]}
+; NO-VP: [[META9]] = !{[[META10:![0-9]+]]}
+; NO-VP: [[META10]] = distinct !{[[META10]], [[META11:![0-9]+]]}
+; NO-VP: [[META11]] = distinct !{[[META11]], !"LVerDomain"}
+; NO-VP: [[META12]] = !{[[META13:![0-9]+]]}
+; NO-VP: [[META13]] = distinct !{[[META13]], [[META11]]}
+; NO-VP: [[LOOP14]] = distinct !{[[LOOP14]], [[META6]], [[META7]]}
+; NO-VP: [[LOOP15]] = distinct !{[[LOOP15]], [[META6]]}
+; NO-VP: [[META16]] = !{[[META17:![0-9]+]]}
+; NO-VP: [[META17]] = distinct !{[[META17]], [[META18:![0-9]+]]}
+; NO-VP: [[META18]] = distinct !{[[META18]], !"LVerDomain"}
+; NO-VP: [[META19]] = !{[[META20:![0-9]+]]}
+; NO-VP: [[META20]] = distinct !{[[META20]], [[META18]]}
+; NO-VP: [[LOOP21]] = distinct !{[[LOOP21]], [[META6]], [[META7]]}
+; NO-VP: [[LOOP22]] = distinct !{[[LOOP22]], [[META6]]}
+; NO-VP: [[META23]] = !{[[META24:![0-9]+]]}
+; NO-VP: [[META24]] = distinct !{[[META24]], [[META25:![0-9]+]]}
+; NO-VP: [[META25]] = distinct !{[[META25]], !"LVerDomain"}
+; NO-VP: [[META26]] = !{[[META27:![0-9]+]]}
+; NO-VP: [[META27]] = distinct !{[[META27]], [[META25]]}
+; NO-VP: [[LOOP28]] = distinct !{[[LOOP28]], [[META6]], [[META7]]}
+; NO-VP: [[LOOP29]] = distinct !{[[LOOP29]], [[META6]]}
+; NO-VP: [[META30]] = !{[[META31:![0-9]+]]}
+; NO-VP: [[META31]] = distinct !{[[META31]], [[META32:![0-9]+]]}
+; NO-VP: [[META32]] = distinct !{[[META32]], !"LVerDomain"}
+; NO-VP: [[META33]] = !{[[META34:![0-9]+]]}
+; NO-VP: [[META34]] = distinct !{[[META34]], [[META32]]}
+; NO-VP: [[LOOP35]] = distinct !{[[LOOP35]], [[META6]], [[META7]]}
+; NO-VP: [[LOOP36]] = distinct !{[[LOOP36]], [[META6]]}
+; NO-VP: [[LOOP37]] = distinct !{[[LOOP37]], [[META6]], [[META7]]}
+; NO-VP: [[LOOP38]] = distinct !{[[LOOP38]], [[META6]]}
+; NO-VP: [[LOOP39]] = distinct !{[[LOOP39]], [[META6]], [[META7]]}
+; NO-VP: [[LOOP40]] = distinct !{[[LOOP40]], [[META6]]}
+; NO-VP: [[LOOP41]] = distinct !{[[LOOP41]], [[META6]], [[META7]]}
+; NO-VP: [[LOOP42]] = distinct !{[[LOOP42]], [[META6]]}
+; NO-VP: [[LOOP43]] = distinct !{[[LOOP43]], [[META6]], [[META7]]}
+; NO-VP: [[LOOP44]] = distinct !{[[LOOP44]], [[META6]]}
+; NO-VP: [[LOOP45]] = distinct !{[[LOOP45]], [[META6]], [[META7]]}
+; NO-VP: [[LOOP46]] = distinct !{[[LOOP46]], [[META6]]}
+; NO-VP: [[LOOP47]] = distinct !{[[LOOP47]], [[META6]], [[META7]]}
+; NO-VP: [[LOOP48]] = distinct !{[[LOOP48]], [[META7]], [[META6]]}
+;.
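The NO-VP metadata checks above reduce to two markers the vectorizer attaches to loop back-edge branches. As a minimal sketch of the IR shape being matched (the !5, !1, !2 numbers are illustrative, not taken from the test):

  br i1 %done, label %exit, label %loop, !llvm.loop !5

  !5 = distinct !{!5, !1, !2}
  !1 = !{!"llvm.loop.isvectorized", i32 1}
  !2 = !{!"llvm.loop.unroll.runtime.disable"}

"llvm.loop.isvectorized" tells later vectorizer runs to leave the loop alone, and "llvm.loop.unroll.runtime.disable" keeps the runtime unroller off the scalar remainder; the LOOP<n> entries above differ only in which of the two they carry and in what order.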
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-cond-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
index f8e8435..5f53cb6 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-cond-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cond-reduction.ll
@@ -1,24 +1,20 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefixes=IF-EVL-OUTLOOP
; RUN: opt -passes=loop-vectorize \
; RUN: -prefer-inloop-reductions \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefixes=IF-EVL-INLOOP
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefixes=NO-VP-OUTLOOP
; RUN: opt -passes=loop-vectorize \
; RUN: -prefer-inloop-reductions \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefixes=NO-VP-INLOOP
define i32 @cond_add(ptr %a, i64 %n, i32 %start) {
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cost.ll
index cd53ea0..238aed1 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-cost.ll
@@ -3,7 +3,7 @@
; RUN: -mtriple riscv64-linux-gnu -mattr=+v,+f -S -disable-output -debug-only=loop-vectorize 2>&1 | FileCheck %s --check-prefix=DATA
; RUN: opt < %s -passes=loop-vectorize -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
-; RUN: -mtriple riscv64-linux-gnu -force-tail-folding-style=data-with-evl -mattr=+v,+f -S \
+; RUN: -mtriple riscv64-linux-gnu -mattr=+v,+f -S \
; RUN: -disable-output -debug-only=loop-vectorize 2>&1 | FileCheck %s --check-prefix=EVL
; DATA: Cost of 2 for VF 2: EMIT{{.*}} = active lane mask
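For reference, the -prefer-predicate-over-epilogue= values used throughout these RUN lines steer how the vectorizer handles the loop tail; a rough gloss (behavior inferred from the flag names and the IF-EVL/NO-VP checks; input.ll is a placeholder):

  ; scalar-epilogue                 : vectorize the main loop and keep a
  ;                                   scalar remainder loop for the tail
  ; predicate-else-scalar-epilogue  : prefer folding the tail into the vector
  ;                                   loop with masking/EVL, falling back to
  ;                                   a scalar epilogue when that fails
  ; predicate-dont-vectorize        : fold the tail with masking, or do not
  ;                                   vectorize at all
  ; RUN: opt -passes=loop-vectorize \
  ; RUN:   -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
  ; RUN:   -mtriple=riscv64 -mattr=+v -S < input.ll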
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-div.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
index 0a66ce8..f27f49e 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-div.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-div.ll
@@ -1,12 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s --check-prefix=IF-EVL
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s --check-prefix=NO-VP
define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
@@ -65,9 +63,38 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; NO-VP-LABEL: define void @test_sdiv(
; NO-VP-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0:[0-9]+]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP12]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP13]], 2
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP6]], align 8
+; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8
+; NO-VP-NEXT: [[TMP8:%.*]] = sdiv <vscale x 2 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]]
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[C]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP8]], ptr [[TMP9]], align 8
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_GEP]], align 8
; NO-VP-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
@@ -77,7 +104,7 @@ define void @test_sdiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; NO-VP-NEXT: store i64 [[TMP2]], ptr [[C_GEP]], align 8
; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; NO-VP-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; NO-VP-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -158,9 +185,38 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; NO-VP-LABEL: define void @test_udiv(
; NO-VP-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP12]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP13]], 2
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP6]], align 8
+; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8
+; NO-VP-NEXT: [[TMP8:%.*]] = udiv <vscale x 2 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]]
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[C]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP8]], ptr [[TMP9]], align 8
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_GEP]], align 8
; NO-VP-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
@@ -170,7 +226,7 @@ define void @test_udiv(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; NO-VP-NEXT: store i64 [[TMP2]], ptr [[C_GEP]], align 8
; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; NO-VP-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; NO-VP-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -250,9 +306,38 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; NO-VP-LABEL: define void @test_srem(
; NO-VP-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP12]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP13]], 2
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP6]], align 8
+; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8
+; NO-VP-NEXT: [[TMP8:%.*]] = srem <vscale x 2 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]]
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[C]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP8]], ptr [[TMP9]], align 8
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_GEP]], align 8
; NO-VP-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
@@ -262,7 +347,7 @@ define void @test_srem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; NO-VP-NEXT: store i64 [[TMP2]], ptr [[C_GEP]], align 8
; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; NO-VP-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; NO-VP-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -342,9 +427,38 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; NO-VP-LABEL: define void @test_urem(
; NO-VP-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], ptr noalias [[C:%.*]]) #[[ATTR0]] {
; NO-VP-NEXT: [[LOOP_PREHEADER:.*]]:
+; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP12]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; NO-VP: [[VECTOR_PH]]:
+; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP13]], 2
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
+; NO-VP-NEXT: br label %[[VECTOR_BODY:.*]]
+; NO-VP: [[VECTOR_BODY]]:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[A]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP6]], align 8
+; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 2 x i64>, ptr [[TMP7]], align 8
+; NO-VP-NEXT: [[TMP8:%.*]] = urem <vscale x 2 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]]
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[C]], i64 [[INDEX]]
+; NO-VP-NEXT: store <vscale x 2 x i64> [[TMP8]], ptr [[TMP9]], align 8
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP10]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; NO-VP: [[MIDDLE_BLOCK]]:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; NO-VP: [[SCALAR_PH]]:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[LOOP_PREHEADER]] ]
; NO-VP-NEXT: br label %[[LOOP:.*]]
; NO-VP: [[LOOP]]:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[LOOP_PREHEADER]] ]
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
; NO-VP-NEXT: [[A_GEP:%.*]] = getelementptr i64, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_GEP]], align 8
; NO-VP-NEXT: [[B_GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
@@ -354,7 +468,7 @@ define void @test_urem(ptr noalias %a, ptr noalias %b, ptr noalias %c) {
; NO-VP-NEXT: store i64 [[TMP2]], ptr [[C_GEP]], align 8
; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; NO-VP-NEXT: [[DONE:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; NO-VP-NEXT: br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]]
+; NO-VP-NEXT: br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
; NO-VP: [[EXIT]]:
; NO-VP-NEXT: ret void
;
@@ -390,3 +504,14 @@ exit:
; IF-EVL: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META2]], [[META3]]}
; IF-EVL: [[LOOP10]] = distinct !{[[LOOP10]], [[META3]], [[META1]]}
;.
+; NO-VP: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; NO-VP: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; NO-VP: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; NO-VP: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+; NO-VP: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
+; NO-VP: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]}
+; NO-VP: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
+; NO-VP: [[LOOP9]] = distinct !{[[LOOP9]], [[META2]], [[META1]]}
+;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
index 7c8f111a..6a6bca2d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-fixed-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-fixed-order-recurrence.ll
@@ -1,14 +1,12 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -passes=loop-vectorize \
; RUN: -prefer-inloop-reductions \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v,+f -S < %s| FileCheck %s --check-prefix=IF-EVL
; RUN: opt -passes=loop-vectorize \
; RUN: -prefer-inloop-reductions \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v,+f -S < %s| FileCheck %s --check-prefix=NO-VP
define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
@@ -482,20 +480,62 @@ define i32 @FOR_reduction(ptr noalias %A, ptr noalias %B, i64 %TC) {
; IF-EVL-LABEL: define i32 @FOR_reduction(
; IF-EVL-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[TC:%.*]]) #[[ATTR0]] {
; IF-EVL-NEXT: [[ENTRY:.*]]:
+; IF-EVL-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP9]], 4
+; IF-EVL-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TC]], [[TMP1]]
+; IF-EVL-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; IF-EVL: [[VECTOR_PH]]:
+; IF-EVL-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; IF-EVL-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TC]], [[TMP3]]
+; IF-EVL-NEXT: [[N_VEC:%.*]] = sub i64 [[TC]], [[N_MOD_VF]]
+; IF-EVL-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; IF-EVL-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; IF-EVL-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32()
+; IF-EVL-NEXT: [[TMP7:%.*]] = mul nuw i32 [[TMP6]], 4
+; IF-EVL-NEXT: [[TMP8:%.*]] = sub i32 [[TMP7]], 1
+; IF-EVL-NEXT: [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 33, i32 [[TMP8]]
+; IF-EVL-NEXT: br label %[[VECTOR_BODY:.*]]
+; IF-EVL: [[VECTOR_BODY]]:
+; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[VECTOR_RECUR:%.*]] = phi <vscale x 4 x i32> [ [[VECTOR_RECUR_INIT]], %[[VECTOR_PH]] ], [ [[WIDE_LOAD:%.*]], %[[VECTOR_BODY]] ]
+; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]]
+; IF-EVL-NEXT: [[WIDE_LOAD]] = load <vscale x 4 x i32>, ptr [[ARRAYIDX]], align 4
+; IF-EVL-NEXT: [[TMP10:%.*]] = call <vscale x 4 x i32> @llvm.vector.splice.nxv4i32(<vscale x 4 x i32> [[VECTOR_RECUR]], <vscale x 4 x i32> [[WIDE_LOAD]], i32 -1)
+; IF-EVL-NEXT: [[TMP11:%.*]] = add nsw <vscale x 4 x i32> [[TMP10]], [[WIDE_LOAD]]
+; IF-EVL-NEXT: [[TMP12:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS]]
+; IF-EVL-NEXT: store <vscale x 4 x i32> [[TMP11]], ptr [[TMP12]], align 4
+; IF-EVL-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDVARS]], [[TMP5]]
+; IF-EVL-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; IF-EVL-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; IF-EVL: [[MIDDLE_BLOCK]]:
+; IF-EVL-NEXT: [[TMP14:%.*]] = call i32 @llvm.vscale.i32()
+; IF-EVL-NEXT: [[TMP15:%.*]] = mul nuw i32 [[TMP14]], 4
+; IF-EVL-NEXT: [[TMP16:%.*]] = sub i32 [[TMP15]], 1
+; IF-EVL-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <vscale x 4 x i32> [[WIDE_LOAD]], i32 [[TMP16]]
+; IF-EVL-NEXT: [[TMP17:%.*]] = call i32 @llvm.vscale.i32()
+; IF-EVL-NEXT: [[TMP18:%.*]] = mul nuw i32 [[TMP17]], 4
+; IF-EVL-NEXT: [[TMP19:%.*]] = sub i32 [[TMP18]], 2
+; IF-EVL-NEXT: [[VECTOR_RECUR_EXTRACT_FOR_PHI:%.*]] = extractelement <vscale x 4 x i32> [[WIDE_LOAD]], i32 [[TMP19]]
+; IF-EVL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TC]], [[N_VEC]]
+; IF-EVL-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; IF-EVL: [[SCALAR_PH]]:
+; IF-EVL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; IF-EVL-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i32 [ [[VECTOR_RECUR_EXTRACT]], %[[MIDDLE_BLOCK]] ], [ 33, %[[ENTRY]] ]
; IF-EVL-NEXT: br label %[[FOR_BODY:.*]]
; IF-EVL: [[FOR_BODY]]:
-; IF-EVL-NEXT: [[INDVARS:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ 33, %[[ENTRY]] ], [ [[TMP0:%.*]], %[[FOR_BODY]] ]
-; IF-EVL-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[INDVARS]]
-; IF-EVL-NEXT: [[TMP0]] = load i32, ptr [[ARRAYIDX]], align 4
+; IF-EVL-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[INDVARS_NEXT:%.*]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[FOR1:%.*]] = phi i32 [ [[SCALAR_RECUR_INIT]], %[[SCALAR_PH]] ], [ [[TMP0:%.*]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds nuw i32, ptr [[A]], i64 [[IV]]
+; IF-EVL-NEXT: [[TMP0]] = load i32, ptr [[ARRAYIDX1]], align 4
; IF-EVL-NEXT: [[ADD:%.*]] = add nsw i32 [[FOR1]], [[TMP0]]
-; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[INDVARS]]
+; IF-EVL-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds nuw i32, ptr [[B]], i64 [[IV]]
; IF-EVL-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4
-; IF-EVL-NEXT: [[INDVARS_NEXT]] = add nuw nsw i64 [[INDVARS]], 1
+; IF-EVL-NEXT: [[INDVARS_NEXT]] = add nuw nsw i64 [[IV]], 1
; IF-EVL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_NEXT]], [[TC]]
-; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END:.*]], label %[[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; IF-EVL-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; IF-EVL: [[FOR_END]]:
-; IF-EVL-NEXT: [[FOR1_LCSSA:%.*]] = phi i32 [ [[FOR1]], %[[FOR_BODY]] ]
+; IF-EVL-NEXT: [[FOR1_LCSSA:%.*]] = phi i32 [ [[FOR1]], %[[FOR_BODY]] ], [ [[VECTOR_RECUR_EXTRACT_FOR_PHI]], %[[MIDDLE_BLOCK]] ]
; IF-EVL-NEXT: ret i32 [[FOR1_LCSSA]]
;
; NO-VP-LABEL: define i32 @FOR_reduction(
@@ -733,8 +773,8 @@ for.end:
; IF-EVL: [[LOOP6]] = distinct !{[[LOOP6]], [[META3]], [[META1]]}
; IF-EVL: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]], [[META3]]}
; IF-EVL: [[LOOP8]] = distinct !{[[LOOP8]], [[META3]], [[META1]]}
-; IF-EVL: [[LOOP9]] = distinct !{[[LOOP9]], [[META10:![0-9]+]]}
-; IF-EVL: [[META10]] = !{!"llvm.loop.vectorize.enable", i1 true}
+; IF-EVL: [[LOOP9]] = distinct !{[[LOOP9]], [[META1]], [[META3]]}
+; IF-EVL: [[LOOP10]] = distinct !{[[LOOP10]], [[META3]], [[META1]]}
; IF-EVL: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]], [[META2]], [[META3]]}
; IF-EVL: [[LOOP12]] = distinct !{[[LOOP12]], [[META3]], [[META1]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
index 2b7a9fb..892293b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-gather-scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-gather-scatter.ll
@@ -1,12 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=IF-EVL
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=NO-VP
define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %index, i64 %n) {
@@ -69,18 +67,54 @@ define void @gather_scatter(ptr noalias %in, ptr noalias %out, ptr noalias %inde
;
; NO-VP-LABEL: @gather_scatter(
; NO-VP-NEXT: entry:
+; NO-VP-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP14:%.*]] = mul nuw i64 [[TMP13]], 2
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP14]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]]
+; NO-VP: vector.ph:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
+; NO-VP-NEXT: [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+; NO-VP-NEXT: [[TMP7:%.*]] = mul <vscale x 2 x i64> [[TMP6]], splat (i64 1)
+; NO-VP-NEXT: [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP7]]
+; NO-VP-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP5]]
+; NO-VP-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP8]], i64 0
+; NO-VP-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
+; NO-VP: vector.body:
+; NO-VP-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; NO-VP-NEXT: [[VEC_IND:%.*]] = phi <vscale x 2 x i64> [ [[INDUCTION]], [[ENTRY]] ], [ [[VEC_IND_NEXT:%.*]], [[FOR_BODY]] ]
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[INDEX:%.*]], <vscale x 2 x i64> [[VEC_IND]]
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> [[TMP9]], i32 8, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> poison)
+; NO-VP-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[IN:%.*]], <vscale x 2 x i64> [[WIDE_MASKED_GATHER]]
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER2:%.*]] = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> [[TMP10]], i32 4, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x float> poison)
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[OUT:%.*]], <vscale x 2 x i64> [[WIDE_MASKED_GATHER]]
+; NO-VP-NEXT: call void @llvm.masked.scatter.nxv2f32.nxv2p0(<vscale x 2 x float> [[WIDE_MASKED_GATHER2]], <vscale x 2 x ptr> [[TMP11]], i32 4, <vscale x 2 x i1> splat (i1 true))
+; NO-VP-NEXT: [[INDVARS_IV_NEXT]] = add nuw i64 [[INDVARS_IV]], [[TMP5]]
+; NO-VP-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
+; NO-VP-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; NO-VP: middle.block:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; NO-VP: scalar.ph:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ]
+; NO-VP-NEXT: br label [[FOR_BODY1:%.*]]
; NO-VP: for.body:
-; NO-VP-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
-; NO-VP-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[INDEX:%.*]], i64 [[INDVARS_IV]]
+; NO-VP-NEXT: [[INDVARS_IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT1:%.*]], [[FOR_BODY1]] ]
+; NO-VP-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[INDEX]], i64 [[INDVARS_IV1]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX3]], align 8
-; NO-VP-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[IN:%.*]], i64 [[TMP0]]
+; NO-VP-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[TMP0]]
; NO-VP-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX5]], align 4
-; NO-VP-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[OUT:%.*]], i64 [[TMP0]]
+; NO-VP-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds float, ptr [[OUT]], i64 [[TMP0]]
; NO-VP-NEXT: store float [[TMP1]], ptr [[ARRAYIDX7]], align 4
-; NO-VP-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; NO-VP-NEXT: [[INDVARS_IV_NEXT1]] = add nuw nsw i64 [[INDVARS_IV1]], 1
+; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT1]], [[N]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP3:![0-9]+]]
; NO-VP: for.end:
; NO-VP-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-inloop-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
index a7955a5..b7d7fc1 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-inloop-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-inloop-reduction.ll
@@ -1,14 +1,12 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize \
; RUN: -prefer-inloop-reductions \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v,+f -S < %s| FileCheck %s --check-prefix=IF-EVL
; RUN: opt -passes=loop-vectorize \
; RUN: -prefer-inloop-reductions \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v,+f -S < %s| FileCheck %s --check-prefix=NO-VP
define i32 @add(ptr %a, i64 %n, i32 %start) {
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
index b2ebe6f..80b0714 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-interleave.ll
@@ -1,12 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck --check-prefix=IF-EVL %s
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck --check-prefix=NO-VP %s
; FIXME: interleaved accesses are not supported yet with predicated vectorization.
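An interleaved access group here is a set of strided loads or stores covering adjacent elements, which the vectorizer would normally widen into one wide memory operation plus shuffles. A minimal scalar shape that forms such a group (illustrative IR, not taken from this test):

  %i2       = shl nuw nsw i64 %i, 1
  %i2.plus1 = add nuw nsw i64 %i2, 1
  %gep.even = getelementptr inbounds i32, ptr %a, i64 %i2
  %gep.odd  = getelementptr inbounds i32, ptr %a, i64 %i2.plus1
  store i32 %x, ptr %gep.even
  store i32 %y, ptr %gep.odd

The shuffle-based lowering is what the FIXME above says is still missing under predicated (masked/EVL) vectorization.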
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-intermediate-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll
index 314c201..c7808b1 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-intermediate-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-intermediate-store.ll
@@ -1,24 +1,20 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefixes=IF-EVL-OUTLOOP
; RUN: opt -passes=loop-vectorize \
; RUN: -prefer-inloop-reductions \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefixes=IF-EVL-INLOOP
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefixes=NO-VP-OUTLOOP
; RUN: opt -passes=loop-vectorize \
; RUN: -prefer-inloop-reductions \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefixes=NO-VP-INLOOP
define void @reduction_intermediate_store(ptr %a, i64 %n, i32 %start, ptr %addr) {
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-iv32.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
index 2c265c9..bda9145 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-iv32.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-iv32.ll
@@ -1,12 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck --check-prefix=IF-EVL %s
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck --check-prefix=NO-VP %s
define void @iv32(ptr noalias %a, ptr noalias %b, i32 %N) {
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-known-no-overflow.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
index 2c77d78..2601bfd 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-known-no-overflow.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-known-no-overflow.ll
@@ -1,7 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s
; TODO: We know the IV will never overflow here so we can skip the overflow
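The overflow being skipped is, roughly, the wrap that can occur when the trip count is rounded up to a multiple of VF for the folded tail: for example, an i8 trip count of 255 rounded up to a multiple of 8 gives 256, which truncates to 0 in i8. When the IV's range provably stays clear of the type maximum, that runtime guard is dead weight.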
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-masked-loadstore.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
index 1efd539..226372f 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-masked-loadstore.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-masked-loadstore.ll
@@ -1,12 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=IF-EVL
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=NO-VP
define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) {
@@ -66,23 +64,52 @@ define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) {
;
; NO-VP-LABEL: @masked_loadstore(
; NO-VP-NEXT: entry:
+; NO-VP-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 4
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP12]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; NO-VP: vector.ph:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; NO-VP-NEXT: [[INC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
+; NO-VP: vector.body:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ]
+; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP6]], align 4
+; NO-VP-NEXT: [[TMP7:%.*]] = icmp ne <vscale x 4 x i32> [[WIDE_LOAD]], zeroinitializer
+; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[A:%.*]], i64 [[INDEX]]
+; NO-VP-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP8]], i32 4, <vscale x 4 x i1> [[TMP7]], <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[TMP9:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], [[WIDE_MASKED_LOAD]]
+; NO-VP-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP9]], ptr [[TMP8]], i32 4, <vscale x 4 x i1> [[TMP7]])
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[INC]]
+; NO-VP-NEXT: br i1 [[TMP10]], label [[FOR_INC:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; NO-VP: middle.block:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[INC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; NO-VP: scalar.ph:
+; NO-VP-NEXT: [[I_011:%.*]] = phi i64 [ [[INC]], [[FOR_INC]] ], [ 0, [[ENTRY:%.*]] ]
+; NO-VP-NEXT: br label [[FOR_BODY1:%.*]]
; NO-VP: for.body:
-; NO-VP-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_INC:%.*]] ], [ 0, [[ENTRY:%.*]] ]
-; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[I_011]]
+; NO-VP-NEXT: [[I_11:%.*]] = phi i64 [ [[INC1:%.*]], [[FOR_INC1:%.*]] ], [ [[I_011]], [[SCALAR_PH]] ]
+; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[I_11]]
; NO-VP-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; NO-VP-NEXT: [[CMP1:%.*]] = icmp ne i32 [[TMP0]], 0
-; NO-VP-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]]
+; NO-VP-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC1]]
; NO-VP: if.then:
-; NO-VP-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[I_011]]
+; NO-VP-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[I_11]]
; NO-VP-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX3]], align 4
; NO-VP-NEXT: [[ADD:%.*]] = add i32 [[TMP0]], [[TMP1]]
; NO-VP-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX3]], align 4
-; NO-VP-NEXT: br label [[FOR_INC]]
+; NO-VP-NEXT: br label [[FOR_INC1]]
; NO-VP: for.inc:
-; NO-VP-NEXT: [[INC]] = add nuw nsw i64 [[I_011]], 1
-; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC]], [[N:%.*]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT:%.*]], label [[FOR_BODY]]
+; NO-VP-NEXT: [[INC1]] = add nuw nsw i64 [[I_11]], 1
+; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INC1]], [[N]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[EXIT]], label [[FOR_BODY1]], !llvm.loop [[LOOP3:![0-9]+]]
; NO-VP: exit:
; NO-VP-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-no-masking.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-no-masking.ll
index 1c49fba..6285a6f 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-no-masking.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-no-masking.ll
@@ -1,12 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s
; No need to emit predicated vector code if masked vector instructions are not required.
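Both RUN lines above feed the same CHECK prefix, so the expectation is that the two configurations converge on plain, unmasked wide memory operations, e.g. (sketch):

  %wide = load <vscale x 4 x i32>, ptr %gep, align 4
  store <vscale x 4 x i32> %wide, ptr %gep.dst, align 4

rather than @llvm.masked.load/@llvm.masked.store or @llvm.vp.* calls, since every lane is live on every iteration.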
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-ordered-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
index 81e0f75..41bddaa 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-ordered-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-ordered-reduction.ll
@@ -1,14 +1,12 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize \
; RUN: -force-ordered-reductions=true -hints-allow-reordering=false \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v,+f -S < %s| FileCheck %s --check-prefix=IF-EVL
; RUN: opt -passes=loop-vectorize \
; RUN: -force-ordered-reductions=true -hints-allow-reordering=false \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v,+f -S < %s| FileCheck %s --check-prefix=NO-VP
define float @fadd(ptr noalias nocapture readonly %a, i64 %n) {
@@ -60,18 +58,45 @@ define float @fadd(ptr noalias nocapture readonly %a, i64 %n) {
;
; NO-VP-LABEL: @fadd(
; NO-VP-NEXT: entry:
+; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N1:%.*]], [[TMP1]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]]
+; NO-VP: vector.ph:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N1]], [[TMP3]]
+; NO-VP-NEXT: [[N:%.*]] = sub i64 [[N1]], [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
-; NO-VP: for.body:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; NO-VP: vector.body:
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; NO-VP-NEXT: [[SUM_07:%.*]] = phi float [ 0.000000e+00, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; NO-VP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[IV]]
-; NO-VP-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4
-; NO-VP-NEXT: [[ADD]] = fadd float [[TMP0]], [[SUM_07]]
-; NO-VP-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
-; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N:%.*]]
-; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[ARRAYIDX]], align 4
+; NO-VP-NEXT: [[ADD]] = call float @llvm.vector.reduce.fadd.nxv4f32(float [[SUM_07]], <vscale x 4 x float> [[WIDE_LOAD]])
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP5]]
+; NO-VP-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; NO-VP: middle.block:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N1]], [[N]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
+; NO-VP: scalar.ph:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ]
+; NO-VP-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[ADD]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[ENTRY1]] ]
+; NO-VP-NEXT: br label [[FOR_BODY1:%.*]]
+; NO-VP: for.body:
+; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[FOR_BODY1]] ]
+; NO-VP-NEXT: [[SUM_7:%.*]] = phi float [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD1:%.*]], [[FOR_BODY1]] ]
+; NO-VP-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV1]]
+; NO-VP-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX1]], align 4
+; NO-VP-NEXT: [[ADD1]] = fadd float [[TMP9]], [[SUM_7]]
+; NO-VP-NEXT: [[IV_NEXT1]] = add nuw nsw i64 [[IV1]], 1
+; NO-VP-NEXT: [[EXITCOND_NOT1:%.*]] = icmp eq i64 [[IV_NEXT1]], [[N1]]
+; NO-VP-NEXT: br i1 [[EXITCOND_NOT1]], label [[FOR_END]], label [[FOR_BODY1]], !llvm.loop [[LOOP3:![0-9]+]]
; NO-VP: for.end:
-; NO-VP-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD]], [[FOR_BODY]] ]
+; NO-VP-NEXT: [[ADD_LCSSA:%.*]] = phi float [ [[ADD1]], [[FOR_BODY1]] ], [ [[ADD]], [[MIDDLE_BLOCK]] ]
; NO-VP-NEXT: ret float [[ADD_LCSSA]]
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reduction-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction-cost.ll
index 15f9993..10d83f4 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reduction-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction-cost.ll
@@ -1,7 +1,6 @@
; REQUIRES: asserts
; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize --disable-output \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s 2>&1 | FileCheck %s
; CHECK: Cost of 2 for VF vscale x 4: WIDEN-INTRINSIC vp<%{{.+}}> = call llvm.vp.merge(ir<true>, ir<%add>, ir<%rdx>, vp<%{{.+}}>)
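The WIDEN-INTRINSIC line above is VPlan's cost dump for the EVL-predicated reduction update. In final IR it would come out roughly as follows (element type and value names are assumed for illustration; only the VF, vscale x 4, is taken from the check):

  %rdx.next = call <vscale x 4 x i32> @llvm.vp.merge.nxv4i32(<vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> %add, <vscale x 4 x i32> %rdx, i32 %evl)

Lanes below %evl take the new %add value and lanes at or above %evl keep the old %rdx, which is how the reduction stays correct on the final, partial iteration.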
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
index 73fd389..d2dc482 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reduction.ll
@@ -1,12 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v,+f -S < %s| FileCheck %s --check-prefix=IF-EVL
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v,+f -S < %s| FileCheck %s --check-prefix=NO-VP
define i32 @add(ptr %a, i64 %n, i32 %start) {
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reverse-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
index 01033c8..62eef6c 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reverse-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-reverse-load-store.ll
@@ -1,12 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=IF-EVL
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=NO-VP
define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %ptr2) {
@@ -75,18 +73,61 @@ define void @reverse_load_store(i64 %startval, ptr noalias %ptr, ptr noalias %pt
;
; NO-VP-LABEL: @reverse_load_store(
; NO-VP-NEXT: entry:
+; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; NO-VP: vector.ph:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[STARTVAL:%.*]], [[N_VEC]]
+; NO-VP-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32
; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
+; NO-VP: vector.body:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ]
+; NO-VP-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[STARTVAL]], [[INDEX]]
+; NO-VP-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], -1
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 [[TMP8]]
+; NO-VP-NEXT: [[TMP10:%.*]] = mul i64 0, [[TMP5]]
+; NO-VP-NEXT: [[TMP11:%.*]] = sub i64 [[TMP5]], 1
+; NO-VP-NEXT: [[TMP12:%.*]] = mul i64 -1, [[TMP11]]
+; NO-VP-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i64 [[TMP10]]
+; NO-VP-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i64 [[TMP12]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP14]], align 4
+; NO-VP-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_LOAD]])
+; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[PTR2:%.*]], i64 [[TMP8]]
+; NO-VP-NEXT: [[TMP16:%.*]] = mul i64 0, [[TMP5]]
+; NO-VP-NEXT: [[TMP17:%.*]] = sub i64 [[TMP5]], 1
+; NO-VP-NEXT: [[TMP18:%.*]] = mul i64 -1, [[TMP17]]
+; NO-VP-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP15]], i64 [[TMP16]]
+; NO-VP-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[TMP19]], i64 [[TMP18]]
+; NO-VP-NEXT: [[REVERSE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[REVERSE]])
+; NO-VP-NEXT: store <vscale x 4 x i32> [[REVERSE1]], ptr [[TMP20]], align 4
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; NO-VP: middle.block:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label [[LOOPEND:%.*]], label [[SCALAR_PH]]
+; NO-VP: scalar.ph:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP6]], [[MIDDLE_BLOCK]] ], [ [[STARTVAL]], [[ENTRY:%.*]] ]
+; NO-VP-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i32 [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
+; NO-VP-NEXT: br label [[FOR_BODY1:%.*]]
; NO-VP: for.body:
-; NO-VP-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL:%.*]], [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
-; NO-VP-NEXT: [[I:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
+; NO-VP-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[ADD:%.*]], [[FOR_BODY1]] ]
+; NO-VP-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY1]] ]
; NO-VP-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1
-; NO-VP-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i64 [[ADD]]
+; NO-VP-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i64 [[ADD]]
; NO-VP-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4
-; NO-VP-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2:%.*]], i64 [[ADD]]
+; NO-VP-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2]], i64 [[ADD]]
; NO-VP-NEXT: store i32 [[TMP]], ptr [[GEPS]], align 4
; NO-VP-NEXT: [[INC]] = add i32 [[I]], 1
; NO-VP-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC]], 1024
-; NO-VP-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[LOOPEND:%.*]]
+; NO-VP-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY1]], label [[LOOPEND]], !llvm.loop [[LOOP3:![0-9]+]]
; NO-VP: loopend:
; NO-VP-NEXT: ret void
;
@@ -188,25 +229,74 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal
;
; NO-VP-LABEL: @reverse_load_store_masked(
; NO-VP-NEXT: entry:
+; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 4
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[ENTRY:%.*]], label [[VECTOR_PH:%.*]]
+; NO-VP: vector.ph:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 4
+; NO-VP-NEXT: [[TMP6:%.*]] = sub i64 [[STARTVAL1:%.*]], [[N_VEC]]
+; NO-VP-NEXT: [[TMP7:%.*]] = trunc i64 [[N_VEC]] to i32
; NO-VP-NEXT: br label [[FOR_BODY:%.*]]
+; NO-VP: vector.body:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ]
+; NO-VP-NEXT: [[OFFSET_IDX:%.*]] = sub i64 [[STARTVAL1]], [[INDEX]]
+; NO-VP-NEXT: [[OFFSET_IDX1:%.*]] = trunc i64 [[INDEX]] to i32
+; NO-VP-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], -1
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i32 [[OFFSET_IDX1]]
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP9]], align 4
+; NO-VP-NEXT: [[TMP10:%.*]] = icmp slt <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 100)
+; NO-VP-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[PTR1:%.*]], i64 [[TMP8]]
+; NO-VP-NEXT: [[TMP12:%.*]] = mul i64 0, [[TMP5]]
+; NO-VP-NEXT: [[TMP13:%.*]] = sub i64 [[TMP5]], 1
+; NO-VP-NEXT: [[TMP14:%.*]] = mul i64 -1, [[TMP13]]
+; NO-VP-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[TMP11]], i64 [[TMP12]]
+; NO-VP-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP15]], i64 [[TMP14]]
+; NO-VP-NEXT: [[REVERSE:%.*]] = call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[TMP10]])
+; NO-VP-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP16]], i32 4, <vscale x 4 x i1> [[REVERSE]], <vscale x 4 x i32> poison)
+; NO-VP-NEXT: [[REVERSE2:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[WIDE_MASKED_LOAD]])
+; NO-VP-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[PTR2:%.*]], i64 [[TMP8]]
+; NO-VP-NEXT: [[TMP18:%.*]] = mul i64 0, [[TMP5]]
+; NO-VP-NEXT: [[TMP19:%.*]] = sub i64 [[TMP5]], 1
+; NO-VP-NEXT: [[TMP20:%.*]] = mul i64 -1, [[TMP19]]
+; NO-VP-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[TMP17]], i64 [[TMP18]]
+; NO-VP-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[TMP21]], i64 [[TMP20]]
+; NO-VP-NEXT: [[REVERSE3:%.*]] = call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[TMP10]])
+; NO-VP-NEXT: [[REVERSE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.reverse.nxv4i32(<vscale x 4 x i32> [[REVERSE2]])
+; NO-VP-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[REVERSE4]], ptr [[TMP22]], i32 4, <vscale x 4 x i1> [[REVERSE3]])
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; NO-VP-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; NO-VP: middle.block:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label [[LOOPEND:%.*]], label [[ENTRY]]
+; NO-VP: scalar.ph:
+; NO-VP-NEXT: [[STARTVAL:%.*]] = phi i64 [ [[TMP6]], [[MIDDLE_BLOCK]] ], [ [[STARTVAL1]], [[ENTRY1:%.*]] ]
+; NO-VP-NEXT: [[BC_RESUME_VAL5:%.*]] = phi i32 [ [[TMP7]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1]] ]
+; NO-VP-NEXT: br label [[FOR_BODY1:%.*]]
; NO-VP: for.body:
-; NO-VP-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL:%.*]], [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[FOR_INC:%.*]] ]
-; NO-VP-NEXT: [[I:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC:%.*]], [[FOR_INC]] ]
+; NO-VP-NEXT: [[ADD_PHI:%.*]] = phi i64 [ [[STARTVAL]], [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_INC:%.*]] ]
+; NO-VP-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL5]], [[ENTRY]] ], [ [[INC:%.*]], [[FOR_INC]] ]
; NO-VP-NEXT: [[ADD]] = add i64 [[ADD_PHI]], -1
-; NO-VP-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR:%.*]], i32 [[I]]
+; NO-VP-NEXT: [[GEPL:%.*]] = getelementptr inbounds i32, ptr [[PTR]], i32 [[I]]
; NO-VP-NEXT: [[TMP:%.*]] = load i32, ptr [[GEPL]], align 4
; NO-VP-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP]], 100
; NO-VP-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[FOR_INC]]
; NO-VP: if.then:
-; NO-VP-NEXT: [[GEPL1:%.*]] = getelementptr inbounds i32, ptr [[PTR1:%.*]], i64 [[ADD]]
+; NO-VP-NEXT: [[GEPL1:%.*]] = getelementptr inbounds i32, ptr [[PTR1]], i64 [[ADD]]
; NO-VP-NEXT: [[V:%.*]] = load i32, ptr [[GEPL1]], align 4
-; NO-VP-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2:%.*]], i64 [[ADD]]
+; NO-VP-NEXT: [[GEPS:%.*]] = getelementptr inbounds i32, ptr [[PTR2]], i64 [[ADD]]
; NO-VP-NEXT: store i32 [[V]], ptr [[GEPS]], align 4
; NO-VP-NEXT: br label [[FOR_INC]]
; NO-VP: for.inc:
; NO-VP-NEXT: [[INC]] = add i32 [[I]], 1
; NO-VP-NEXT: [[EXITCOND:%.*]] = icmp ne i32 [[INC]], 1024
-; NO-VP-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[LOOPEND:%.*]]
+; NO-VP-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY1]], label [[LOOPEND]], !llvm.loop [[LOOP5:![0-9]+]]
; NO-VP: loopend:
; NO-VP-NEXT: ret void
;
@@ -318,20 +408,49 @@ define void @multiple_reverse_vector_pointer(ptr noalias %a, ptr noalias %b, ptr
;
; NO-VP-LABEL: @multiple_reverse_vector_pointer(
; NO-VP-NEXT: entry:
+; NO-VP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; NO-VP: vector.ph:
; NO-VP-NEXT: br label [[LOOP:%.*]]
+; NO-VP: vector.body:
+; NO-VP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[LOOP]] ]
+; NO-VP-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1024, [[INDEX]]
+; NO-VP-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[OFFSET_IDX]]
+; NO-VP-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[TMP0]], i32 0
+; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[TMP1]], i32 -15
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP2]], align 1
+; NO-VP-NEXT: [[REVERSE:%.*]] = shufflevector <16 x i8> [[WIDE_LOAD]], <16 x i8> poison, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; NO-VP-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[B:%.*]], <16 x i8> [[REVERSE]]
+; NO-VP-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> [[TMP3]], i32 1, <16 x i1> splat (i1 true), <16 x i8> poison)
+; NO-VP-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[C:%.*]], i64 [[OFFSET_IDX]]
+; NO-VP-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[TMP4]], i32 0
+; NO-VP-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[TMP5]], i32 -15
+; NO-VP-NEXT: [[REVERSE1:%.*]] = shufflevector <16 x i8> [[WIDE_MASKED_GATHER]], <16 x i8> poison, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; NO-VP-NEXT: store <16 x i8> [[REVERSE1]], ptr [[TMP6]], align 1
+; NO-VP-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[D:%.*]], i64 [[OFFSET_IDX]]
+; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[TMP7]], i32 0
+; NO-VP-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP8]], i32 -15
+; NO-VP-NEXT: store <16 x i8> [[REVERSE1]], ptr [[TMP9]], align 1
+; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; NO-VP-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; NO-VP-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[LOOP]], !llvm.loop [[LOOP6:![0-9]+]]
+; NO-VP: middle.block:
+; NO-VP-NEXT: br label [[SCALAR_PH]]
+; NO-VP: scalar.ph:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[MIDDLE_BLOCK]] ], [ 1024, [[ENTRY:%.*]] ]
+; NO-VP-NEXT: br label [[LOOP1:%.*]]
; NO-VP: loop:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 1024, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
-; NO-VP-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[IV]]
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP1]] ]
+; NO-VP-NEXT: [[GEP_A:%.*]] = getelementptr i8, ptr [[A]], i64 [[IV]]
; NO-VP-NEXT: [[X:%.*]] = load i8, ptr [[GEP_A]], align 1
-; NO-VP-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[B:%.*]], i8 [[X]]
+; NO-VP-NEXT: [[GEP_B:%.*]] = getelementptr i8, ptr [[B]], i8 [[X]]
; NO-VP-NEXT: [[Y:%.*]] = load i8, ptr [[GEP_B]], align 1
-; NO-VP-NEXT: [[GEP_C:%.*]] = getelementptr i8, ptr [[C:%.*]], i64 [[IV]]
+; NO-VP-NEXT: [[GEP_C:%.*]] = getelementptr i8, ptr [[C]], i64 [[IV]]
; NO-VP-NEXT: store i8 [[Y]], ptr [[GEP_C]], align 1
-; NO-VP-NEXT: [[GEP_D:%.*]] = getelementptr i8, ptr [[D:%.*]], i64 [[IV]]
+; NO-VP-NEXT: [[GEP_D:%.*]] = getelementptr i8, ptr [[D]], i64 [[IV]]
; NO-VP-NEXT: store i8 [[Y]], ptr [[GEP_D]], align 1
; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], -1
; NO-VP-NEXT: [[CMP_NOT:%.*]] = icmp eq i64 [[IV]], 0
-; NO-VP-NEXT: br i1 [[CMP_NOT]], label [[EXIT:%.*]], label [[LOOP]]
+; NO-VP-NEXT: br i1 [[CMP_NOT]], label [[EXIT:%.*]], label [[LOOP1]], !llvm.loop [[LOOP7:![0-9]+]]
; NO-VP: exit:
; NO-VP-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-safe-dep-distance.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
index 2dd017c..a891eef 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-safe-dep-distance.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-safe-dep-distance.ll
@@ -1,12 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=IF-EVL
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck %s --check-prefix=NO-VP
; Dependence distance between read and write is greater than the trip
@@ -61,17 +59,44 @@ define void @test(ptr %p) {
;
; NO-VP-LABEL: @test(
; NO-VP-NEXT: entry:
+; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 200, [[TMP1]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]]
+; NO-VP: vector.ph:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 200, [[TMP3]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 200, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; NO-VP-NEXT: br label [[LOOP:%.*]]
-; NO-VP: loop:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; NO-VP: vector.body:
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; NO-VP-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[IV]]
-; NO-VP-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 8
-; NO-VP-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 200
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[A1]], align 8
+; NO-VP-NEXT: [[TMP7:%.*]] = add i64 [[IV]], 200
+; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP7]]
+; NO-VP-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD]], ptr [[TMP8]], align 8
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP5]]
+; NO-VP-NEXT: [[TMP9:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[LOOP]], !llvm.loop [[LOOP0:![0-9]+]]
+; NO-VP: middle.block:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 200, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; NO-VP: scalar.ph:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ]
+; NO-VP-NEXT: br label [[LOOP1:%.*]]
+; NO-VP: loop:
+; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[LOOP1]] ]
+; NO-VP-NEXT: [[A3:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV1]]
+; NO-VP-NEXT: [[V:%.*]] = load i64, ptr [[A3]], align 8
+; NO-VP-NEXT: [[OFFSET:%.*]] = add i64 [[IV1]], 200
; NO-VP-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]]
; NO-VP-NEXT: store i64 [[V]], ptr [[A2]], align 8
-; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]]
+; NO-VP-NEXT: [[IV_NEXT1]] = add i64 [[IV1]], 1
+; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV1]], 199
+; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP1]], label [[EXIT]], !llvm.loop [[LOOP3:![0-9]+]]
; NO-VP: exit:
; NO-VP-NEXT: ret void
;
@@ -143,7 +168,7 @@ define void @test_may_clobber1(ptr %p) {
; NO-VP-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP4]], align 32
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; NO-VP-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
-; NO-VP-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; NO-VP-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; NO-VP: middle.block:
; NO-VP-NEXT: br label [[EXIT:%.*]]
; NO-VP: scalar.ph:
@@ -158,7 +183,7 @@ define void @test_may_clobber1(ptr %p) {
; NO-VP-NEXT: store i64 [[V]], ptr [[A2]], align 32
; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP3:![0-9]+]]
+; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP5:![0-9]+]]
; NO-VP: exit:
; NO-VP-NEXT: ret void
;
@@ -279,7 +304,7 @@ define void @test_may_clobber3(ptr %p) {
; NO-VP-NEXT: store <2 x i64> [[WIDE_LOAD]], ptr [[TMP4]], align 32
; NO-VP-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
; NO-VP-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 200
-; NO-VP-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; NO-VP-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; NO-VP: middle.block:
; NO-VP-NEXT: br label [[EXIT:%.*]]
; NO-VP: scalar.ph:
@@ -294,7 +319,7 @@ define void @test_may_clobber3(ptr %p) {
; NO-VP-NEXT: store i64 [[V]], ptr [[A2]], align 32
; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP5:![0-9]+]]
+; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT]], !llvm.loop [[LOOP7:![0-9]+]]
; NO-VP: exit:
; NO-VP-NEXT: ret void
;
@@ -366,17 +391,44 @@ define void @trivial_due_max_vscale(ptr %p) {
;
; NO-VP-LABEL: @trivial_due_max_vscale(
; NO-VP-NEXT: entry:
+; NO-VP-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
+; NO-VP-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 200, [[TMP1]]
+; NO-VP-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]]
+; NO-VP: vector.ph:
+; NO-VP-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
+; NO-VP-NEXT: [[N_MOD_VF:%.*]] = urem i64 200, [[TMP3]]
+; NO-VP-NEXT: [[N_VEC:%.*]] = sub i64 200, [[N_MOD_VF]]
+; NO-VP-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; NO-VP-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
; NO-VP-NEXT: br label [[LOOP:%.*]]
-; NO-VP: loop:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; NO-VP: vector.body:
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; NO-VP-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[IV]]
-; NO-VP-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
-; NO-VP-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 8192
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[A1]], align 32
+; NO-VP-NEXT: [[TMP7:%.*]] = add i64 [[IV]], 8192
+; NO-VP-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP7]]
+; NO-VP-NEXT: store <vscale x 2 x i64> [[WIDE_LOAD]], ptr [[TMP8]], align 32
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], [[TMP5]]
+; NO-VP-NEXT: [[TMP9:%.*]] = icmp eq i64 [[IV_NEXT]], [[N_VEC]]
+; NO-VP-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[LOOP]], !llvm.loop [[LOOP8:![0-9]+]]
+; NO-VP: middle.block:
+; NO-VP-NEXT: [[CMP_N:%.*]] = icmp eq i64 200, [[N_VEC]]
+; NO-VP-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
+; NO-VP: scalar.ph:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ]
+; NO-VP-NEXT: br label [[LOOP1:%.*]]
+; NO-VP: loop:
+; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[LOOP1]] ]
+; NO-VP-NEXT: [[A3:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV1]]
+; NO-VP-NEXT: [[V:%.*]] = load i64, ptr [[A3]], align 32
+; NO-VP-NEXT: [[OFFSET:%.*]] = add i64 [[IV1]], 8192
; NO-VP-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]]
; NO-VP-NEXT: store i64 [[V]], ptr [[A2]], align 32
-; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 199
-; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]]
+; NO-VP-NEXT: [[IV_NEXT1]] = add i64 [[IV1]], 1
+; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV1]], 199
+; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP1]], label [[EXIT]], !llvm.loop [[LOOP9:![0-9]+]]
; NO-VP: exit:
; NO-VP-NEXT: ret void
;
@@ -448,17 +500,34 @@ define void @no_high_lmul_or_interleave(ptr %p) {
;
; NO-VP-LABEL: @no_high_lmul_or_interleave(
; NO-VP-NEXT: entry:
+; NO-VP-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[ENTRY:%.*]]
+; NO-VP: vector.ph:
; NO-VP-NEXT: br label [[LOOP:%.*]]
-; NO-VP: loop:
-; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; NO-VP: vector.body:
+; NO-VP-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; NO-VP-NEXT: [[A1:%.*]] = getelementptr i64, ptr [[P:%.*]], i64 [[IV]]
-; NO-VP-NEXT: [[V:%.*]] = load i64, ptr [[A1]], align 32
-; NO-VP-NEXT: [[OFFSET:%.*]] = add i64 [[IV]], 1024
+; NO-VP-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[A1]], align 32
+; NO-VP-NEXT: [[TMP1:%.*]] = add i64 [[IV]], 1024
+; NO-VP-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]]
+; NO-VP-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP2]], align 32
+; NO-VP-NEXT: [[IV_NEXT]] = add nuw i64 [[IV]], 4
+; NO-VP-NEXT: [[TMP3:%.*]] = icmp eq i64 [[IV_NEXT]], 3000
+; NO-VP-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[LOOP]], !llvm.loop [[LOOP10:![0-9]+]]
+; NO-VP: middle.block:
+; NO-VP-NEXT: br label [[SCALAR_PH]]
+; NO-VP: scalar.ph:
+; NO-VP-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 3000, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY1:%.*]] ]
+; NO-VP-NEXT: br label [[LOOP1:%.*]]
+; NO-VP: loop:
+; NO-VP-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], [[LOOP1]] ]
+; NO-VP-NEXT: [[A3:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV1]]
+; NO-VP-NEXT: [[V:%.*]] = load i64, ptr [[A3]], align 32
+; NO-VP-NEXT: [[OFFSET:%.*]] = add i64 [[IV1]], 1024
; NO-VP-NEXT: [[A2:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]]
; NO-VP-NEXT: store i64 [[V]], ptr [[A2]], align 32
-; NO-VP-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
-; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV]], 3001
-; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP]], label [[EXIT:%.*]]
+; NO-VP-NEXT: [[IV_NEXT1]] = add i64 [[IV1]], 1
+; NO-VP-NEXT: [[CMP:%.*]] = icmp ne i64 [[IV1]], 3001
+; NO-VP-NEXT: br i1 [[CMP]], label [[LOOP1]], label [[EXIT:%.*]], !llvm.loop [[LOOP11:![0-9]+]]
; NO-VP: exit:
; NO-VP-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-uniform-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
index 984b64c..8ab3817 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-uniform-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/tail-folding-uniform-store.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
-; RUN: opt < %s --prefer-predicate-over-epilogue=predicate-dont-vectorize --passes=loop-vectorize -mcpu=sifive-p470 -mattr=+v,+f -force-tail-folding-style=data-with-evl -S | FileCheck %s
+; RUN: opt < %s --prefer-predicate-over-epilogue=predicate-else-scalar-epilogue --passes=loop-vectorize -mcpu=sifive-p470 -mattr=+v,+f -S | FileCheck %s
; Generated from issue #109468.
; In this test case, the vector store with a tail mask will be transformed into a VP intrinsic with EVL.
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
index 1e26d18..27d7bd0 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/truncate-to-minimal-bitwidth-evl-crash.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt -passes=loop-vectorize -force-tail-folding-style=data-with-evl -prefer-predicate-over-epilogue=predicate-dont-vectorize -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s
+; RUN: opt -passes=loop-vectorize -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s
; Make sure we don't crash when transforming a VPWidenCastRecipe created without
; an underlying value to an EVL recipe. This occurs in this test via
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll b/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll
index 0e1ec57..27abddf 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/type-info-cache-evl-crash.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
-; RUN: opt -passes=loop-vectorize -force-tail-folding-style=data-with-evl -prefer-predicate-over-epilogue=predicate-dont-vectorize -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s
+; RUN: opt -passes=loop-vectorize -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s
; This test tries to recreate the conditions for a crash that occurred when the
; VPTypeAnalysis cache wasn't cleared after a recipe was erased and clobbered
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
index 9062542..d3c2ccf 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5
; RUN: opt < %s -passes=loop-vectorize -scalable-vectorization=on -riscv-v-vector-bits-min=0 -mtriple riscv64-linux-gnu -mattr=+v,+f -S 2>%t | FileCheck %s -check-prefix=SCALABLE
; RUN: opt < %s -passes=loop-vectorize -scalable-vectorization=off -mtriple riscv64-linux-gnu -mattr=+v,+f -S 2>%t | FileCheck %s -check-prefix=FIXEDLEN
-; RUN: opt < %s -passes=loop-vectorize -scalable-vectorization=on -riscv-v-vector-bits-min=0 -prefer-predicate-over-epilogue=predicate-dont-vectorize -mtriple riscv64-linux-gnu -mattr=+v,+f -S 2>%t | FileCheck %s -check-prefix=TF-SCALABLE
+; RUN: opt < %s -passes=loop-vectorize -scalable-vectorization=on -riscv-v-vector-bits-min=0 -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -mtriple riscv64-linux-gnu -mattr=+v,+f -S 2>%t | FileCheck %s -check-prefix=TF-SCALABLE
target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
target triple = "riscv64"
@@ -226,17 +226,44 @@ define i64 @uniform_load_outside_use(ptr noalias nocapture %a, ptr noalias nocap
; TF-SCALABLE-LABEL: define i64 @uniform_load_outside_use(
; TF-SCALABLE-SAME: ptr noalias captures(none) [[A:%.*]], ptr noalias captures(none) [[B:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; TF-SCALABLE-NEXT: [[ENTRY:.*]]:
-; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
-; TF-SCALABLE: [[FOR_BODY]]:
-; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; TF-SCALABLE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; TF-SCALABLE-NEXT: [[TMP1:%.*]] = mul nuw i64 [[TMP0]], 2
+; TF-SCALABLE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
+; TF-SCALABLE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; TF-SCALABLE: [[VECTOR_PH]]:
+; TF-SCALABLE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; TF-SCALABLE-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 2
+; TF-SCALABLE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
+; TF-SCALABLE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
+; TF-SCALABLE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; TF-SCALABLE-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP4]], 2
+; TF-SCALABLE-NEXT: br label %[[VECTOR_BODY:.*]]
+; TF-SCALABLE: [[VECTOR_BODY]]:
+; TF-SCALABLE-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; TF-SCALABLE-NEXT: [[V:%.*]] = load i64, ptr [[B]], align 8
+; TF-SCALABLE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[V]], i64 0
+; TF-SCALABLE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
; TF-SCALABLE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
-; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
-; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; TF-SCALABLE-NEXT: store <vscale x 2 x i64> [[BROADCAST_SPLAT]], ptr [[ARRAYIDX]], align 8
+; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[IV]], [[TMP5]]
+; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; TF-SCALABLE: [[MIDDLE_BLOCK]]:
+; TF-SCALABLE-NEXT: [[CMP_N:%.*]] = icmp eq i64 1025, [[N_VEC]]
+; TF-SCALABLE-NEXT: br i1 [[CMP_N]], label %[[FOR_END:.*]], label %[[SCALAR_PH]]
+; TF-SCALABLE: [[SCALAR_PH]]:
+; TF-SCALABLE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; TF-SCALABLE-NEXT: br label %[[FOR_BODY:.*]]
+; TF-SCALABLE: [[FOR_BODY]]:
+; TF-SCALABLE-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; TF-SCALABLE-NEXT: [[V1:%.*]] = load i64, ptr [[B]], align 8
+; TF-SCALABLE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV1]]
+; TF-SCALABLE-NEXT: store i64 [[V1]], ptr [[ARRAYIDX1]], align 8
+; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END:.*]], label %[[FOR_BODY]]
+; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; TF-SCALABLE: [[FOR_END]]:
-; TF-SCALABLE-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V]], %[[FOR_BODY]] ]
+; TF-SCALABLE-NEXT: [[V_LCSSA:%.*]] = phi i64 [ [[V1]], %[[FOR_BODY]] ], [ [[V]], %[[MIDDLE_BLOCK]] ]
; TF-SCALABLE-NEXT: ret i64 [[V_LCSSA]]
;
entry:
@@ -404,7 +431,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]]
; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; TF-SCALABLE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; TF-SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; TF-SCALABLE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
; TF-SCALABLE: [[SCALAR_PH]]:
@@ -423,7 +450,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
; TF-SCALABLE-NEXT: store i64 [[PHI]], ptr [[ARRAYIDX]], align 8
; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; TF-SCALABLE: [[FOR_END]]:
; TF-SCALABLE-NEXT: ret void
;
@@ -555,7 +582,7 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP8]], [[INDEX]]
; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]]
; TF-SCALABLE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; TF-SCALABLE-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
; TF-SCALABLE: [[SCALAR_PH]]:
@@ -568,7 +595,7 @@ define void @uniform_load_unaligned(ptr noalias nocapture %a, ptr noalias nocapt
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; TF-SCALABLE: [[FOR_END]]:
; TF-SCALABLE-NEXT: ret void
;
@@ -693,7 +720,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]]
; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]]
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
+; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
; TF-SCALABLE: [[SCALAR_PH]]:
@@ -706,7 +733,7 @@ define void @uniform_store(ptr noalias nocapture %a, ptr noalias nocapture %b, i
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; TF-SCALABLE: [[FOR_END]]:
; TF-SCALABLE-NEXT: ret void
;
@@ -858,7 +885,7 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias
; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]]
; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]]
; TF-SCALABLE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; TF-SCALABLE-NEXT: br i1 [[TMP12]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
; TF-SCALABLE: [[SCALAR_PH]]:
@@ -871,7 +898,7 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; TF-SCALABLE: [[FOR_END]]:
; TF-SCALABLE-NEXT: ret void
;
@@ -1037,7 +1064,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]]
; TF-SCALABLE-NEXT: [[VEC_IND_NEXT]] = add <vscale x 2 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; TF-SCALABLE-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; TF-SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; TF-SCALABLE-NEXT: br i1 [[TMP13]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
; TF-SCALABLE: [[SCALAR_PH]]:
@@ -1055,7 +1082,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
; TF-SCALABLE: [[FOR_END]]:
; TF-SCALABLE-NEXT: ret void
;
@@ -1186,7 +1213,7 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap
; TF-SCALABLE-NEXT: [[INDEX_EVL_NEXT]] = add nuw i64 [[TMP7]], [[INDEX]]
; TF-SCALABLE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP4]]
; TF-SCALABLE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; TF-SCALABLE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; TF-SCALABLE: [[MIDDLE_BLOCK]]:
; TF-SCALABLE-NEXT: br label %[[FOR_END:.*]]
; TF-SCALABLE: [[SCALAR_PH]]:
@@ -1199,7 +1226,7 @@ define void @uniform_store_unaligned(ptr noalias nocapture %a, ptr noalias nocap
; TF-SCALABLE-NEXT: store i64 [[V]], ptr [[ARRAYIDX]], align 8
; TF-SCALABLE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; TF-SCALABLE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1025
-; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
+; TF-SCALABLE-NEXT: br i1 [[EXITCOND_NOT]], label %[[FOR_END]], label %[[FOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
; TF-SCALABLE: [[FOR_END]]:
; TF-SCALABLE-NEXT: ret void
;
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
index 74eb094..7537a43 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-vp-intrinsics.ll
@@ -1,12 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck --check-prefix=IF-EVL %s
; RUN: opt -passes=loop-vectorize \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -S < %s | FileCheck --check-prefix=NO-VP %s
define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll
index 9652351..7f1066c 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-call-intrinsics.ll
@@ -1,8 +1,7 @@
; REQUIRES: asserts
; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefix=IF-EVL %s
define void @vp_smax(ptr %a, ptr %b, ptr %c, i64 %N) {
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll
index 3ec48ef..c1b656a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-cast-intrinsics.ll
@@ -1,7 +1,6 @@
; REQUIRES: asserts
; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefix=IF-EVL %s
define void @vp_sext(ptr %a, ptr %b, i64 %N) {
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll
index 7f29213..9900602 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-fixed-order-recurrence.ll
@@ -1,8 +1,7 @@
; REQUIRES: asserts
; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefix=IF-EVL %s
define void @first_order_recurrence(ptr noalias %A, ptr noalias %B, i64 %TC) {
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll
index baf546b..1c9554d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics-reduction.ll
@@ -1,25 +1,21 @@
; REQUIRES: asserts
; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefixes=IF-EVL-OUTLOOP,IF-EVL %s
; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \
; RUN: -prefer-inloop-reductions \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefixes=IF-EVL-INLOOP,IF-EVL %s
; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefixes=NO-VP-OUTLOOP %s
; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \
; RUN: -prefer-inloop-reductions \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefixes=NO-VP-INLOOP %s
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll
index 97a6130..42a846a 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vplan-vp-intrinsics.ll
@@ -1,13 +1,11 @@
; REQUIRES: asserts
; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \
-; RUN: -force-tail-folding-style=data-with-evl \
-; RUN: -prefer-predicate-over-epilogue=predicate-dont-vectorize \
+; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefixes=IF-EVL,CHECK %s
; RUN: opt -passes=loop-vectorize -debug-only=loop-vectorize \
-; RUN: -force-tail-folding-style=none \
-; RUN: -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue \
+; RUN: -prefer-predicate-over-epilogue=scalar-epilogue \
; RUN: -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -disable-output < %s 2>&1 | FileCheck --check-prefixes=NO-VP,CHECK %s
define void @foo(ptr noalias %a, ptr noalias %b, ptr noalias %c, i64 %N) {
diff --git a/llvm/test/Transforms/MemProfContextDisambiguation/func_assign_fix.ll b/llvm/test/Transforms/MemProfContextDisambiguation/func_assign_fix.ll
deleted file mode 100644
index 29c58c9..0000000
--- a/llvm/test/Transforms/MemProfContextDisambiguation/func_assign_fix.ll
+++ /dev/null
@@ -1,130 +0,0 @@
-;; Make sure we assign the original callsite to a function clone (which will be
-;; the original function clone), even when we cannot update its caller (due to
-;; missing metadata e.g. from mismatched profiles). Otherwise we will try to use
-;; the original function for a different clone, leading to confusion later when
-;; rewriting the calls.
-
-;; -stats requires asserts
-; REQUIRES: asserts
-
-; RUN: opt -passes=memprof-context-disambiguation -supports-hot-cold-new \
-; RUN: -memprof-verify-ccg -memprof-verify-nodes -stats -debug \
-; RUN: -pass-remarks=memprof-context-disambiguation %s -S 2>&1 | \
-; RUN: FileCheck %s --implicit-check-not="Mismatch in call clone assignment" \
-; RUN: --implicit-check-not="Number of callsites assigned to call multiple non-matching clones"
-
-
-; ModuleID = '<stdin>'
-source_filename = "reduced.ll"
-target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-grtev4-linux-gnu"
-
-; CHECK-LABEL: define void @A()
-define void @A() {
- ; CHECK: call void @C()
- call void @C()
- ret void
-}
-
-; CHECK-LABEL: define void @B()
-define void @B() {
- ; CHECK: call void @C.memprof.1()
- call void @C(), !callsite !1
- ret void
-}
-
-; CHECK-LABEL: define void @C()
-define void @C() {
- ; CHECK: call void @F()
- call void @F(), !callsite !16
- ; CHECK: call void @D()
- call void @D(), !callsite !2
- ret void
-}
-
-; CHECK-LABEL: define void @D()
-define void @D() {
- ; CHECK: call void @E()
- call void @E(), !callsite !3
- ; CHECK: call void @G()
- call void @G(), !callsite !17
- ret void
-}
-
-; CHECK-LABEL: define void @E()
-define void @E() {
- ; CHECK: call ptr @_Znwm(i64 0) #[[NOTCOLD:[0-9]+]]
- %1 = call ptr @_Znwm(i64 0), !memprof !4, !callsite !9
- ret void
-}
-
-; CHECK-LABEL: define void @F()
-define void @F() {
- ; CHECK: call void @G()
- call void @G(), !callsite !17
- ret void
-}
-
-; CHECK-LABEL: define void @G()
-define void @G() {
- ; CHECK: call ptr @_Znwm(i64 0) #[[NOTCOLD]]
- %2 = call ptr @_Znwm(i64 0), !memprof !10, !callsite !15
- ret void
-}
-
-; CHECK-LABEL: define void @A1()
-define void @A1() {
- ; CHECK: call void @C()
- call void @C(), !callsite !18
- ret void
-}
-
-; CHECK-LABEL: define void @B1()
-define void @B1() {
- ; CHECK: call void @C.memprof.1()
- call void @C(), !callsite !19
- ret void
-}
-
-; CHECK-LABEL: define void @C.memprof.1()
- ; CHECK: call void @F.memprof.1()
- ; CHECK: call void @D.memprof.1()
-
-; CHECK-LABEL: define void @D.memprof.1()
- ; CHECK: call void @E.memprof.1()
- ; CHECK: call void @G()
-
-; CHECK-LABEL: define void @E.memprof.1()
- ; CHECK: call ptr @_Znwm(i64 0) #[[COLD:[0-9]+]]
-
-; CHECK-LABEL: define void @F.memprof.1()
- ; CHECK: call void @G.memprof.1()
-
-; CHECK-LABEL: define void @G.memprof.1()
- ; CHECK: call ptr @_Znwm(i64 0) #[[COLD]]
-
-declare ptr @_Znwm(i64)
-
-; IR: attributes #[[NOTCOLD]] = { "memprof"="notcold" }
-; IR: attributes #[[COLD]] = { "memprof"="cold" }
-
-!0 = !{i64 123}
-!1 = !{i64 234}
-!2 = !{i64 345}
-!3 = !{i64 456}
-!4 = !{!5, !7}
-!5 = !{!6, !"notcold"}
-!6 = !{i64 567, i64 456, i64 345, i64 123}
-!7 = !{!8, !"cold"}
-!8 = !{i64 567, i64 456, i64 345, i64 234}
-!9 = !{i64 567}
-!10 = !{!11, !13}
-!11 = !{!12, !"notcold"}
-!12 = !{i64 678, i64 891, i64 789, i64 912}
-!13 = !{!14, !"cold"}
-!14 = !{i64 678, i64 891, i64 789, i64 812}
-!15 = !{i64 678}
-!16 = !{i64 789}
-!17 = !{i64 891}
-!18 = !{i64 912}
-!19 = !{i64 812}
diff --git a/llvm/utils/gn/build/write_vcsrevision.py b/llvm/utils/gn/build/write_vcsrevision.py
index afd6aae..3a627ee 100755
--- a/llvm/utils/gn/build/write_vcsrevision.py
+++ b/llvm/utils/gn/build/write_vcsrevision.py
@@ -6,22 +6,13 @@ import argparse
import os
import subprocess
import sys
+import shutil
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
LLVM_DIR = os.path.dirname(os.path.dirname(os.path.dirname(THIS_DIR)))
-def which(program):
- # distutils.spawn.which() doesn't find .bat files,
- # https://bugs.python.org/issue2200
- for path in os.environ["PATH"].split(os.pathsep):
- candidate = os.path.join(path, program)
- if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
- return candidate
- return None
-
-
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
@@ -46,11 +37,11 @@ def main():
vcsrevision_contents = ""
if args.write_git_rev:
- git, use_shell = which("git"), False
+ git, use_shell = shutil.which("git"), False
if not git:
- git = which("git.exe")
+ git = shutil.which("git.exe")
if not git:
- git, use_shell = which("git.bat"), True
+ git, use_shell = shutil.which("git.bat"), True
git_dir = (
subprocess.check_output(
[git, "rev-parse", "--git-dir"], cwd=LLVM_DIR, shell=use_shell