diff options
| author | Andrei Elovikov <andrei.elovikov@sifive.com> | 2026-02-25 10:01:33 -0800 |
|---|---|---|
| committer | Andrei Elovikov <andrei.elovikov@sifive.com> | 2026-03-06 10:10:14 -0800 |
| commit | dd155ea613313dcc674ef87b4a9b2260b53fdbb5 (patch) | |
| tree | e00477fa5f354d2cd61ebb60ffa5c6731125d64e | |
| parent | 56ea5c963f27dde8d7385dad8224c9d38c962341 (diff) | |
| download | llvm-users/eas/vplan-based-stride-mv-rt-guard.tar.gz llvm-users/eas/vplan-based-stride-mv-rt-guard.tar.bz2 llvm-users/eas/vplan-based-stride-mv-rt-guard.zip | |
[VPlan] Implement VPlan-based stride speculation (branch: users/eas/vplan-based-stride-mv-rt-guard)
| -rw-r--r-- | llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h | 6 | ||||
| -rw-r--r-- | llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 10 | ||||
| -rw-r--r-- | llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h | 12 | ||||
| -rw-r--r-- | llvm/lib/Transforms/Vectorize/VPlan.cpp | 4 | ||||
| -rw-r--r-- | llvm/lib/Transforms/Vectorize/VPlan.h | 43 | ||||
| -rw-r--r-- | llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp | 3 | ||||
| -rw-r--r-- | llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp | 7 | ||||
| -rw-r--r-- | llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp | 251 | ||||
| -rw-r--r-- | llvm/lib/Transforms/Vectorize/VPlanTransforms.h | 6 | ||||
| -rw-r--r-- | llvm/lib/Transforms/Vectorize/VPlanUtils.cpp | 5 | ||||
| -rw-r--r-- | llvm/test/Transforms/LoopVectorize/VPlan/vplan-based-stride-mv.ll | 449 | ||||
| -rw-r--r-- | llvm/test/Transforms/LoopVectorize/vplan-based-stride-mv.ll | 2134 |
12 files changed, 1611 insertions, 1319 deletions
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h index 8368349e63ce..3a1273419d88 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h +++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationPlanner.h @@ -389,6 +389,12 @@ public: return tryInsertInstruction(new VPExpandSCEVRecipe(Expr)); } + VPExpandStridePredicatesRecipe * + createExpandSCEVPredicate(const SCEVUnionPredicate &StridePredicates) { + return tryInsertInstruction( + new VPExpandStridePredicatesRecipe(StridePredicates)); + } + //===--------------------------------------------------------------------===// // RAII helpers. //===--------------------------------------------------------------------===// diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index b31f608e10e6..e4a9dec9b77a 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -8008,6 +8008,16 @@ bool VPRecipeBuilder::replaceWithFinalIfReductionStore( return false; } +bool VPRecipeBuilder::isConsecutiveWithoutVPlanBasedStrideSpeculation( + VPInstruction *MemOp) { + auto *I = MemOp->getUnderlyingInstr(); + auto *PtrOp = getLoadStorePointerOperand(I); + auto *ScalarTy = MemOp->getOpcode() == Instruction::Load + ? I->getType() + : I->getOperand(0)->getType(); + return Legal->isConsecutivePtr(ScalarTy, PtrOp); +} + VPReplicateRecipe *VPRecipeBuilder::handleReplication(VPInstruction *VPI, VFRange &Range) { auto *I = VPI->getUnderlyingInstr(); diff --git a/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h b/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h index a908c25de3fd..5725ceeca4f1 100644 --- a/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h +++ b/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h @@ -47,11 +47,6 @@ class VPRecipeBuilder { /// created. 
SmallVector<VPHeaderPHIRecipe *, 4> PhisToFix; - /// Check if \p I can be widened at the start of \p Range and possibly - /// decrease the range such that the returned value holds for the entire \p - /// Range. The function should not be called for memory instructions or calls. - bool shouldWiden(Instruction *I, VFRange &Range) const; - /// Optimize the special case where the operand of \p VPI is a constant /// integer induction variable. VPWidenIntOrFpInductionRecipe * @@ -75,6 +70,11 @@ public: VPBuilder &getVPBuilder() const { return Builder; } + /// Check if \p I can be widened at the start of \p Range and possibly + /// decrease the range such that the returned value holds for the entire \p + /// Range. The function should not be called for memory instructions or calls. + bool shouldWiden(Instruction *I, VFRange &Range) const; + /// Create and return a widened recipe for a non-phi recipe \p R if one can be /// created within the given VF \p Range. VPRecipeBase *tryToCreateWidenNonPhiRecipe(VPSingleDefRecipe *R, @@ -100,6 +100,8 @@ public: bool replaceWithFinalIfReductionStore(VPBuilder &FinalRedStoresBuilder, VPInstruction *VPI); + bool isConsecutiveWithoutVPlanBasedStrideSpeculation(VPInstruction *MemOp); + /// Set the recipe created for given ingredient. 
void setRecipe(Instruction *I, VPRecipeBase *R) { assert(!Ingredient2Recipe.contains(I) && diff --git a/llvm/lib/Transforms/Vectorize/VPlan.cpp b/llvm/lib/Transforms/Vectorize/VPlan.cpp index 0ceeb570e8b1..83342ea427a0 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlan.cpp @@ -682,7 +682,9 @@ void VPBasicBlock::print(raw_ostream &O, const Twine &Indent, auto RecipeIndent = Indent + " "; for (const VPRecipeBase &Recipe : *this) { Recipe.print(O, RecipeIndent, SlotTracker); - O << '\n'; + // SCEVPredicate::print adds a newline so we don't want to add one for it: + if (!isa<VPExpandStridePredicatesRecipe>(Recipe)) + O << '\n'; } printSuccessors(O, Indent); diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h index da2f6f8c7cd0..1cb76eb7abac 100644 --- a/llvm/lib/Transforms/Vectorize/VPlan.h +++ b/llvm/lib/Transforms/Vectorize/VPlan.h @@ -407,6 +407,7 @@ public: VPBranchOnMaskSC, VPDerivedIVSC, VPExpandSCEVSC, + VPExpandStridePredicatesSC, VPExpressionSC, VPIRInstructionSC, VPInstructionSC, @@ -600,6 +601,7 @@ public: switch (R->getVPRecipeID()) { case VPRecipeBase::VPDerivedIVSC: case VPRecipeBase::VPExpandSCEVSC: + case VPRecipeBase::VPExpandStridePredicatesSC: case VPRecipeBase::VPExpressionSC: case VPRecipeBase::VPInstructionSC: case VPRecipeBase::VPReductionEVLSC: @@ -3791,6 +3793,47 @@ protected: #endif }; +class VPExpandStridePredicatesRecipe : public VPSingleDefRecipe { + SCEVUnionPredicate StridePredicates; + +public: + VPExpandStridePredicatesRecipe(const SCEVUnionPredicate &StridePredicates) + : VPSingleDefRecipe(VPRecipeBase::VPExpandStridePredicatesSC, {}), + StridePredicates(StridePredicates) {} + + VPExpandStridePredicatesRecipe(SCEVUnionPredicate &&StridePredicates) + : VPSingleDefRecipe(VPRecipeBase::VPExpandStridePredicatesSC, {}), + StridePredicates(std::move(StridePredicates)) {} + + ~VPExpandStridePredicatesRecipe() override = default; + + 
VPExpandStridePredicatesRecipe *clone() override { + return new VPExpandStridePredicatesRecipe(StridePredicates); + } + + VP_CLASSOF_IMPL(VPRecipeBase::VPExpandStridePredicatesSC) + + void execute(VPTransformState &State) override { + llvm_unreachable("SCEVPredicates must be expanded before final execute"); + } + + /// Return the cost of this VPExpandStridePredicatesRecipe. + InstructionCost computeCost(ElementCount VF, + VPCostContext &Ctx) const override { + // TODO: Compute accurate cost after retiring the legacy cost model. + return 0; + } + + const SCEVPredicate *getSCEVPredicate() const { return &StridePredicates; } + +protected: +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) + /// Print the recipe. + void printRecipe(raw_ostream &O, const Twine &Indent, + VPSlotTracker &SlotTracker) const override; +#endif +}; + /// Canonical scalar induction phi of the vector loop. Starting at the specified /// start value (either 0 or the resume value when vectorizing the epilogue /// loop). VPWidenCanonicalIVRecipe represents the vector version of the diff --git a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp index 998e48d411f5..ab59b5317749 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanAnalysis.cpp @@ -319,6 +319,9 @@ Type *VPTypeAnalysis::inferScalarType(const VPValue *V) { .Case([](const VPExpandSCEVRecipe *R) { return R->getSCEV()->getType(); }) + .Case([this](const VPExpandStridePredicatesRecipe *R) { + return Type::getInt1Ty(Ctx); + }) .Case([this](const VPReductionRecipe *R) { return inferScalarType(R->getChainOp()); }) diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp index d149723e11fb..9295c8cc77a6 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp @@ -4520,6 +4520,13 @@ void VPExpandSCEVRecipe::printRecipe(raw_ostream &O, const Twine &Indent, 
printAsOperand(O, SlotTracker); O << " = EXPAND SCEV " << *Expr; } + +void VPExpandStridePredicatesRecipe::printRecipe( + raw_ostream &O, const Twine &Indent, VPSlotTracker &SlotTracker) const { + O << Indent << "EMIT "; + printAsOperand(O, SlotTracker); + O << " = EXPAND SCEVPredicate " << StridePredicates; +} #endif void VPWidenCanonicalIVRecipe::execute(VPTransformState &State) { diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index ea0aded2c071..138e61981a02 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -47,6 +47,11 @@ using namespace llvm; using namespace VPlanPatternMatch; using namespace SCEVPatternMatch; +static cl::opt<bool> EnableVPlanBasedStrideMV( + "enable-vplan-based-stride-mv", cl::init(false), cl::Hidden, + cl::desc("Perform stride multiversioning directly on VPlan instead of in " + "LoopAccessAnalysis.")); + bool VPlanTransforms::tryToConvertVPInstructionsToVPRecipes( VPlan &Plan, const TargetLibraryInfo &TLI) { @@ -5158,6 +5163,16 @@ VPlanTransforms::expandSCEVs(VPlan &Plan, ScalarEvolution &SE) { for (VPRecipeBase &R : make_early_inc_range(*Entry)) { if (isa<VPIRInstruction, VPIRPhi>(&R)) continue; + if (auto *ExpStrides = dyn_cast<VPExpandStridePredicatesRecipe>(&R)) { + Value *Res = Expander.expandCodeForPredicate( + ExpStrides->getSCEVPredicate(), EntryBB->getTerminator()); + Res->setName("strides.mv.check"); + VPValue *Exp = Plan.getOrAddLiveIn(Res); + + ExpStrides->replaceAllUsesWith(Exp); + ExpStrides->eraseFromParent(); + continue; + } auto *ExpSCEV = dyn_cast<VPExpandSCEVRecipe>(&R); if (!ExpSCEV) break; @@ -5171,9 +5186,10 @@ VPlanTransforms::expandSCEVs(VPlan &Plan, ScalarEvolution &SE) { Plan.resetTripCount(Exp); ExpSCEV->eraseFromParent(); } - assert(none_of(*Entry, IsaPred<VPExpandSCEVRecipe>) && - "VPExpandSCEVRecipes must be at the beginning of the entry block, " - "before any 
VPIRInstructions"); + assert(none_of(*Entry, + IsaPred<VPExpandSCEVRecipe, VPExpandStridePredicatesRecipe>) && + "VPExpandSCEVRecipes/VPExpandStridePredicatesRecipe must be at the " + "beginning of the entry block, before any VPIRInstructions"); // Add IR instructions in the entry basic block but not in the VPIRBasicBlock // to the VPIRBasicBlock. auto EI = Entry->begin(); @@ -6343,6 +6359,10 @@ void VPlanTransforms::makeMemOpWideningDecisions(VPlan &Plan, VFRange &Range, return false; }); + if (EnableVPlanBasedStrideMV) + RUN_VPLAN_PASS(VPlanTransforms::multiversionForUnitStridedMemOps, Plan, + CostCtx, RecipeBuilder, Range, MemOps); + VPlanTransforms::runPass("delegateMemOpWideningToLegacyCM", ProcessSubset, Plan, [&](VPInstruction *VPI) { VPRecipeBase *Recipe = @@ -6355,6 +6375,231 @@ void VPlanTransforms::makeMemOpWideningDecisions(VPlan &Plan, VFRange &Range, }); } +void VPlanTransforms::multiversionForUnitStridedMemOps( + VPlan &Plan, VPCostContext &CostCtx, VPRecipeBuilder &RecipeBuilder, + VFRange &Range, SmallVectorImpl<VPInstruction *> &MemOps) { + if (CostCtx.L->getHeader()->getParent()->hasOptSize()) + return; + SmallVector<VPInstruction *> RemainingOps; + // Makes a copy of VPTypeAnalysis (not sure where the problem is). + auto Types = CostCtx.Types; + + ScalarEvolution *SE = CostCtx.PSE.getSE(); + + PredicatedScalarEvolution StrideMVPSE(*SE, const_cast<Loop &>(*CostCtx.L)); + + SCEVUnionPredicate StridePredicates({}, *SE); + + // Use `for_each` so that we could do `return Skip();`. + for_each(MemOps, [&](VPInstruction *VPI) { + auto Skip = [&]() { RemainingOps.push_back(VPI); }; + if (RecipeBuilder.isConsecutiveWithoutVPlanBasedStrideSpeculation(VPI)) + return Skip(); + auto *PtrOp = VPI->getOpcode() == Instruction::Load ? 
VPI->getOperand(0) + : VPI->getOperand(1); + + const SCEV *PtrSCEV = + vputils::getSCEVExprForVPValue(PtrOp, CostCtx.PSE, CostCtx.L); + const SCEV *Start = nullptr; + const SCEV *Stride = nullptr; + + if (!match(PtrSCEV, m_scev_AffineAddRec(m_SCEV(Start), m_SCEV(Stride), + m_SpecificLoop(CostCtx.L)))) { + return Skip(); + } + + Type *ScalarTy = Types.inferScalarType( + VPI->getOpcode() == Instruction::Load ? VPI : VPI->getOperand(0)); + + if (VPI->getMask()) { + auto &TTI = CostCtx.TTI; + Instruction *I = VPI->getUnderlyingInstr(); + unsigned AS = getLoadStoreAddressSpace(I); + const Align Alignment = getLoadStoreAlignment(I); + if (!LoopVectorizationPlanner::getDecisionAndClampRange( + [&](ElementCount VF) -> bool { + Type *VTy = VectorType::get(ScalarTy, VF); + return VPI->getOpcode() == Instruction::Load + ? (TTI.isLegalMaskedLoad(VTy, Alignment, AS) || + TTI.isLegalMaskedGather(VTy, Alignment)) + : (TTI.isLegalMaskedStore(VTy, Alignment, AS) || + TTI.isLegalMaskedScatter(VTy, Alignment)); + }, + Range)) + return Skip(); + } + + const SCEV *TypeSize = SE->getSizeOfExpr( + Stride->getType(), SE->getDataLayout().getTypeStoreSize(ScalarTy)); + + auto ReplaceWithUnitStrided = [&]() { + VPBuilder Builder(VPI); + auto *VecPtr = new VPVectorPointerRecipe( + PtrOp, ScalarTy, GEPNoWrapFlags::none(), VPI->getDebugLoc()); + Builder.insert(VecPtr); + if (VPI->getOpcode() == Instruction::Load) { + auto *WideLoad = new VPWidenLoadRecipe( + cast<LoadInst>(*VPI->getUnderlyingInstr()), VecPtr, VPI->getMask(), + true, false, *VPI, VPI->getDebugLoc()); + Builder.insert(WideLoad); + VPI->replaceAllUsesWith(WideLoad); + } else { + auto *WideStore = + new VPWidenStoreRecipe(cast<StoreInst>(*VPI->getUnderlyingInstr()), + VecPtr, VPI->getOperand(0), VPI->getMask(), + true, false, *VPI, VPI->getDebugLoc()); + Builder.insert(WideStore); + } + VPI->eraseFromParent(); + }; + + if (isa<SCEVConstant>(Stride)) { + if (Stride != TypeSize) + return Skip(); + + // Earlier MV helped with this 
memory operation too. + ReplaceWithUnitStrided(); + return; + } + + const SCEVConstant *StrideConstantMultiplier; + const SCEV *StrideNonConstantMultiplier; + + const SCEV *ToMultiVersion = Stride; + const SCEV *MVConst = TypeSize; + if (match(Stride, m_scev_c_Mul(m_SCEVConstant(StrideConstantMultiplier), + m_SCEV(StrideNonConstantMultiplier)))) { + if (TypeSize != StrideConstantMultiplier) { + // TODO: Support `TypeSize = N * StrideConstantMultiplier`, + // including negative `N`. For now, only process when they're equal, + // which matches the useful part of the legacy behavior that + // multiversions GEP index for stride one. + return Skip(); + } + ToMultiVersion = StrideNonConstantMultiplier; + MVConst = SE->getOne(ToMultiVersion->getType()); + } else if (!TypeSize->isOne()) { + // Likewise - try to match legacy behavior. + return Skip(); + } + + while (auto *C = dyn_cast<SCEVIntegralCastExpr>(ToMultiVersion)) { + ToMultiVersion = C->getOperand(); + MVConst = SE->getTruncateOrSignExtend(MVConst, ToMultiVersion->getType()); + } + + if (!isa<SCEVUnknown>(ToMultiVersion)) { + // Match legacy behavior. + // If/when changed, make sure that explicit poison/undef in the defining + // expression doesn't cause any issues. + return Skip(); + } + + Value *StrideVal = cast<SCEVUnknown>(ToMultiVersion)->getValue(); + + if (isa<UndefValue>(StrideVal)) + return Skip(); + + const SCEVPredicate *NewPred = + SE->getComparePredicate(CmpInst::ICMP_EQ, ToMultiVersion, MVConst); + + // Check if new predicate implies that backedge is never taken. If so, there + // is no reason to multiversion for it. 
+ SmallVector<const SCEVPredicate *> Preds{&CostCtx.PSE.getPredicate(), + &StridePredicates, NewPred}; + auto *PredicatedMaxBTC = SE->rewriteUsingPredicate( + SE->getSymbolicMaxBackedgeTakenCount(CostCtx.L), CostCtx.L, + StridePredicates.getUnionWith(NewPred, *SE) + .getUnionWith(&CostCtx.PSE.getPredicate(), *SE)); + + if (LoopVectorizationPlanner::getDecisionAndClampRange( + [&](ElementCount VF) { + return SE->isKnownPositive(SE->getMinusSCEV( + SE->getConstant(PredicatedMaxBTC->getType(), + VF.isScalable() ? 1 : VF.getFixedValue() - 1), + PredicatedMaxBTC)); + }, + Range)) + return Skip(); + + StridePredicates = StridePredicates.getUnionWith(NewPred, *SE); + + auto ReplaceMVUses = [&](Value *V) { + VPValue *From = Plan.getLiveIn(V); + if (!From) + return; + VPValue *To = Plan.getConstantInt( + Types.inferScalarType(From), + cast<SCEVConstant>(MVConst)->getAPInt().getLimitedValue()); + // TODO: Why is "If" necessary? + From->replaceUsesWithIf(To, [&](VPUser &U, unsigned) { + auto *R = cast<VPRecipeBase>(&U); + return R->getRegion() || + R->getParent() == + Plan.getVectorLoopRegion()->getSinglePredecessor(); + }); + }; + + ReplaceMVUses(StrideVal); + for (auto *U : StrideVal->users()) + if (isa<SExtInst, ZExtInst>(U)) + ReplaceMVUses(U); + + ReplaceWithUnitStrided(); + }); + + MemOps.swap(RemainingOps); + + if (StridePredicates.isAlwaysTrue()) + return; + + VPBasicBlock *Entry = Plan.getEntry(); + VPBuilder Builder(Entry); + + auto *Pred = Builder.createExpandSCEVPredicate(StridePredicates); + + auto *StridesCheckBB = Plan.createVPBasicBlock("strides.check"); + VPBlockBase *ScalarPH = Plan.getScalarPreheader(); + VPBlockUtils::insertBlockBefore(StridesCheckBB, Plan.getVectorPreheader()); + VPBlockUtils::connectBlocks(StridesCheckBB, ScalarPH); + // SCEVExpander::expandCodeForPredicate would negate the condition, so scalar + // preheader should be the first successor. 
+ std::swap(StridesCheckBB->getSuccessors()[0], + StridesCheckBB->getSuccessors()[1]); + Builder.setInsertPoint(StridesCheckBB); + Builder.createNaryOp(VPInstruction::BranchOnCond, Pred); + + for (VPRecipeBase &R : cast<VPBasicBlock>(ScalarPH)->phis()) { + auto &Phi = cast<VPPhi>(R); + Phi.addOperand(Phi.getIncomingValueForBlock(Entry)); + } + + auto RewriteVPExpandSCEV = [&](VPExpandSCEVRecipe *R) { + const SCEV *S = R->getSCEV(); + Builder.setInsertPoint(R); + const SCEV *NewS = + SE->rewriteUsingPredicate(S, CostCtx.L, StridePredicates); + if (NewS == S) + return; + auto *NewR = Builder.createExpandSCEV(NewS); + R->replaceAllUsesWith(NewR); + + // If this recipe is a trip count then we need to reset it explicitly. + if (R == Plan.getTripCount()) + Plan.resetTripCount(NewR); + + R->eraseFromParent(); + }; + + if (auto *R = dyn_cast<VPExpandSCEVRecipe>(Plan.getTripCount())) { + RewriteVPExpandSCEV(R); + } + + for (auto &R : make_early_inc_range(*Entry)) + if (auto *ExpandSCEV = dyn_cast<VPExpandSCEVRecipe>(&R)) + RewriteVPExpandSCEV(ExpandSCEV); +} + void VPlanTransforms::makeScalarizationDecisions(VPlan &Plan, VFRange &Range) { if (LoopVectorizationPlanner::getDecisionAndClampRange( [&](ElementCount VF) { return VF.isScalar(); }, Range)) diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h index d28effdc65ac..a8f9c650b82a 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.h +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.h @@ -511,6 +511,12 @@ struct VPlanTransforms { VPRecipeBuilder &RecipeBuilder, VPCostContext &CostCtx); + /// \p MemOps must be updated to contain ones that haven't been processed by + /// the pass. 
+ static void multiversionForUnitStridedMemOps( + VPlan &Plan, VPCostContext &CostCtx, VPRecipeBuilder &RecipeBuilder, + VFRange &Range, SmallVectorImpl<VPInstruction *> &MemOps); + /// Make VPlan-based scalarization decision prior to delegating to the ones /// made by the legacy CM. Only transforms "usesFirstLaneOnly` def-use chains /// enabled by prior widening of consecutive memory operations for now. diff --git a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp index 821a4f7911bb..debcc67b1711 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanUtils.cpp @@ -390,8 +390,9 @@ bool vputils::isSingleScalar(const VPValue *VPV) { if (auto *Expr = dyn_cast<VPExpressionRecipe>(VPV)) return Expr->isSingleScalar(); - // VPExpandSCEVRecipes must be placed in the entry and are always uniform. - return isa<VPExpandSCEVRecipe>(VPV); + // VPExpandSCEVRecipes and VPExpandStridePredicatesRecipe must be placed in + // the entry and are always uniform. 
+ return isa<VPExpandSCEVRecipe, VPExpandStridePredicatesRecipe>(VPV); } bool vputils::isUniformAcrossVFsAndUFs(VPValue *V) { diff --git a/llvm/test/Transforms/LoopVectorize/VPlan/vplan-based-stride-mv.ll b/llvm/test/Transforms/LoopVectorize/VPlan/vplan-based-stride-mv.ll index 62fe6629916f..f02192e1b468 100644 --- a/llvm/test/Transforms/LoopVectorize/VPlan/vplan-based-stride-mv.ll +++ b/llvm/test/Transforms/LoopVectorize/VPlan/vplan-based-stride-mv.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 6 ; RUN: opt < %s -p loop-vectorize -force-vector-width=4 -disable-output \ -; RUN: -vplan-print-after=scalarizeMemOpsWithIrregularTypes \ -; RUN: -enable-mem-access-versioning=false 2>&1 | FileCheck %s +; RUN: -vplan-print-after=multiversionForUnitStridedMemOps \ +; RUN: -enable-mem-access-versioning=false -enable-vplan-based-stride-mv 2>&1 | FileCheck %s define void @basic(ptr noalias %p.out, ptr %p, i64 %stride) { ; CHECK-LABEL: VPlan for loop in 'basic' @@ -12,6 +12,11 @@ define void @basic(ptr noalias %p.out, ptr %p, i64 %stride) { ; CHECK-NEXT: Live-in ir<128> = original trip-count ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = EXPAND SCEVPredicate Equal predicate: %stride == 1 +; CHECK-NEXT: Successor(s): scalar.ph, strides.check +; CHECK-EMPTY: +; CHECK-NEXT: strides.check: +; CHECK-NEXT: EMIT branch-on-cond vp<[[VP3]]> ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: @@ -19,24 +24,25 @@ define void @basic(ptr noalias %p.out, ptr %p, i64 %stride) { ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> ; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> -; 
CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<1> ; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> -; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer ir<%gep.ld> +; CHECK-NEXT: WIDEN ir<%ld> = load vp<[[VP6]]> ; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> ; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> ; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> -; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP5]]>, vp<[[VP1]]> ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<[[VP9:%[0-9]+]]> = exiting-iv-value ir<%iv> ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph @@ -45,7 +51,7 @@ define void @basic(ptr noalias %p.out, ptr %p, i64 %stride) { ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP9]]>, middle.block ], [ ir<0>, ir-bb<entry> ], [ ir<0>, strides.check ] ; CHECK-NEXT: Successor(s): ir-bb<header> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<header>: @@ -257,6 +263,11 @@ define void @byte_gep_scaled_stride(ptr noalias %p.out, ptr %p, i64 %stride) { ; CHECK-NEXT: Live-in ir<128> = original trip-count ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = EXPAND SCEVPredicate Equal 
predicate: %stride == 1 +; CHECK-NEXT: Successor(s): scalar.ph, strides.check +; CHECK-EMPTY: +; CHECK-NEXT: strides.check: +; CHECK-NEXT: EMIT branch-on-cond vp<[[VP3]]> ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: @@ -264,25 +275,26 @@ define void @byte_gep_scaled_stride(ptr noalias %p.out, ptr %p, i64 %stride) { ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> ; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> -; CHECK-NEXT: EMIT ir<%stride.x8> = mul ir<%stride>, ir<8> +; CHECK-NEXT: EMIT ir<%stride.x8> = mul ir<1>, ir<8> ; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride.x8> ; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> -; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer ir<%gep.ld> +; CHECK-NEXT: WIDEN ir<%ld> = load vp<[[VP6]]> ; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> ; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> ; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> -; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP5]]>, vp<[[VP1]]> ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<[[VP9:%[0-9]+]]> = exiting-iv-value ir<%iv> ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> ; CHECK-NEXT: EMIT branch-on-cond 
vp<%cmp.n> ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph @@ -291,7 +303,7 @@ define void @byte_gep_scaled_stride(ptr noalias %p.out, ptr %p, i64 %stride) { ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP9]]>, middle.block ], [ ir<0>, ir-bb<entry> ], [ ir<0>, strides.check ] ; CHECK-NEXT: Successor(s): ir-bb<header> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<header>: @@ -758,6 +770,11 @@ define void @shared_stride(ptr noalias %p.out, ptr %p0, ptr %p1, i64 %stride) { ; CHECK-NEXT: Live-in ir<128> = original trip-count ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = EXPAND SCEVPredicate Equal predicate: %stride == 1 +; CHECK-NEXT: Successor(s): scalar.ph, strides.check +; CHECK-EMPTY: +; CHECK-NEXT: strides.check: +; CHECK-NEXT: EMIT branch-on-cond vp<[[VP3]]> ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: @@ -765,27 +782,29 @@ define void @shared_stride(ptr noalias %p.out, ptr %p0, ptr %p1, i64 %stride) { ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> ; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> -; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<1> ; CHECK-NEXT: EMIT ir<%gep.ld0> = getelementptr ir<%p0>, ir<%idx> ; CHECK-NEXT: EMIT ir<%gep.ld1> = getelementptr ir<%p1>, ir<%idx> -; CHECK-NEXT: EMIT-SCALAR ir<%ld0> = load ir<%gep.ld0> -; CHECK-NEXT: EMIT-SCALAR ir<%ld1> = load ir<%gep.ld1> +; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer ir<%gep.ld0> +; CHECK-NEXT: WIDEN ir<%ld0> 
= load vp<[[VP6]]> +; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = vector-pointer ir<%gep.ld1> +; CHECK-NEXT: WIDEN ir<%ld1> = load vp<[[VP7]]> ; CHECK-NEXT: EMIT ir<%val> = add ir<%ld0>, ir<%ld1> ; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> ; CHECK-NEXT: EMIT store ir<%val>, ir<%gep.st> ; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> -; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT vp<[[VP8:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP5]]>, vp<[[VP1]]> ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<[[VP10:%[0-9]+]]> = exiting-iv-value ir<%iv> ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph @@ -794,7 +813,7 @@ define void @shared_stride(ptr noalias %p.out, ptr %p0, ptr %p1, i64 %stride) { ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP10]]>, middle.block ], [ ir<0>, ir-bb<entry> ], [ ir<0>, strides.check ] ; CHECK-NEXT: Successor(s): ir-bb<header> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<header>: @@ -847,6 +866,11 @@ define void @dependent_strides(ptr noalias %p.out, ptr %p0, ptr %p1, i64 %stride ; CHECK-NEXT: Live-in ir<128> = original trip-count ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = EXPAND SCEVPredicate Equal predicate: %stride == 1 +; CHECK-NEXT: Successor(s): scalar.ph, strides.check +; CHECK-EMPTY: +; CHECK-NEXT: strides.check: +; 
CHECK-NEXT: EMIT branch-on-cond vp<[[VP3]]> ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: @@ -854,29 +878,30 @@ define void @dependent_strides(ptr noalias %p.out, ptr %p0, ptr %p1, i64 %stride ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> ; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> -; CHECK-NEXT: EMIT ir<%stride1> = add ir<%stride>, ir<1> -; CHECK-NEXT: EMIT ir<%idx0> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%stride1> = add ir<1>, ir<1> +; CHECK-NEXT: EMIT ir<%idx0> = mul ir<%iv>, ir<1> ; CHECK-NEXT: EMIT ir<%idx1> = mul ir<%iv>, ir<%stride1> ; CHECK-NEXT: EMIT ir<%gep.ld0> = getelementptr ir<%p0>, ir<%idx0> ; CHECK-NEXT: EMIT ir<%gep.ld1> = getelementptr ir<%p1>, ir<%idx1> -; CHECK-NEXT: EMIT-SCALAR ir<%ld0> = load ir<%gep.ld0> +; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer ir<%gep.ld0> +; CHECK-NEXT: WIDEN ir<%ld0> = load vp<[[VP6]]> ; CHECK-NEXT: EMIT-SCALAR ir<%ld1> = load ir<%gep.ld1> ; CHECK-NEXT: EMIT ir<%val> = add ir<%ld0>, ir<%ld1> ; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> ; CHECK-NEXT: EMIT store ir<%val>, ir<%gep.st> ; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> -; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP5]]>, vp<[[VP1]]> ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; 
CHECK-NEXT: EMIT vp<[[VP9:%[0-9]+]]> = exiting-iv-value ir<%iv> ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph @@ -885,7 +910,7 @@ define void @dependent_strides(ptr noalias %p.out, ptr %p0, ptr %p1, i64 %stride ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP9]]>, middle.block ], [ ir<0>, ir-bb<entry> ], [ ir<0>, strides.check ] ; CHECK-NEXT: Successor(s): ir-bb<header> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<header>: @@ -944,6 +969,11 @@ define void @dependent_strides_reverse_order(ptr noalias %p.out, ptr %p0, ptr %p ; CHECK-NEXT: Live-in ir<128> = original trip-count ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = EXPAND SCEVPredicate Equal predicate: %stride == 1 +; CHECK-NEXT: Successor(s): scalar.ph, strides.check +; CHECK-EMPTY: +; CHECK-NEXT: strides.check: +; CHECK-NEXT: EMIT branch-on-cond vp<[[VP3]]> ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: @@ -951,29 +981,30 @@ define void @dependent_strides_reverse_order(ptr noalias %p.out, ptr %p0, ptr %p ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> ; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> -; CHECK-NEXT: EMIT ir<%stride0> = add ir<%stride>, ir<1> +; CHECK-NEXT: EMIT ir<%stride0> = add ir<1>, ir<1> ; CHECK-NEXT: EMIT ir<%idx0> = mul ir<%iv>, ir<%stride0> -; CHECK-NEXT: EMIT ir<%idx1> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%idx1> = mul ir<%iv>, ir<1> ; 
CHECK-NEXT: EMIT ir<%gep.ld0> = getelementptr ir<%p0>, ir<%idx0> ; CHECK-NEXT: EMIT ir<%gep.ld1> = getelementptr ir<%p1>, ir<%idx1> ; CHECK-NEXT: EMIT-SCALAR ir<%ld0> = load ir<%gep.ld0> -; CHECK-NEXT: EMIT-SCALAR ir<%ld1> = load ir<%gep.ld1> +; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer ir<%gep.ld1> +; CHECK-NEXT: WIDEN ir<%ld1> = load vp<[[VP6]]> ; CHECK-NEXT: EMIT ir<%val> = add ir<%ld0>, ir<%ld1> ; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> ; CHECK-NEXT: EMIT store ir<%val>, ir<%gep.st> ; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> -; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP5]]>, vp<[[VP1]]> ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<[[VP9:%[0-9]+]]> = exiting-iv-value ir<%iv> ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph @@ -982,7 +1013,7 @@ define void @dependent_strides_reverse_order(ptr noalias %p.out, ptr %p0, ptr %p ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP9]]>, middle.block ], [ ir<0>, ir-bb<entry> ], [ ir<0>, strides.check ] ; CHECK-NEXT: Successor(s): ir-bb<header> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<header>: @@ -1230,6 +1261,11 @@ define void @strided_interleave(ptr noalias %p.out, ptr %p, i64 %stride) { ; CHECK-NEXT: Live-in ir<128> = original trip-count ; CHECK-EMPTY: ; 
CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = EXPAND SCEVPredicate Equal predicate: %stride == 1 +; CHECK-NEXT: Successor(s): scalar.ph, strides.check +; CHECK-EMPTY: +; CHECK-NEXT: strides.check: +; CHECK-NEXT: EMIT branch-on-cond vp<[[VP3]]> ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: @@ -1237,27 +1273,29 @@ define void @strided_interleave(ptr noalias %p.out, ptr %p, i64 %stride) { ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> ; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> -; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<1> ; CHECK-NEXT: EMIT ir<%gep.ld0> = getelementptr ir<%p>, ir<%idx> ; CHECK-NEXT: EMIT ir<%gep.ld1> = getelementptr ir<%gep.ld0>, ir<1> -; CHECK-NEXT: EMIT-SCALAR ir<%ld0> = load ir<%gep.ld0> -; CHECK-NEXT: EMIT-SCALAR ir<%ld1> = load ir<%gep.ld1> +; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer ir<%gep.ld0> +; CHECK-NEXT: WIDEN ir<%ld0> = load vp<[[VP6]]> +; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = vector-pointer ir<%gep.ld1> +; CHECK-NEXT: WIDEN ir<%ld1> = load vp<[[VP7]]> ; CHECK-NEXT: EMIT ir<%val> = add ir<%ld0>, ir<%ld1> ; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> ; CHECK-NEXT: EMIT store ir<%val>, ir<%gep.st> ; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> -; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT vp<[[VP8:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP5]]>, vp<[[VP1]]> ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> ; CHECK-NEXT: No successors ; 
CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<[[VP10:%[0-9]+]]> = exiting-iv-value ir<%iv> ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph @@ -1266,7 +1304,7 @@ define void @strided_interleave(ptr noalias %p.out, ptr %p, i64 %stride) { ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP10]]>, middle.block ], [ ir<0>, ir-bb<entry> ], [ ir<0>, strides.check ] ; CHECK-NEXT: Successor(s): ir-bb<header> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<header>: @@ -1322,6 +1360,11 @@ define void @in_loop_base(ptr noalias %p.out, ptr %p, i64 %stride, i64 %offset) ; CHECK-NEXT: Live-in ir<128> = original trip-count ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = EXPAND SCEVPredicate Equal predicate: %stride == 1 +; CHECK-NEXT: Successor(s): scalar.ph, strides.check +; CHECK-EMPTY: +; CHECK-NEXT: strides.check: +; CHECK-NEXT: EMIT branch-on-cond vp<[[VP3]]> ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: @@ -1329,26 +1372,27 @@ define void @in_loop_base(ptr noalias %p.out, ptr %p, i64 %stride, i64 %offset) ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> ; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> -; CHECK-NEXT: EMIT ir<%mul> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%mul> = mul ir<%iv>, ir<1> ; 
CHECK-NEXT: EMIT ir<%idx> = add ir<%mul>, ir<%offset> ; CHECK-NEXT: EMIT ir<%gep.ld.base> = getelementptr ir<%p>, ir<%offset> ; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%gep.ld.base>, ir<%mul> -; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer ir<%gep.ld> +; CHECK-NEXT: WIDEN ir<%ld> = load vp<[[VP6]]> ; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> ; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> ; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> -; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP5]]>, vp<[[VP1]]> ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<[[VP9:%[0-9]+]]> = exiting-iv-value ir<%iv> ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph @@ -1357,7 +1401,7 @@ define void @in_loop_base(ptr noalias %p.out, ptr %p, i64 %stride, i64 %offset) ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP9]]>, middle.block ], [ ir<0>, ir-bb<entry> ], [ ir<0>, strides.check ] ; CHECK-NEXT: Successor(s): ir-bb<header> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<header>: @@ -1409,6 +1453,11 @@ define void @base_not_in_ir(ptr noalias %p.out, ptr %p, i64 %stride, i64 %offset ; CHECK-NEXT: Live-in ir<128> = original trip-count ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: 
EMIT vp<[[VP3:%[0-9]+]]> = EXPAND SCEVPredicate Equal predicate: %stride == 1 +; CHECK-NEXT: Successor(s): scalar.ph, strides.check +; CHECK-EMPTY: +; CHECK-NEXT: strides.check: +; CHECK-NEXT: EMIT branch-on-cond vp<[[VP3]]> ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: @@ -1416,25 +1465,26 @@ define void @base_not_in_ir(ptr noalias %p.out, ptr %p, i64 %stride, i64 %offset ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> ; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> -; CHECK-NEXT: EMIT ir<%mul> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%mul> = mul ir<%iv>, ir<1> ; CHECK-NEXT: EMIT ir<%idx> = add ir<%mul>, ir<%offset> ; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> -; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer ir<%gep.ld> +; CHECK-NEXT: WIDEN ir<%ld> = load vp<[[VP6]]> ; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> ; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> ; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> -; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP5]]>, vp<[[VP1]]> ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<[[VP9:%[0-9]+]]> = exiting-iv-value ir<%iv> ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, 
vp<[[VP2]]> ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph @@ -1443,7 +1493,7 @@ define void @base_not_in_ir(ptr noalias %p.out, ptr %p, i64 %stride, i64 %offset ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP9]]>, middle.block ], [ ir<0>, ir-bb<entry> ], [ ir<0>, strides.check ] ; CHECK-NEXT: Successor(s): ir-bb<header> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<header>: @@ -1674,6 +1724,11 @@ define void @non_constant_btc(ptr noalias %p.out, ptr %p, i64 %stride, i64 %n) { ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: ; CHECK-NEXT: EMIT vp<[[VP3]]> = EXPAND SCEV (1 smax %n) +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = EXPAND SCEVPredicate Equal predicate: %stride == 1 +; CHECK-NEXT: Successor(s): scalar.ph, strides.check +; CHECK-EMPTY: +; CHECK-NEXT: strides.check: +; CHECK-NEXT: EMIT branch-on-cond vp<[[VP4]]> ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: @@ -1681,24 +1736,25 @@ define void @non_constant_btc(ptr noalias %p.out, ptr %p, i64 %stride, i64 %n) { ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> ; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> -; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<1> ; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> -; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = vector-pointer ir<%gep.ld> +; CHECK-NEXT: WIDEN ir<%ld> = load vp<[[VP7]]> ; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr 
ir<%p.out>, ir<%iv> ; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> ; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<%n> -; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = not ir<%exitcond> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT vp<[[VP8:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP6]]>, vp<[[VP1]]> ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<[[VP10:%[0-9]+]]> = exiting-iv-value ir<%iv> ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<[[VP3]]>, vp<[[VP2]]> ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph @@ -1707,7 +1763,7 @@ define void @non_constant_btc(ptr noalias %p.out, ptr %p, i64 %stride, i64 %n) { ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP7]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP10]]>, middle.block ], [ ir<0>, ir-bb<entry> ], [ ir<0>, strides.check ] ; CHECK-NEXT: Successor(s): ir-bb<header> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<header>: @@ -1923,7 +1979,12 @@ define void @stride_btc_checks_order(ptr noalias %p.out, ptr %p, i64 %stride, i6 ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: ; CHECK-NEXT: IR %n = mul i64 %m, %stride -; CHECK-NEXT: EMIT vp<[[VP3]]> = EXPAND SCEV (1 smax (%stride * %m)) +; CHECK-NEXT: EMIT vp<[[VP3]]> = EXPAND SCEV (1 smax %m) +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = EXPAND SCEVPredicate Equal predicate: %stride == 1 +; CHECK-NEXT: Successor(s): scalar.ph, strides.check +; CHECK-EMPTY: +; CHECK-NEXT: strides.check: +; CHECK-NEXT: EMIT branch-on-cond vp<[[VP4]]> ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: 
; CHECK-NEXT: vector.ph: @@ -1931,24 +1992,25 @@ define void @stride_btc_checks_order(ptr noalias %p.out, ptr %p, i64 %stride, i6 ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> ; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> -; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<1> ; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> -; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = vector-pointer ir<%gep.ld> +; CHECK-NEXT: WIDEN ir<%ld> = load vp<[[VP7]]> ; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> ; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> ; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<%n> -; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = not ir<%exitcond> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT vp<[[VP8:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP6]]>, vp<[[VP1]]> ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<[[VP10:%[0-9]+]]> = exiting-iv-value ir<%iv> ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<[[VP3]]>, vp<[[VP2]]> ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph @@ -1957,7 +2019,7 @@ define void @stride_btc_checks_order(ptr noalias %p.out, ptr %p, i64 %stride, i6 ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ 
vp<[[VP7]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP10]]>, middle.block ], [ ir<0>, ir-bb<entry> ], [ ir<0>, strides.check ] ; CHECK-NEXT: Successor(s): ir-bb<header> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<header>: @@ -2006,7 +2068,12 @@ define void @stride_dependent_btc_non_preventive(ptr noalias %p.out, ptr %p, i64 ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: ; CHECK-NEXT: IR %n = add i64 %stride, 3 -; CHECK-NEXT: EMIT vp<[[VP3]]> = EXPAND SCEV (1 smax (3 + %stride)) +; CHECK-NEXT: EMIT vp<[[VP3]]> = EXPAND SCEV 4 +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = EXPAND SCEVPredicate Equal predicate: %stride == 1 +; CHECK-NEXT: Successor(s): scalar.ph, strides.check +; CHECK-EMPTY: +; CHECK-NEXT: strides.check: +; CHECK-NEXT: EMIT branch-on-cond vp<[[VP4]]> ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: @@ -2014,24 +2081,25 @@ define void @stride_dependent_btc_non_preventive(ptr noalias %p.out, ptr %p, i64 ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> ; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> -; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<1> ; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> -; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = vector-pointer ir<%gep.ld> +; CHECK-NEXT: WIDEN ir<%ld> = load vp<[[VP7]]> ; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> ; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> ; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<%n> -; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = not ir<%exitcond> -; CHECK-NEXT: EMIT vp<%index.next> = 
add nuw vp<[[VP4]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT vp<[[VP8:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP6]]>, vp<[[VP1]]> ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<[[VP10:%[0-9]+]]> = exiting-iv-value ir<%iv> ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<[[VP3]]>, vp<[[VP2]]> ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph @@ -2040,7 +2108,7 @@ define void @stride_dependent_btc_non_preventive(ptr noalias %p.out, ptr %p, i64 ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP7]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP10]]>, middle.block ], [ ir<0>, ir-bb<entry> ], [ ir<0>, strides.check ] ; CHECK-NEXT: Successor(s): ir-bb<header> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<header>: @@ -2122,7 +2190,12 @@ define void @stride_btc_independent_memdep_triple_check(ptr %p, ptr noalias %p2, ; CHECK-NEXT: ir-bb<entry>: ; CHECK-NEXT: IR %p.out = getelementptr i8, ptr %p2, i64 %out.offset ; CHECK-NEXT: IR %n = add i64 %stride, 3 -; CHECK-NEXT: EMIT vp<[[VP3]]> = EXPAND SCEV (1 smax (3 + %stride)) +; CHECK-NEXT: EMIT vp<[[VP3]]> = EXPAND SCEV 4 +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = EXPAND SCEVPredicate Equal predicate: %stride == 1 +; CHECK-NEXT: Successor(s): scalar.ph, strides.check +; CHECK-EMPTY: +; CHECK-NEXT: strides.check: +; CHECK-NEXT: EMIT branch-on-cond vp<[[VP4]]> ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: @@ -2130,27 +2203,28 @@ define void @stride_btc_independent_memdep_triple_check(ptr %p, ptr noalias %p2, ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; 
CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> ; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> -; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<1> ; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> -; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = vector-pointer ir<%gep.ld> +; CHECK-NEXT: WIDEN ir<%ld> = load vp<[[VP7]]> ; CHECK-NEXT: EMIT ir<%gep.ld2> = getelementptr ir<%p2>, ir<%iv> ; CHECK-NEXT: EMIT-SCALAR ir<%ld2> = load ir<%gep.ld2> ; CHECK-NEXT: EMIT ir<%val> = add ir<%ld>, ir<%ld2> ; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> ; CHECK-NEXT: EMIT store ir<%val>, ir<%gep.st> ; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<%n> -; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = not ir<%exitcond> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP4]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT vp<[[VP8:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP6]]>, vp<[[VP1]]> ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<[[VP10:%[0-9]+]]> = exiting-iv-value ir<%iv> ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<[[VP3]]>, vp<[[VP2]]> ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph @@ -2159,7 +2233,7 @@ define void @stride_btc_independent_memdep_triple_check(ptr %p, ptr noalias %p2, ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP7]]>, 
middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP10]]>, middle.block ], [ ir<0>, ir-bb<entry> ], [ ir<0>, strides.check ] ; CHECK-NEXT: Successor(s): ir-bb<header> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<header>: @@ -2300,6 +2374,11 @@ define void @nd_array_last_idx(ptr noalias %p.out, ptr %p, i64 %stride) { ; CHECK-NEXT: Live-in ir<128> = original trip-count ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = EXPAND SCEVPredicate Equal predicate: %stride == 1 +; CHECK-NEXT: Successor(s): scalar.ph, strides.check +; CHECK-EMPTY: +; CHECK-NEXT: strides.check: +; CHECK-NEXT: EMIT branch-on-cond vp<[[VP3]]> ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: @@ -2307,24 +2386,25 @@ define void @nd_array_last_idx(ptr noalias %p.out, ptr %p, i64 %stride) { ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> ; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> -; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<1> ; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<1>, ir<42>, ir<%idx> -; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer ir<%gep.ld> +; CHECK-NEXT: WIDEN ir<%ld> = load vp<[[VP6]]> ; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> ; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> ; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> -; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT 
vp<%index.next> = add nuw vp<[[VP5]]>, vp<[[VP1]]> ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<[[VP9:%[0-9]+]]> = exiting-iv-value ir<%iv> ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph @@ -2333,7 +2413,7 @@ define void @nd_array_last_idx(ptr noalias %p.out, ptr %p, i64 %stride) { ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP9]]>, middle.block ], [ ir<0>, ir-bb<entry> ], [ ir<0>, strides.check ] ; CHECK-NEXT: Successor(s): ir-bb<header> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<header>: @@ -2542,6 +2622,11 @@ define void @sext_stride(ptr noalias %p.out, ptr %p, i32 %stride.i32) { ; CHECK-NEXT: Live-in ir<128> = original trip-count ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = EXPAND SCEVPredicate Equal predicate: %stride.i32 == 1 +; CHECK-NEXT: Successor(s): scalar.ph, strides.check +; CHECK-EMPTY: +; CHECK-NEXT: strides.check: +; CHECK-NEXT: EMIT branch-on-cond vp<[[VP3]]> ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: @@ -2549,25 +2634,26 @@ define void @sext_stride(ptr noalias %p.out, ptr %p, i32 %stride.i32) { ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> -; CHECK-NEXT: EMIT-SCALAR ir<%stride> = 
sext ir<%stride.i32> to i64 +; CHECK-NEXT: EMIT-SCALAR ir<%stride> = sext ir<1> to i64 ; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> ; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> ; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> -; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer ir<%gep.ld> +; CHECK-NEXT: WIDEN ir<%ld> = load vp<[[VP6]]> ; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> ; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> ; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> -; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP5]]>, vp<[[VP1]]> ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<[[VP9:%[0-9]+]]> = exiting-iv-value ir<%iv> ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph @@ -2576,7 +2662,7 @@ define void @sext_stride(ptr noalias %p.out, ptr %p, i32 %stride.i32) { ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP9]]>, middle.block ], [ ir<0>, ir-bb<entry> ], [ ir<0>, strides.check ] ; CHECK-NEXT: Successor(s): ir-bb<header> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<header>: @@ -2625,6 +2711,11 @@ define void @trunc_stride(ptr noalias %p.out, ptr %p, i64 %stride.i64) { ; CHECK-NEXT: Live-in ir<128> = original trip-count ; 
CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = EXPAND SCEVPredicate Equal predicate: %stride.i64 == 1 +; CHECK-NEXT: Successor(s): scalar.ph, strides.check +; CHECK-EMPTY: +; CHECK-NEXT: strides.check: +; CHECK-NEXT: EMIT branch-on-cond vp<[[VP3]]> ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: @@ -2632,25 +2723,26 @@ define void @trunc_stride(ptr noalias %p.out, ptr %p, i64 %stride.i64) { ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> -; CHECK-NEXT: EMIT-SCALAR ir<%stride> = trunc ir<%stride.i64> to i32 +; CHECK-NEXT: EMIT-SCALAR ir<%stride> = trunc ir<1> to i32 ; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> ; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> ; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> -; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer ir<%gep.ld> +; CHECK-NEXT: WIDEN ir<%ld> = load vp<[[VP6]]> ; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> ; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> ; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> -; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP5]]>, vp<[[VP1]]> ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<[[VP9:%[0-9]+]]> 
= exiting-iv-value ir<%iv> ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph @@ -2659,7 +2751,7 @@ define void @trunc_stride(ptr noalias %p.out, ptr %p, i64 %stride.i64) { ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP9]]>, middle.block ], [ ir<0>, ir-bb<entry> ], [ ir<0>, strides.check ] ; CHECK-NEXT: Successor(s): ir-bb<header> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<header>: @@ -2710,6 +2802,11 @@ define void @trunc_ext_stride(ptr noalias %p.out, ptr %p0, ptr %p1, i32 %stride) ; CHECK-NEXT: ir-bb<entry>: ; CHECK-NEXT: IR %stride.trunc = trunc i32 %stride to i16 ; CHECK-NEXT: IR %stride.ext = sext i32 %stride to i64 +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = EXPAND SCEVPredicate Equal predicate: %stride == 1 +; CHECK-NEXT: Successor(s): scalar.ph, strides.check +; CHECK-EMPTY: +; CHECK-NEXT: strides.check: +; CHECK-NEXT: EMIT branch-on-cond vp<[[VP3]]> ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: @@ -2717,30 +2814,32 @@ define void @trunc_ext_stride(ptr noalias %p.out, ptr %p0, ptr %p1, i32 %stride) ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> ; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> ; CHECK-NEXT: EMIT-SCALAR ir<%iv.trunc> = trunc ir<%iv> to i16 ; CHECK-NEXT: EMIT-SCALAR ir<%iv.ext> = sext ir<%iv> to i64 ; CHECK-NEXT: EMIT ir<%idx.trunc> = mul ir<%iv.trunc>, ir<%stride.trunc> -; CHECK-NEXT: EMIT ir<%idx.ext> = mul ir<%iv.ext>, ir<%stride.ext> +; 
CHECK-NEXT: EMIT ir<%idx.ext> = mul ir<%iv.ext>, ir<1> ; CHECK-NEXT: EMIT ir<%gep.trunc> = getelementptr ir<%p0>, ir<%idx.trunc> ; CHECK-NEXT: EMIT ir<%gep.ext> = getelementptr ir<%p0>, ir<%idx.ext> -; CHECK-NEXT: EMIT-SCALAR ir<%ld.trunc> = load ir<%gep.trunc> -; CHECK-NEXT: EMIT-SCALAR ir<%ld.ext> = load ir<%gep.ext> +; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer ir<%gep.trunc> +; CHECK-NEXT: WIDEN ir<%ld.trunc> = load vp<[[VP6]]> +; CHECK-NEXT: vp<[[VP7:%[0-9]+]]> = vector-pointer ir<%gep.ext> +; CHECK-NEXT: WIDEN ir<%ld.ext> = load vp<[[VP7]]> ; CHECK-NEXT: EMIT ir<%val> = add ir<%ld.trunc>, ir<%ld.ext> ; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> ; CHECK-NEXT: EMIT store ir<%val>, ir<%gep.st> ; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> -; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT vp<[[VP8:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP5]]>, vp<[[VP1]]> ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<[[VP10:%[0-9]+]]> = exiting-iv-value ir<%iv> ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph @@ -2749,7 +2848,7 @@ define void @trunc_ext_stride(ptr noalias %p.out, ptr %p0, ptr %p1, i32 %stride) ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP10]]>, middle.block ], [ ir<0>, ir-bb<entry> ], [ ir<0>, strides.check ] ; CHECK-NEXT: Successor(s): ir-bb<header> 
; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<header>: @@ -2983,6 +3082,11 @@ define void @basic_strided_store(ptr noalias %p.out, ptr %p, i64 %stride) { ; CHECK-NEXT: Live-in ir<128> = original trip-count ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = EXPAND SCEVPredicate Equal predicate: %stride == 1 +; CHECK-NEXT: Successor(s): scalar.ph, strides.check +; CHECK-EMPTY: +; CHECK-NEXT: strides.check: +; CHECK-NEXT: EMIT branch-on-cond vp<[[VP3]]> ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: @@ -2990,24 +3094,25 @@ define void @basic_strided_store(ptr noalias %p.out, ptr %p, i64 %stride) { ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> ; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> -; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<1> ; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%iv> ; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> ; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%idx> -; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> +; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer ir<%gep.st> +; CHECK-NEXT: WIDEN store vp<[[VP6]]>, ir<%ld> ; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> -; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP5]]>, vp<[[VP1]]> ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: 
middle.block: -; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<[[VP9:%[0-9]+]]> = exiting-iv-value ir<%iv> ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph @@ -3016,7 +3121,7 @@ define void @basic_strided_store(ptr noalias %p.out, ptr %p, i64 %stride) { ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP9]]>, middle.block ], [ ir<0>, ir-bb<entry> ], [ ir<0>, strides.check ] ; CHECK-NEXT: Successor(s): ir-bb<header> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<header>: @@ -3064,6 +3169,11 @@ define void @ptr_vec_use(ptr noalias %p.out, ptr noalias %p.ptr.out, ptr %p, i64 ; CHECK-NEXT: Live-in ir<128> = original trip-count ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = EXPAND SCEVPredicate Equal predicate: %stride == 1 +; CHECK-NEXT: Successor(s): scalar.ph, strides.check +; CHECK-EMPTY: +; CHECK-NEXT: strides.check: +; CHECK-NEXT: EMIT branch-on-cond vp<[[VP3]]> ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: @@ -3071,26 +3181,27 @@ define void @ptr_vec_use(ptr noalias %p.out, ptr noalias %p.ptr.out, ptr %p, i64 ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> ; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> -; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<1> ; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> -; CHECK-NEXT: EMIT-SCALAR 
ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer ir<%gep.ld> +; CHECK-NEXT: WIDEN ir<%ld> = load vp<[[VP6]]> ; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> ; CHECK-NEXT: EMIT store ir<%ld>, ir<%gep.st> ; CHECK-NEXT: EMIT ir<%gep.ptr.st> = getelementptr ir<%p.ptr.out>, ir<%iv> ; CHECK-NEXT: EMIT store ir<%gep.ld>, ir<%gep.ptr.st> ; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> -; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP5]]>, vp<[[VP1]]> ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<[[VP9:%[0-9]+]]> = exiting-iv-value ir<%iv> ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph @@ -3099,7 +3210,7 @@ define void @ptr_vec_use(ptr noalias %p.out, ptr noalias %p.ptr.out, ptr %p, i64 ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP9]]>, middle.block ], [ ir<0>, ir-bb<entry> ], [ ir<0>, strides.check ] ; CHECK-NEXT: Successor(s): ir-bb<header> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<header>: @@ -3152,6 +3263,11 @@ define void @stride_idx_vec_use(ptr noalias %p.out, ptr %p, i64 %stride) { ; CHECK-NEXT: Live-in ir<128> = original trip-count ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = EXPAND SCEVPredicate Equal predicate: %stride == 1 +; CHECK-NEXT: 
Successor(s): scalar.ph, strides.check +; CHECK-EMPTY: +; CHECK-NEXT: strides.check: +; CHECK-NEXT: EMIT branch-on-cond vp<[[VP3]]> ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: @@ -3159,25 +3275,26 @@ define void @stride_idx_vec_use(ptr noalias %p.out, ptr %p, i64 %stride) { ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> ; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> -; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%idx> = mul ir<%iv>, ir<1> ; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> -; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer ir<%gep.ld> +; CHECK-NEXT: WIDEN ir<%ld> = load vp<[[VP6]]> ; CHECK-NEXT: EMIT ir<%val> = mul ir<%ld>, ir<%idx> ; CHECK-NEXT: EMIT ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> ; CHECK-NEXT: EMIT store ir<%val>, ir<%gep.st> ; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> -; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP5]]>, vp<[[VP1]]> ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<[[VP9:%[0-9]+]]> = exiting-iv-value ir<%iv> ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> ; CHECK-NEXT: Successor(s): ir-bb<exit>, 
scalar.ph @@ -3186,7 +3303,7 @@ define void @stride_idx_vec_use(ptr noalias %p.out, ptr %p, i64 %stride) { ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP9]]>, middle.block ], [ ir<0>, ir-bb<entry> ], [ ir<0>, strides.check ] ; CHECK-NEXT: Successor(s): ir-bb<header> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<header>: @@ -3236,6 +3353,11 @@ define void @offset_stride_idx_vec_use(ptr noalias %p.out, ptr %p, i64 %stride) ; CHECK-NEXT: Live-in ir<128> = original trip-count ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: +; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = EXPAND SCEVPredicate Equal predicate: %stride == 1 +; CHECK-NEXT: Successor(s): scalar.ph, strides.check +; CHECK-EMPTY: +; CHECK-NEXT: strides.check: +; CHECK-NEXT: EMIT branch-on-cond vp<[[VP3]]> ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: @@ -3243,26 +3365,27 @@ define void @offset_stride_idx_vec_use(ptr noalias %p.out, ptr %p, i64 %stride) ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<[[VP3:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> ; CHECK-NEXT: ir<%iv> = WIDEN-INDUCTION nsw ir<0>, ir<1>, vp<[[VP0]]> ; CHECK-NEXT: EMIT ir<%iv.next> = add nsw ir<%iv>, ir<1> -; CHECK-NEXT: EMIT ir<%iv.times.stride> = mul ir<%iv>, ir<%stride> +; CHECK-NEXT: EMIT ir<%iv.times.stride> = mul ir<%iv>, ir<1> ; CHECK-NEXT: EMIT ir<%idx> = add ir<%iv.times.stride>, ir<42> ; CHECK-NEXT: EMIT ir<%gep.ld> = getelementptr ir<%p>, ir<%idx> -; CHECK-NEXT: EMIT-SCALAR ir<%ld> = load ir<%gep.ld> +; CHECK-NEXT: vp<[[VP6:%[0-9]+]]> = vector-pointer ir<%gep.ld> +; CHECK-NEXT: WIDEN ir<%ld> = load vp<[[VP6]]> ; CHECK-NEXT: EMIT ir<%val> = mul ir<%ld>, ir<%idx> ; CHECK-NEXT: EMIT 
ir<%gep.st> = getelementptr ir<%p.out>, ir<%iv> ; CHECK-NEXT: EMIT store ir<%val>, ir<%gep.st> ; CHECK-NEXT: EMIT ir<%exitcond> = icmp sge ir<%iv.next>, ir<128> -; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = not ir<%exitcond> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP3]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = not ir<%exitcond> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP5]]>, vp<[[VP1]]> ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[VP6:%[0-9]+]]> = exiting-iv-value ir<%iv> +; CHECK-NEXT: EMIT vp<[[VP9:%[0-9]+]]> = exiting-iv-value ir<%iv> ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq ir<128>, vp<[[VP2]]> ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph @@ -3271,7 +3394,7 @@ define void @offset_stride_idx_vec_use(ptr noalias %p.out, ptr %p, i64 %stride) ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP6]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP9]]>, middle.block ], [ ir<0>, ir-bb<entry> ], [ ir<0>, strides.check ] ; CHECK-NEXT: Successor(s): ir-bb<header> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<header>: @@ -3351,8 +3474,13 @@ define void @test_rewrite_iv_scevs(i32 %start, ptr %dst) { ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<entry>: ; CHECK-NEXT: IR %start.ext = zext i32 %start to i64 -; CHECK-NEXT: EMIT vp<[[VP3]]> = EXPAND SCEV (100 + (-1 * (zext i32 %start to i64))<nsw>)<nsw> -; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = EXPAND SCEV (zext i32 %start to i64) +; CHECK-NEXT: EMIT vp<[[VP3]]> = EXPAND SCEV 99 +; CHECK-NEXT: EMIT vp<[[VP4:%[0-9]+]]> = EXPAND SCEV 1 +; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = EXPAND SCEVPredicate Equal predicate: %start == 1 +; CHECK-NEXT: Successor(s): scalar.ph, 
strides.check +; CHECK-EMPTY: +; CHECK-NEXT: strides.check: +; CHECK-NEXT: EMIT branch-on-cond vp<[[VP5]]> ; CHECK-NEXT: Successor(s): scalar.ph, vector.ph ; CHECK-EMPTY: ; CHECK-NEXT: vector.ph: @@ -3360,23 +3488,24 @@ define void @test_rewrite_iv_scevs(i32 %start, ptr %dst) { ; CHECK-EMPTY: ; CHECK-NEXT: <x1> vector loop: { ; CHECK-NEXT: vector.body: -; CHECK-NEXT: EMIT vp<[[VP5:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> -; CHECK-NEXT: ir<%iv.0> = WIDEN-INDUCTION ir<%start.ext>, ir<1>, vp<[[VP0]]> +; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = CANONICAL-INDUCTION ir<0>, vp<%index.next> +; CHECK-NEXT: ir<%iv.0> = WIDEN-INDUCTION ir<1>, ir<1>, vp<[[VP0]]> ; CHECK-NEXT: ir<%iv.1> = WIDEN-INDUCTION ir<0>, vp<[[VP4]]>, vp<[[VP0]]> ; CHECK-NEXT: EMIT ir<%gep.dst> = getelementptr ir<%dst>, ir<%iv.1> -; CHECK-NEXT: EMIT store ir<0.000000e+00>, ir<%gep.dst> +; CHECK-NEXT: vp<[[VP8:%[0-9]+]]> = vector-pointer ir<%gep.dst> +; CHECK-NEXT: WIDEN store vp<[[VP8]]>, ir<0.000000e+00> ; CHECK-NEXT: EMIT ir<%iv.1.next> = add ir<%iv.1>, vp<[[VP4]]> ; CHECK-NEXT: EMIT ir<%iv.0.next> = add ir<%iv.0>, ir<1> ; CHECK-NEXT: EMIT ir<%ec> = icmp eq ir<%iv.0.next>, ir<100> -; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP5]]>, vp<[[VP1]]> +; CHECK-NEXT: EMIT vp<%index.next> = add nuw vp<[[VP7]]>, vp<[[VP1]]> ; CHECK-NEXT: EMIT branch-on-count vp<%index.next>, vp<[[VP2]]> ; CHECK-NEXT: No successors ; CHECK-NEXT: } ; CHECK-NEXT: Successor(s): middle.block ; CHECK-EMPTY: ; CHECK-NEXT: middle.block: -; CHECK-NEXT: EMIT vp<[[VP7:%[0-9]+]]> = exiting-iv-value ir<%iv.0> -; CHECK-NEXT: EMIT vp<[[VP8:%[0-9]+]]> = exiting-iv-value ir<%iv.1> +; CHECK-NEXT: EMIT vp<[[VP10:%[0-9]+]]> = exiting-iv-value ir<%iv.0> +; CHECK-NEXT: EMIT vp<[[VP11:%[0-9]+]]> = exiting-iv-value ir<%iv.1> ; CHECK-NEXT: EMIT vp<%cmp.n> = icmp eq vp<[[VP3]]>, vp<[[VP2]]> ; CHECK-NEXT: EMIT branch-on-cond vp<%cmp.n> ; CHECK-NEXT: Successor(s): ir-bb<exit>, scalar.ph @@ -3385,8 +3514,8 @@ define void 
@test_rewrite_iv_scevs(i32 %start, ptr %dst) { ; CHECK-NEXT: No successors ; CHECK-EMPTY: ; CHECK-NEXT: scalar.ph: -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP7]]>, middle.block ], [ ir<%start.ext>, ir-bb<entry> ] -; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val>.1 = phi [ vp<[[VP8]]>, middle.block ], [ ir<0>, ir-bb<entry> ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val> = phi [ vp<[[VP10]]>, middle.block ], [ ir<%start.ext>, ir-bb<entry> ], [ ir<%start.ext>, strides.check ] +; CHECK-NEXT: EMIT-SCALAR vp<%bc.resume.val>.1 = phi [ vp<[[VP11]]>, middle.block ], [ ir<0>, ir-bb<entry> ], [ ir<0>, strides.check ] ; CHECK-NEXT: Successor(s): ir-bb<loop> ; CHECK-EMPTY: ; CHECK-NEXT: ir-bb<loop>: diff --git a/llvm/test/Transforms/LoopVectorize/vplan-based-stride-mv.ll b/llvm/test/Transforms/LoopVectorize/vplan-based-stride-mv.ll index 6390220e37f4..3cede3c5b93e 100644 --- a/llvm/test/Transforms/LoopVectorize/vplan-based-stride-mv.ll +++ b/llvm/test/Transforms/LoopVectorize/vplan-based-stride-mv.ll @@ -1,58 +1,54 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6 ; RUN: opt < %s -p loop-vectorize -force-vector-width=4 -S \ -; RUN: -enable-mem-access-versioning=false 2>&1 | FileCheck %s --check-prefix COMPARE-NO-MV +; RUN: -enable-mem-access-versioning=false -enable-vplan-based-stride-mv 2>&1 | FileCheck %s --check-prefix COMPARE-NO-MV ; RUN: opt < %s -p loop-vectorize -force-vector-width=4 -S \ -; RUN: -enable-mem-access-versioning=true 2>&1 | FileCheck %s --check-prefix COMPARE-LAA-MV +; RUN: -enable-mem-access-versioning=false -enable-vplan-based-stride-mv 2>&1 | FileCheck %s --check-prefix COMPARE-LAA-MV define void @basic(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-NO-MV-LABEL: define void @basic( ; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; 
COMPARE-NO-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-NO-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-NO-MV: [[STRIDES_CHECK]]: +; COMPARE-NO-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-NO-MV: [[VECTOR_PH]]: -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-NO-MV: [[VECTOR_BODY]]: ; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] -; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] -; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 -; 
COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP1]], align 8 ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) -; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-NO-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: store i64 [[LD]], ptr 
[[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP3:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; ; COMPARE-LAA-MV-LABEL: define void @basic( ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-LAA-MV: [[STRIDES_CHECK]]: +; COMPARE-LAA-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: @@ -116,18 +112,18 @@ define void @basic_optsize(ptr noalias %p.out, ptr %p, i64 %stride) #0 { ; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] ; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 ; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 ; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; 
COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] ; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] ; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP5]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8 ; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 ; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP19]], align 8 ; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 ; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 ; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 @@ -137,10 +133,10 @@ define void @basic_optsize(ptr noalias %p.out, ptr %p, i64 %stride) #0 { ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] -; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: -; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] -; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[SCALAR_PH:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: ; COMPARE-NO-MV-NEXT: ret void ; ; 
COMPARE-LAA-MV-LABEL: define void @basic_optsize( @@ -155,18 +151,18 @@ define void @basic_optsize(ptr noalias %p.out, ptr %p, i64 %stride) #0 { ; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] ; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 ; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 ; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 -; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 -; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] ; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] ; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] -; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] -; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP5]] +; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8 ; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 ; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 -; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP19]], align 8 ; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 ; 
COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 ; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 @@ -176,10 +172,10 @@ define void @basic_optsize(ptr noalias %p.out, ptr %p, i64 %stride) #0 { ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] -; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: -; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] -; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: br i1 [[TMP18]], label %[[SCALAR_PH:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: ; COMPARE-LAA-MV-NEXT: ret void ; entry: @@ -219,18 +215,18 @@ define void @basic_minsize(ptr noalias %p.out, ptr %p, i64 %stride) #1 { ; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] ; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 ; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 ; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP0]], i32 
3 +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] ; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] ; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP5]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8 ; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 ; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP19]], align 8 ; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 ; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 ; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 @@ -240,10 +236,10 @@ define void @basic_minsize(ptr noalias %p.out, ptr %p, i64 %stride) #1 { ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] -; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: -; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] -; COMPARE-NO-MV: [[EXIT]]: +; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[SCALAR_PH:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: ; COMPARE-NO-MV-NEXT: ret void ; ; COMPARE-LAA-MV-LABEL: define void @basic_minsize( @@ -258,18 +254,18 @@ define void 
@basic_minsize(ptr noalias %p.out, ptr %p, i64 %stride) #1 { ; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] ; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 ; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 ; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 -; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 -; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] ; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] ; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] -; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] -; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP5]] +; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8 ; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 ; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 -; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP19]], align 8 ; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 ; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 ; 
COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 @@ -279,10 +275,10 @@ define void @basic_minsize(ptr noalias %p.out, ptr %p, i64 %stride) #1 { ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] -; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: -; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] -; COMPARE-LAA-MV: [[EXIT]]: +; COMPARE-LAA-MV-NEXT: br i1 [[TMP18]], label %[[SCALAR_PH:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: ; COMPARE-LAA-MV-NEXT: ret void ; entry: @@ -318,80 +314,74 @@ define void @byte_gep_scaled_stride(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-NO-MV-LABEL: define void @byte_gep_scaled_stride( ; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-NO-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-NO-MV: [[STRIDES_CHECK]]: +; COMPARE-NO-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-NO-MV: [[VECTOR_PH]]: -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer -; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = shl <4 x i64> [[BROADCAST_SPLAT]], splat (i64 3) ; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-NO-MV: [[VECTOR_BODY]]: -; COMPARE-NO-MV-NEXT: 
[[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[TMP0]] -; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP2]] -; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP3]] -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP4]] -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP5]] -; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP18]], align 8 -; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) -; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; 
COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 3 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP0]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP3]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP0]], 4 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[STRIDE_X8:%.*]] = mul i64 [[STRIDE]], 8 +; COMPARE-NO-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE_X8]] +; COMPARE-NO-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i8, ptr [[P]], i64 [[IDX]] +; COMPARE-NO-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP7:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; ; COMPARE-LAA-MV-LABEL: define void @byte_gep_scaled_stride( ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr 
[[P:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-LAA-MV: [[STRIDES_CHECK]]: +; COMPARE-LAA-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: -; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 -; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer -; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = shl <4 x i64> [[BROADCAST_SPLAT]], splat (i64 3) ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: -; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[TMP0]] -; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0 -; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1 -; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2 -; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3 -; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP2]] -; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP3]] -; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP4]] -; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP5]] -; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 -; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 -; 
COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 -; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8 -; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0 -; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1 -; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2 -; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3 -; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP18]], align 8 -; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) -; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 3 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP1]] +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP0]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP3]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP0]], 4 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-LAA-MV: [[HEADER]]: +; 
COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-LAA-MV-NEXT: [[STRIDE_X8:%.*]] = mul i64 [[STRIDE]], 8 +; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE_X8]] +; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i8, ptr [[P]], i64 [[IDX]] +; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP7:![0-9]+]] ; COMPARE-LAA-MV: [[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; @@ -455,7 +445,7 @@ define void @byte_gep_under_scaled_stride(ptr noalias %p.out, ptr %p, i64 %strid ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-NO-MV: [[EXIT]]: @@ -495,7 +485,7 @@ define void @byte_gep_under_scaled_stride(ptr noalias %p.out, ptr %p, i64 %strid ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP19]], 
label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-LAA-MV: [[EXIT]]: @@ -562,7 +552,7 @@ define void @byte_gep_over_scaled_stride(ptr noalias %p.out, ptr %p, i64 %stride ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-NO-MV: [[EXIT]]: @@ -602,7 +592,7 @@ define void @byte_gep_over_scaled_stride(ptr noalias %p.out, ptr %p, i64 %stride ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-LAA-MV: [[EXIT]]: @@ -668,7 +658,7 @@ define void @byte_gep_non_power_of_two_scaled_stride(ptr noalias %p.out, ptr %p, ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] +; 
COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-NO-MV: [[EXIT]]: @@ -708,7 +698,7 @@ define void @byte_gep_non_power_of_two_scaled_stride(ptr noalias %p.out, ptr %p, ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-LAA-MV: [[EXIT]]: @@ -772,7 +762,7 @@ define void @byte_gep_nonscaled_stride(ptr noalias %p.out, ptr %p, i64 %stride) ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-NO-MV: [[EXIT]]: @@ -781,48 +771,39 @@ define void @byte_gep_nonscaled_stride(ptr noalias %p.out, ptr %p, i64 %stride) ; COMPARE-LAA-MV-LABEL: define void @byte_gep_nonscaled_stride( ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: 
[[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: ; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 -; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 -; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3 -; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[P]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 ; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP1]] ; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP2]] ; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP3]] -; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = load i64, ptr [[TMP4]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP4]] ; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 ; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = 
load i64, ptr [[TMP6]], align 8 ; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 -; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = insertelement <4 x i64> poison, i64 [[TMP8]], i32 0 -; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> [[TMP12]], i64 [[TMP9]], i32 1 -; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 2 -; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 3 -; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP15]], ptr [[TMP16]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] -; COMPARE-LAA-MV: [[SCALAR_PH]]: -; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] -; COMPARE-LAA-MV: [[HEADER]]: -; 
COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] -; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 -; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] -; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i8, ptr [[P]], i64 [[IDX]] -; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 -; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] -; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 -; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP11:![0-9]+]] ; COMPARE-LAA-MV: [[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; @@ -886,7 +867,7 @@ define void @byte_gep_negated_stride(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-NO-MV: [[EXIT]]: @@ -960,62 +941,51 @@ define void @shared_stride(ptr noalias %p.out, ptr %p0, ptr %p1, i64 %stride) { ; COMPARE-NO-MV-LABEL: define void @shared_stride( ; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-NO-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-NO-MV: [[STRIDES_CHECK]]: +; COMPARE-NO-MV-NEXT: br i1 
[[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-NO-MV: [[VECTOR_PH]]: -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-NO-MV: [[VECTOR_BODY]]: ; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP1]] -; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP2]] -; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP3]] -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP4]] -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP1]] -; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP2]] -; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP3]] -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP4]] -; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP5]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP6]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = load i64, ptr [[TMP7]], align 8 -; COMPARE-NO-MV-NEXT: 
[[TMP16:%.*]] = load i64, ptr [[TMP8]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> poison, i64 [[TMP13]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = insertelement <4 x i64> [[TMP17]], i64 [[TMP14]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = insertelement <4 x i64> [[TMP18]], i64 [[TMP15]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = insertelement <4 x i64> [[TMP19]], i64 [[TMP16]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP21:%.*]] = load i64, ptr [[TMP9]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP22:%.*]] = load i64, ptr [[TMP10]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP23:%.*]] = load i64, ptr [[TMP11]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP24:%.*]] = load i64, ptr [[TMP12]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP25:%.*]] = insertelement <4 x i64> poison, i64 [[TMP21]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP26:%.*]] = insertelement <4 x i64> [[TMP25]], i64 [[TMP22]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP27:%.*]] = insertelement <4 x i64> [[TMP26]], i64 [[TMP23]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP28:%.*]] = insertelement <4 x i64> [[TMP27]], i64 [[TMP24]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP29:%.*]] = add <4 x i64> [[TMP20]], [[TMP28]] -; COMPARE-NO-MV-NEXT: [[TMP30:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP29]], ptr [[TMP30]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P0]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P1]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8 +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = add <4 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]] +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP2]], ptr [[TMP3]], align 8 ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = 
add nsw <4 x i64> [[VEC_IND]], splat (i64 4) -; COMPARE-NO-MV-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP31]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[GEP_LD0:%.*]] = getelementptr i64, ptr [[P0]], i64 [[IDX]] +; COMPARE-NO-MV-NEXT: [[GEP_LD1:%.*]] = getelementptr i64, ptr [[P1]], i64 [[IDX]] +; COMPARE-NO-MV-NEXT: [[LD0:%.*]] = load i64, ptr [[GEP_LD0]], align 8 +; COMPARE-NO-MV-NEXT: [[LD1:%.*]] = load i64, ptr [[GEP_LD1]], align 8 +; COMPARE-NO-MV-NEXT: [[VAL:%.*]] = add i64 [[LD0]], [[LD1]] +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP14:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; ; COMPARE-LAA-MV-LABEL: define void @shared_stride( ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br 
i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-LAA-MV: [[STRIDES_CHECK]]: +; COMPARE-LAA-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: @@ -1080,68 +1050,70 @@ define void @dependent_strides(ptr noalias %p.out, ptr %p0, ptr %p1, i64 %stride ; COMPARE-NO-MV-LABEL: define void @dependent_strides( ; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-NO-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-NO-MV: [[STRIDES_CHECK]]: +; COMPARE-NO-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-NO-MV: [[VECTOR_PH]]: -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer -; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = add <4 x i64> [[BROADCAST_SPLAT]], splat (i64 1) ; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-NO-MV: [[VECTOR_BODY]]: ; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = 
extractelement <4 x i64> [[TMP1]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = mul <4 x i64> [[VEC_IND]], [[TMP0]] -; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = extractelement <4 x i64> [[TMP6]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = extractelement <4 x i64> [[TMP6]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = extractelement <4 x i64> [[TMP6]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = extractelement <4 x i64> [[TMP6]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP2]] -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP3]] -; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP4]] -; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP5]] -; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP7]] -; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP8]] -; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP9]] -; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP10]] -; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = load i64, ptr [[TMP11]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = load i64, ptr [[TMP12]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP21:%.*]] = load i64, ptr [[TMP13]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP22:%.*]] = load i64, ptr [[TMP14]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP23:%.*]] = insertelement <4 x i64> poison, i64 [[TMP19]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP24:%.*]] = insertelement <4 x i64> [[TMP23]], i64 [[TMP20]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP25:%.*]] = insertelement <4 x i64> [[TMP24]], i64 [[TMP21]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP26:%.*]] = insertelement <4 x i64> [[TMP25]], i64 [[TMP22]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP27:%.*]] = load i64, ptr [[TMP15]], align 8 -; 
COMPARE-NO-MV-NEXT: [[TMP28:%.*]] = load i64, ptr [[TMP16]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP17]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP30:%.*]] = load i64, ptr [[TMP18]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP31:%.*]] = insertelement <4 x i64> poison, i64 [[TMP27]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP32:%.*]] = insertelement <4 x i64> [[TMP31]], i64 [[TMP28]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP33:%.*]] = insertelement <4 x i64> [[TMP32]], i64 [[TMP29]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP34:%.*]] = insertelement <4 x i64> [[TMP33]], i64 [[TMP30]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP35:%.*]] = add <4 x i64> [[TMP26]], [[TMP34]] -; COMPARE-NO-MV-NEXT: [[TMP36:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP35]], ptr [[TMP36]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = shl <4 x i64> [[VEC_IND]], splat (i64 1) +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P0]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP2]] +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP4]] +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP6]] +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP8]] +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP3]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8 +; 
COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = add <4 x i64> [[WIDE_LOAD]], [[TMP17]] +; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP18]], ptr [[TMP19]], align 8 ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) -; COMPARE-NO-MV-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP37]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[STRIDE1:%.*]] = add i64 [[STRIDE]], 1 +; COMPARE-NO-MV-NEXT: [[IDX0:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[IDX1:%.*]] = mul i64 [[IV]], [[STRIDE1]] +; COMPARE-NO-MV-NEXT: [[GEP_LD0:%.*]] = getelementptr i64, ptr [[P0]], i64 [[IDX0]] +; COMPARE-NO-MV-NEXT: [[GEP_LD1:%.*]] = getelementptr i64, ptr [[P1]], i64 [[IDX1]] +; COMPARE-NO-MV-NEXT: [[LD0:%.*]] = load i64, ptr [[GEP_LD0]], align 8 +; COMPARE-NO-MV-NEXT: [[LD1:%.*]] = load i64, ptr [[GEP_LD1]], 
align 8 +; COMPARE-NO-MV-NEXT: [[VAL:%.*]] = add i64 [[LD0]], [[LD1]] +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP16:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; ; COMPARE-LAA-MV-LABEL: define void @dependent_strides( ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-LAA-MV: [[STRIDES_CHECK]]: +; COMPARE-LAA-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: @@ -1229,68 +1201,70 @@ define void @dependent_strides_reverse_order(ptr noalias %p.out, ptr %p0, ptr %p ; COMPARE-NO-MV-LABEL: define void @dependent_strides_reverse_order( ; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-NO-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-NO-MV: [[STRIDES_CHECK]]: +; COMPARE-NO-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-NO-MV: [[VECTOR_PH]]: -; 
COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer -; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = add <4 x i64> [[BROADCAST_SPLAT]], splat (i64 1) ; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-NO-MV: [[VECTOR_BODY]]: ; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[TMP0]] -; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = extractelement <4 x i64> [[TMP6]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = extractelement <4 x i64> [[TMP6]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = extractelement <4 x i64> [[TMP6]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = extractelement <4 x i64> [[TMP6]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP2]] -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP3]] -; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP4]] -; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP5]] -; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP7]] -; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP8]] -; COMPARE-NO-MV-NEXT: 
[[TMP17:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP9]] -; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P1]], i64 [[TMP10]] -; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = load i64, ptr [[TMP11]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = load i64, ptr [[TMP12]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP21:%.*]] = load i64, ptr [[TMP13]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP22:%.*]] = load i64, ptr [[TMP14]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP23:%.*]] = insertelement <4 x i64> poison, i64 [[TMP19]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP24:%.*]] = insertelement <4 x i64> [[TMP23]], i64 [[TMP20]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP25:%.*]] = insertelement <4 x i64> [[TMP24]], i64 [[TMP21]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP26:%.*]] = insertelement <4 x i64> [[TMP25]], i64 [[TMP22]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP27:%.*]] = load i64, ptr [[TMP15]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP28:%.*]] = load i64, ptr [[TMP16]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP29:%.*]] = load i64, ptr [[TMP17]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP30:%.*]] = load i64, ptr [[TMP18]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP31:%.*]] = insertelement <4 x i64> poison, i64 [[TMP27]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP32:%.*]] = insertelement <4 x i64> [[TMP31]], i64 [[TMP28]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP33:%.*]] = insertelement <4 x i64> [[TMP32]], i64 [[TMP29]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP34:%.*]] = insertelement <4 x i64> [[TMP33]], i64 [[TMP30]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP35:%.*]] = add <4 x i64> [[TMP26]], [[TMP34]] -; COMPARE-NO-MV-NEXT: [[TMP36:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP35]], ptr [[TMP36]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = shl <4 x i64> [[VEC_IND]], splat (i64 1) +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = 
extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP3]] +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP5]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P0]], i64 [[TMP7]] +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P1]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP2]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP4]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1 +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2 +; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3 +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP9]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = add <4 x i64> [[TMP17]], [[WIDE_LOAD]] +; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP18]], ptr [[TMP19]], align 8 ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) -; COMPARE-NO-MV-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP37]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] +; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label 
%[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[STRIDE0:%.*]] = add i64 [[STRIDE]], 1 +; COMPARE-NO-MV-NEXT: [[IDX0:%.*]] = mul i64 [[IV]], [[STRIDE0]] +; COMPARE-NO-MV-NEXT: [[IDX1:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[GEP_LD0:%.*]] = getelementptr i64, ptr [[P0]], i64 [[IDX0]] +; COMPARE-NO-MV-NEXT: [[GEP_LD1:%.*]] = getelementptr i64, ptr [[P1]], i64 [[IDX1]] +; COMPARE-NO-MV-NEXT: [[LD0:%.*]] = load i64, ptr [[GEP_LD0]], align 8 +; COMPARE-NO-MV-NEXT: [[LD1:%.*]] = load i64, ptr [[GEP_LD1]], align 8 +; COMPARE-NO-MV-NEXT: [[VAL:%.*]] = add i64 [[LD0]], [[LD1]] +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP18:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; ; COMPARE-LAA-MV-LABEL: define void @dependent_strides_reverse_order( ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-LAA-MV: 
[[STRIDES_CHECK]]: +; COMPARE-LAA-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: @@ -1421,7 +1395,7 @@ define void @byte_dependent_byte_geps(ptr noalias %p.out, ptr %p0, ptr %p1, i64 ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-NO-MV-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-NO-MV: [[EXIT]]: @@ -1430,66 +1404,53 @@ define void @byte_dependent_byte_geps(ptr noalias %p.out, ptr %p0, ptr %p1, i64 ; COMPARE-LAA-MV-LABEL: define void @byte_dependent_byte_geps( ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: ; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ 
[[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 -; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 -; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3 -; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[P0]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 ; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP1]] ; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP2]] ; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP3]] -; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = load i64, ptr [[TMP4]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP4]] ; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 ; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 ; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 -; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = insertelement <4 x i64> poison, i64 [[TMP8]], i32 0 -; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> [[TMP12]], i64 [[TMP9]], i32 1 -; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 2 -; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 3 -; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = getelementptr i8, ptr [[P1]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 
+; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 ; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP1]] ; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP2]] ; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP3]] -; COMPARE-LAA-MV-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP16]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP4]] ; COMPARE-LAA-MV-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP17]], align 8 ; COMPARE-LAA-MV-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP18]], align 8 ; COMPARE-LAA-MV-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP19]], align 8 -; COMPARE-LAA-MV-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> poison, i32 [[TMP20]], i32 0 -; COMPARE-LAA-MV-NEXT: [[TMP25:%.*]] = insertelement <4 x i32> [[TMP24]], i32 [[TMP21]], i32 1 -; COMPARE-LAA-MV-NEXT: [[TMP26:%.*]] = insertelement <4 x i32> [[TMP25]], i32 [[TMP22]], i32 2 -; COMPARE-LAA-MV-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP26]], i32 [[TMP23]], i32 3 -; COMPARE-LAA-MV-NEXT: [[TMP28:%.*]] = sext <4 x i32> [[TMP27]] to <4 x i64> -; COMPARE-LAA-MV-NEXT: [[TMP29:%.*]] = add <4 x i64> [[TMP15]], [[TMP28]] -; COMPARE-LAA-MV-NEXT: [[TMP30:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP29]], ptr [[TMP30]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP20]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP25:%.*]] = insertelement <4 x i32> poison, i32 [[TMP21]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP26:%.*]] = insertelement <4 x i32> [[TMP25]], i32 [[TMP22]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP27:%.*]] = 
insertelement <4 x i32> [[TMP26]], i32 [[TMP23]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP28:%.*]] = insertelement <4 x i32> [[TMP27]], i32 [[TMP24]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP29:%.*]] = sext <4 x i32> [[TMP28]] to <4 x i64> +; COMPARE-LAA-MV-NEXT: [[TMP30:%.*]] = add <4 x i64> [[TMP16]], [[TMP29]] +; COMPARE-LAA-MV-NEXT: [[TMP31:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP30]], ptr [[TMP31]], align 8 ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; COMPARE-LAA-MV-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP31]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-LAA-MV-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] -; COMPARE-LAA-MV: [[SCALAR_PH]]: -; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] -; COMPARE-LAA-MV: [[HEADER]]: -; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] -; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 -; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] -; COMPARE-LAA-MV-NEXT: [[GEP_LD0:%.*]] = getelementptr i8, ptr [[P0]], i64 [[IDX]] -; COMPARE-LAA-MV-NEXT: [[LD0:%.*]] = load i64, ptr [[GEP_LD0]], align 8 -; COMPARE-LAA-MV-NEXT: [[GEP_LD1:%.*]] = getelementptr i8, ptr [[P1]], i64 [[IDX]] -; COMPARE-LAA-MV-NEXT: [[LD1:%.*]] = load i32, ptr [[GEP_LD1]], align 8 -; COMPARE-LAA-MV-NEXT: [[LD1_EXT:%.*]] = sext i32 [[LD1]] to i64 -; COMPARE-LAA-MV-NEXT: [[VAL:%.*]] = add i64 [[LD0]], [[LD1_EXT]] -; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] -; COMPARE-LAA-MV-NEXT: store i64 
[[VAL]], ptr [[GEP_ST]], align 8 -; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP20:![0-9]+]] ; COMPARE-LAA-MV: [[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; @@ -1570,7 +1531,7 @@ define void @byte_dependent_byte_geps_reverse_order(ptr noalias %p.out, ptr %p0, ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-NO-MV-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-NO-MV: [[EXIT]]: @@ -1579,66 +1540,53 @@ define void @byte_dependent_byte_geps_reverse_order(ptr noalias %p.out, ptr %p0, ; COMPARE-LAA-MV-LABEL: define void @byte_dependent_byte_geps_reverse_order( ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P0:%.*]], ptr [[P1:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: ; COMPARE-LAA-MV-NEXT: 
[[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 -; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 -; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3 -; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[P1]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 ; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP1]] ; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP2]] ; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP3]] -; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP4]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P1]], i64 [[TMP4]] ; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 8 ; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP6]], align 8 ; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP7]], align 8 -; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = insertelement <4 x i32> poison, i32 [[TMP8]], i32 0 -; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i32> [[TMP12]], i32 [[TMP9]], i32 1 -; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i32> [[TMP13]], i32 [[TMP10]], i32 2 -; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i32> [[TMP14]], i32 [[TMP11]], i32 3 -; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = sext <4 x i32> [[TMP15]] to <4 x i64> -; COMPARE-LAA-MV-NEXT: 
[[TMP17:%.*]] = getelementptr i8, ptr [[P0]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP8]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i32> poison, i32 [[TMP9]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i32> [[TMP13]], i32 [[TMP10]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i32> [[TMP14]], i32 [[TMP11]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i32> [[TMP15]], i32 [[TMP12]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = sext <4 x i32> [[TMP16]] to <4 x i64> ; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP1]] ; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP2]] ; COMPARE-LAA-MV-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP3]] -; COMPARE-LAA-MV-NEXT: [[TMP21:%.*]] = load i64, ptr [[TMP17]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[P0]], i64 [[TMP4]] ; COMPARE-LAA-MV-NEXT: [[TMP22:%.*]] = load i64, ptr [[TMP18]], align 8 ; COMPARE-LAA-MV-NEXT: [[TMP23:%.*]] = load i64, ptr [[TMP19]], align 8 ; COMPARE-LAA-MV-NEXT: [[TMP24:%.*]] = load i64, ptr [[TMP20]], align 8 -; COMPARE-LAA-MV-NEXT: [[TMP25:%.*]] = insertelement <4 x i64> poison, i64 [[TMP21]], i32 0 -; COMPARE-LAA-MV-NEXT: [[TMP26:%.*]] = insertelement <4 x i64> [[TMP25]], i64 [[TMP22]], i32 1 -; COMPARE-LAA-MV-NEXT: [[TMP27:%.*]] = insertelement <4 x i64> [[TMP26]], i64 [[TMP23]], i32 2 -; COMPARE-LAA-MV-NEXT: [[TMP28:%.*]] = insertelement <4 x i64> [[TMP27]], i64 [[TMP24]], i32 3 -; COMPARE-LAA-MV-NEXT: [[TMP29:%.*]] = add <4 x i64> [[TMP28]], [[TMP16]] -; COMPARE-LAA-MV-NEXT: [[TMP30:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP29]], ptr [[TMP30]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP25:%.*]] = load i64, ptr [[TMP21]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP26:%.*]] = insertelement <4 x i64> poison, i64 [[TMP22]], i32 0 +; 
COMPARE-LAA-MV-NEXT: [[TMP27:%.*]] = insertelement <4 x i64> [[TMP26]], i64 [[TMP23]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP28:%.*]] = insertelement <4 x i64> [[TMP27]], i64 [[TMP24]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP29:%.*]] = insertelement <4 x i64> [[TMP28]], i64 [[TMP25]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP30:%.*]] = add <4 x i64> [[TMP29]], [[TMP17]] +; COMPARE-LAA-MV-NEXT: [[TMP31:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP30]], ptr [[TMP31]], align 8 ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; COMPARE-LAA-MV-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP31]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-LAA-MV-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP32]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] -; COMPARE-LAA-MV: [[SCALAR_PH]]: -; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] -; COMPARE-LAA-MV: [[HEADER]]: -; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] -; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 -; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] -; COMPARE-LAA-MV-NEXT: [[GEP_LD1:%.*]] = getelementptr i8, ptr [[P1]], i64 [[IDX]] -; COMPARE-LAA-MV-NEXT: [[LD1:%.*]] = load i32, ptr [[GEP_LD1]], align 8 -; COMPARE-LAA-MV-NEXT: [[LD1_EXT:%.*]] = sext i32 [[LD1]] to i64 -; COMPARE-LAA-MV-NEXT: [[GEP_LD0:%.*]] = getelementptr i8, ptr [[P0]], i64 [[IDX]] -; COMPARE-LAA-MV-NEXT: [[LD0:%.*]] = load i64, ptr [[GEP_LD0]], align 8 -; COMPARE-LAA-MV-NEXT: [[VAL:%.*]] = add i64 [[LD0]], [[LD1_EXT]] -; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr 
[[P_OUT]], i64 [[IV]] -; COMPARE-LAA-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8 -; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP22:![0-9]+]] ; COMPARE-LAA-MV: [[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; @@ -1677,76 +1625,65 @@ define void @strided_interleave(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-NO-MV-LABEL: define void @strided_interleave( ; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-NO-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-NO-MV: [[STRIDES_CHECK]]: +; COMPARE-NO-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-NO-MV: [[VECTOR_PH]]: -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-NO-MV: [[VECTOR_BODY]]: -; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = 
getelementptr i64, ptr [[P]], i64 [[TMP1]] -; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] -; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[TMP5]], i61 1 -; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[TMP6]], i61 1 -; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[TMP7]], i61 1 -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = getelementptr i64, ptr [[TMP8]], i61 1 -; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP5]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP6]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = load i64, ptr [[TMP7]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = load i64, ptr [[TMP8]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> poison, i64 [[TMP13]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = insertelement <4 x i64> [[TMP17]], i64 [[TMP14]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = insertelement <4 x i64> [[TMP18]], i64 [[TMP15]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = insertelement <4 x i64> [[TMP19]], i64 [[TMP16]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP21:%.*]] = load i64, ptr [[TMP9]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP22:%.*]] = load i64, ptr [[TMP10]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP23:%.*]] = load i64, ptr [[TMP11]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP24:%.*]] = load i64, ptr [[TMP12]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP25:%.*]] = insertelement <4 x i64> poison, i64 [[TMP21]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP26:%.*]] = insertelement <4 x i64> [[TMP25]], i64 [[TMP22]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP27:%.*]] = insertelement <4 x i64> [[TMP26]], i64 [[TMP23]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP28:%.*]] = insertelement <4 x i64> [[TMP27]], i64 [[TMP24]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP29:%.*]] = add <4 x i64> [[TMP20]], [[TMP28]] -; 
COMPARE-NO-MV-NEXT: [[TMP30:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP29]], ptr [[TMP30]], align 8 -; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) -; COMPARE-NO-MV-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP31]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP0]] +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[TMP4]], i61 1 +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP4]], align 8 +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP5]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = add <4 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]] +; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP0]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP6]], ptr [[TMP7]], align 8 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP0]], 4 +; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[GEP_LD0:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-NO-MV-NEXT: [[GEP_LD1:%.*]] = getelementptr i64, ptr [[GEP_LD0]], 
i61 1 +; COMPARE-NO-MV-NEXT: [[LD0:%.*]] = load i64, ptr [[GEP_LD0]], align 8 +; COMPARE-NO-MV-NEXT: [[LD1:%.*]] = load i64, ptr [[GEP_LD1]], align 8 +; COMPARE-NO-MV-NEXT: [[VAL:%.*]] = add i64 [[LD0]], [[LD1]] +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP22:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; ; COMPARE-LAA-MV-LABEL: define void @strided_interleave( ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-LAA-MV: [[STRIDES_CHECK]]: +; COMPARE-LAA-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: -; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P]], i64 [[INDEX]] -; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[TMP0]], i61 1 -; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8 -; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 -; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = add <4 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]] -; COMPARE-LAA-MV-NEXT: 
[[TMP3:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP2]], ptr [[TMP3]], align 8 -; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP0]] +; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[TMP4]], i61 1 +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP4]], align 8 +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP5]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = add <4 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]] +; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP0]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP6]], ptr [[TMP7]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[TMP0]], 4 +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-LAA-MV: [[SCALAR_PH]]: @@ -1763,7 +1700,7 @@ define void @strided_interleave(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] ; COMPARE-LAA-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8 ; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP24:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP22:![0-9]+]] ; 
COMPARE-LAA-MV: [[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; @@ -1800,50 +1737,48 @@ define void @in_loop_base(ptr noalias %p.out, ptr %p, i64 %stride, i64 %offset) ; COMPARE-NO-MV-LABEL: define void @in_loop_base( ; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[OFFSET:%.*]]) { ; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-NO-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-NO-MV: [[STRIDES_CHECK]]: +; COMPARE-NO-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-NO-MV: [[VECTOR_PH]]: ; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-NO-MV: [[VECTOR_BODY]]: ; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[TMP0]], i64 [[TMP2]] -; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[TMP0]], i64 [[TMP3]] -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = 
getelementptr i64, ptr [[TMP0]], i64 [[TMP4]] -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[TMP0]], i64 [[TMP5]] -; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP18]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[TMP0]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP2]], align 8 ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) -; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]] +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: 
[[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[MUL:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[IDX:%.*]] = add i64 [[MUL]], [[OFFSET]] +; COMPARE-NO-MV-NEXT: [[GEP_LD_BASE:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] +; COMPARE-NO-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[GEP_LD_BASE]], i64 [[MUL]] +; COMPARE-NO-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP24:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; ; COMPARE-LAA-MV-LABEL: define void @in_loop_base( ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[OFFSET:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-LAA-MV: [[STRIDES_CHECK]]: +; COMPARE-LAA-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: ; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P]], i64 [[OFFSET]] ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] @@ -1855,7 +1790,7 @@ define void @in_loop_base(ptr noalias %p.out, ptr %p, i64 %stride, i64 %offset) ; COMPARE-LAA-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], 
ptr [[TMP2]], align 8 ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-LAA-MV: [[SCALAR_PH]]: @@ -1871,7 +1806,7 @@ define void @in_loop_base(ptr noalias %p.out, ptr %p, i64 %stride, i64 %offset) ; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] ; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 ; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP26:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP24:![0-9]+]] ; COMPARE-LAA-MV: [[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; @@ -1905,64 +1840,59 @@ define void @base_not_in_ir(ptr noalias %p.out, ptr %p, i64 %stride, i64 %offset ; COMPARE-NO-MV-LABEL: define void @base_not_in_ir( ; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[OFFSET:%.*]]) { ; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-NO-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-NO-MV: [[STRIDES_CHECK]]: +; COMPARE-NO-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-NO-MV: [[VECTOR_PH]]: -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 
x i32> zeroinitializer -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i64> poison, i64 [[OFFSET]], i64 0 -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT1]], <4 x i64> poison, <4 x i32> zeroinitializer ; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-NO-MV: [[VECTOR_BODY]]: ; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = add <4 x i64> [[TMP0]], [[BROADCAST_SPLAT2]] -; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] -; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP5]] -; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2 -; 
COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP18]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], [[OFFSET]] +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP3]], align 8 ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) -; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[MUL:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[IDX:%.*]] = add i64 [[MUL]], [[OFFSET]] +; COMPARE-NO-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-NO-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = 
icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP26:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; ; COMPARE-LAA-MV-LABEL: define void @base_not_in_ir( ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[OFFSET:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-LAA-MV: [[STRIDES_CHECK]]: +; COMPARE-LAA-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: ; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], [[OFFSET]] -; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP0]] -; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 -; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-LAA-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP2]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], [[OFFSET]] +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP3]], align 8 ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = 
add nuw i64 [[INDEX]], 4 -; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-LAA-MV: [[SCALAR_PH]]: @@ -1977,7 +1907,7 @@ define void @base_not_in_ir(ptr noalias %p.out, ptr %p, i64 %stride, i64 %offset ; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] ; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 ; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP28:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP26:![0-9]+]] ; COMPARE-LAA-MV: [[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; @@ -2042,7 +1972,7 @@ define void @non_invariant_uniform_base(ptr noalias %p.out, ptr %p, i64 %stride) ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-NO-MV: [[EXIT]]: @@ -2083,7 +2013,7 @@ define void @non_invariant_uniform_base(ptr noalias %p.out, ptr %p, i64 %stride) ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-LAA-MV-NEXT: 
[[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-LAA-MV-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-LAA-MV: [[EXIT]]: @@ -2151,7 +2081,7 @@ define void @non_invariant_uniform_stride(ptr noalias %p.out, ptr %p, ptr %p.uni ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-NO-MV-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-NO-MV: [[EXIT]]: @@ -2193,7 +2123,7 @@ define void @non_invariant_uniform_stride(ptr noalias %p.out, ptr %p, ptr %p.uni ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-LAA-MV-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP28:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-LAA-MV: [[EXIT]]: @@ -2235,45 +2165,29 @@ define void @non_constant_btc(ptr noalias %p.out, ptr %p, i64 %stride, i64 %n) { ; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], 
ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[N:%.*]]) { ; COMPARE-NO-MV-NEXT: [[ENTRY:.*]]: ; COMPARE-NO-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) +; COMPARE-NO-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 ; COMPARE-NO-MV-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4 -; COMPARE-NO-MV-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[STRIDES_CHECK:.*]] +; COMPARE-NO-MV: [[STRIDES_CHECK]]: +; COMPARE-NO-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; COMPARE-NO-MV: [[VECTOR_PH]]: ; COMPARE-NO-MV-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[SMAX]], 4 ; COMPARE-NO-MV-NEXT: [[N_VEC:%.*]] = sub i64 [[SMAX]], [[N_MOD_VF]] -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-NO-MV: [[VECTOR_BODY]]: ; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] -; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] -; COMPARE-NO-MV-NEXT: 
[[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP1]], align 8 ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) -; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]] +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]] ; COMPARE-NO-MV-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label 
%[[SCALAR_PH]] ; COMPARE-NO-MV: [[SCALAR_PH]]: -; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[STRIDES_CHECK]] ] ; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] ; COMPARE-NO-MV: [[HEADER]]: ; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] @@ -2284,7 +2198,7 @@ define void @non_constant_btc(ptr noalias %p.out, ptr %p, i64 %stride, i64 %n) { ; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] ; COMPARE-NO-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 ; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] -; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP22:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP30:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; @@ -2292,11 +2206,11 @@ define void @non_constant_btc(ptr noalias %p.out, ptr %p, i64 %stride, i64 %n) { ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[N:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*]]: ; COMPARE-LAA-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) +; COMPARE-LAA-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 ; COMPARE-LAA-MV-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4 -; COMPARE-LAA-MV-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[STRIDES_CHECK:.*]] +; COMPARE-LAA-MV: 
[[STRIDES_CHECK]]: +; COMPARE-LAA-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: ; COMPARE-LAA-MV-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[SMAX]], 4 ; COMPARE-LAA-MV-NEXT: [[N_VEC:%.*]] = sub i64 [[SMAX]], [[N_MOD_VF]] @@ -2309,12 +2223,12 @@ define void @non_constant_btc(ptr noalias %p.out, ptr %p, i64 %stride, i64 %n) { ; COMPARE-LAA-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP1]], align 8 ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; COMPARE-LAA-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]] ; COMPARE-LAA-MV-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] ; COMPARE-LAA-MV: [[SCALAR_PH]]: -; COMPARE-LAA-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ] +; COMPARE-LAA-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[STRIDES_CHECK]] ] ; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] ; COMPARE-LAA-MV: [[HEADER]]: ; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] @@ -2325,7 +2239,7 @@ define void @non_constant_btc(ptr noalias %p.out, ptr %p, i64 %stride, i64 %n) { ; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] ; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 ; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] -; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP32:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br 
i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP30:![0-9]+]] ; COMPARE-LAA-MV: [[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; @@ -2391,7 +2305,7 @@ define void @stride_as_btc(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]] ; COMPARE-NO-MV-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] @@ -2407,16 +2321,56 @@ define void @stride_as_btc(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] ; COMPARE-NO-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 ; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[STRIDE]] -; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP24:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP32:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; ; COMPARE-LAA-MV-LABEL: define void @stride_as_btc( ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*]]: +; COMPARE-LAA-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[STRIDE]], i64 1) +; COMPARE-LAA-MV-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4 +; COMPARE-LAA-MV-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: 
[[N_MOD_VF:%.*]] = urem i64 [[SMAX]], 4 +; COMPARE-LAA-MV-NEXT: [[N_VEC:%.*]] = sub i64 [[SMAX]], [[N_MOD_VF]] +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] +; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], 
i64 [[TMP11]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]] +; COMPARE-LAA-MV-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] ; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] ; COMPARE-LAA-MV: [[HEADER]]: -; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] ; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 ; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] ; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] @@ -2424,7 +2378,7 @@ define void @stride_as_btc(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] ; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 ; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[STRIDE]] -; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT:.*]] +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP32:![0-9]+]] ; COMPARE-LAA-MV: 
[[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; @@ -2491,7 +2445,7 @@ define void @stride_dependent_btc(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP25:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]] ; COMPARE-NO-MV-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] @@ -2507,7 +2461,7 @@ define void @stride_dependent_btc(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] ; COMPARE-NO-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 ; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] -; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP26:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP34:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; @@ -2515,9 +2469,49 @@ define void @stride_dependent_btc(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*]]: ; COMPARE-LAA-MV-NEXT: [[N:%.*]] = add i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) +; COMPARE-LAA-MV-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4 +; COMPARE-LAA-MV-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: 
[[N_MOD_VF:%.*]] = urem i64 [[SMAX]], 4 +; COMPARE-LAA-MV-NEXT: [[N_VEC:%.*]] = sub i64 [[SMAX]], [[N_MOD_VF]] +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] +; COMPARE-LAA-MV: [[VECTOR_BODY]]: +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] +; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] +; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], 
i64 [[TMP11]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]] +; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: +; COMPARE-LAA-MV-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]] +; COMPARE-LAA-MV-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] ; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] ; COMPARE-LAA-MV: [[HEADER]]: -; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] ; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 ; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] ; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] @@ -2525,7 +2519,7 @@ define void @stride_dependent_btc(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] ; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 ; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] -; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT:.*]] +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP34:![0-9]+]] ; COMPARE-LAA-MV: 
[[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; @@ -2560,46 +2554,30 @@ define void @stride_btc_checks_order(ptr noalias %p.out, ptr %p, i64 %stride, i6 ; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[M:%.*]]) { ; COMPARE-NO-MV-NEXT: [[ENTRY:.*]]: ; COMPARE-NO-MV-NEXT: [[N:%.*]] = mul i64 [[M]], [[STRIDE]] -; COMPARE-NO-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) +; COMPARE-NO-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[M]], i64 1) +; COMPARE-NO-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 ; COMPARE-NO-MV-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4 -; COMPARE-NO-MV-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[STRIDES_CHECK:.*]] +; COMPARE-NO-MV: [[STRIDES_CHECK]]: +; COMPARE-NO-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; COMPARE-NO-MV: [[VECTOR_PH]]: ; COMPARE-NO-MV-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[SMAX]], 4 ; COMPARE-NO-MV-NEXT: [[N_VEC:%.*]] = sub i64 [[SMAX]], [[N_MOD_VF]] -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-NO-MV: [[VECTOR_BODY]]: ; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 -; 
COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] -; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] -; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP1]], align 8 ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) -; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] +; COMPARE-NO-MV-NEXT: 
[[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]] ; COMPARE-NO-MV-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] ; COMPARE-NO-MV: [[SCALAR_PH]]: -; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[STRIDES_CHECK]] ] ; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] ; COMPARE-NO-MV: [[HEADER]]: ; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] @@ -2610,7 +2588,7 @@ define void @stride_btc_checks_order(ptr noalias %p.out, ptr %p, i64 %stride, i6 ; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] ; COMPARE-NO-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 ; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] -; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP28:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP36:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; @@ -2618,16 +2596,15 @@ define void @stride_btc_checks_order(ptr noalias %p.out, ptr %p, i64 %stride, i6 ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[M:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*]]: ; COMPARE-LAA-MV-NEXT: [[N:%.*]] = mul i64 [[M]], [[STRIDE]] -; COMPARE-LAA-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) -; COMPARE-LAA-MV-NEXT: [[SMAX1:%.*]] = call i64 @llvm.smax.i64(i64 [[M]], i64 1) -; COMPARE-LAA-MV-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX1]], 4 -; COMPARE-LAA-MV-NEXT: br i1 
[[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[M]], i64 1) +; COMPARE-LAA-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4 +; COMPARE-LAA-MV-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[STRIDES_CHECK:.*]] +; COMPARE-LAA-MV: [[STRIDES_CHECK]]: +; COMPARE-LAA-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: -; COMPARE-LAA-MV-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[SMAX1]], 4 -; COMPARE-LAA-MV-NEXT: [[N_VEC:%.*]] = sub i64 [[SMAX1]], [[N_MOD_VF]] +; COMPARE-LAA-MV-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[SMAX]], 4 +; COMPARE-LAA-MV-NEXT: [[N_VEC:%.*]] = sub i64 [[SMAX]], [[N_MOD_VF]] ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: ; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] @@ -2637,12 +2614,12 @@ define void @stride_btc_checks_order(ptr noalias %p.out, ptr %p, i64 %stride, i6 ; COMPARE-LAA-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP1]], align 8 ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; COMPARE-LAA-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: -; COMPARE-LAA-MV-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX1]], [[N_VEC]] +; COMPARE-LAA-MV-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], 
[[N_VEC]] ; COMPARE-LAA-MV-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] ; COMPARE-LAA-MV: [[SCALAR_PH]]: -; COMPARE-LAA-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_SCEVCHECK]] ] +; COMPARE-LAA-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[STRIDES_CHECK]] ] ; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] ; COMPARE-LAA-MV: [[HEADER]]: ; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] @@ -2653,7 +2630,7 @@ define void @stride_btc_checks_order(ptr noalias %p.out, ptr %p, i64 %stride, i6 ; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] ; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 ; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] -; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP34:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP36:![0-9]+]] ; COMPARE-LAA-MV: [[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; @@ -2684,48 +2661,22 @@ exit: define void @stride_dependent_btc_non_preventive(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-NO-MV-LABEL: define void @stride_dependent_btc_non_preventive( ; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { -; COMPARE-NO-MV-NEXT: [[ENTRY:.*]]: +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] ; COMPARE-NO-MV-NEXT: [[N:%.*]] = add i64 [[STRIDE]], 3 -; COMPARE-NO-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) -; COMPARE-NO-MV-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4 -; COMPARE-NO-MV-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-NO-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; 
COMPARE-NO-MV: [[STRIDES_CHECK]]: +; COMPARE-NO-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-NO-MV: [[VECTOR_PH]]: -; COMPARE-NO-MV-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[SMAX]], 4 -; COMPARE-NO-MV-NEXT: [[N_VEC:%.*]] = sub i64 [[SMAX]], [[N_MOD_VF]] -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-NO-MV: [[VECTOR_BODY]]: -; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] -; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] -; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 -; 
COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 -; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) -; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP29:![0-9]+]] +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[P]], align 8 +; COMPARE-NO-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[P_OUT]], align 8 +; COMPARE-NO-MV-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: -; COMPARE-NO-MV-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]] -; COMPARE-NO-MV-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; COMPARE-NO-MV-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]] ; COMPARE-NO-MV: [[SCALAR_PH]]: -; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4, %[[MIDDLE_BLOCK]] ], [ 0, %[[STRIDES_CHECK]] ] ; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] ; COMPARE-NO-MV: [[HEADER]]: ; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] @@ -2736,7 +2687,7 @@ define void @stride_dependent_btc_non_preventive(ptr noalias %p.out, ptr %p, i64 ; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] ; COMPARE-NO-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 ; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp 
slt i64 [[IV_NEXT]], [[N]] -; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP30:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP37:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; @@ -2744,11 +2695,10 @@ define void @stride_dependent_btc_non_preventive(ptr noalias %p.out, ptr %p, i64 ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] ; COMPARE-LAA-MV-NEXT: [[N:%.*]] = add i64 [[STRIDE]], 3 -; COMPARE-LAA-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-LAA-MV: [[STRIDES_CHECK]]: +; COMPARE-LAA-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: @@ -2756,11 +2706,12 @@ define void @stride_dependent_btc_non_preventive(ptr noalias %p.out, ptr %p, i64 ; COMPARE-LAA-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[P_OUT]], align 8 ; COMPARE-LAA-MV-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: -; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]] ; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4, %[[MIDDLE_BLOCK]] ], [ 0, %[[STRIDES_CHECK]] ] ; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] ; COMPARE-LAA-MV: [[HEADER]]: -; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, 
%[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] ; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 ; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] ; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] @@ -2768,7 +2719,7 @@ define void @stride_dependent_btc_non_preventive(ptr noalias %p.out, ptr %p, i64 ; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] ; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 ; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] -; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP35:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP37:![0-9]+]] ; COMPARE-LAA-MV: [[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; @@ -2824,30 +2775,12 @@ define void @stride_btc_memdep_triple_check(ptr %p, i64 %stride, i64 %out.offset ; ; COMPARE-LAA-MV-LABEL: define void @stride_btc_memdep_triple_check( ; COMPARE-LAA-MV-SAME: ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[OUT_OFFSET:%.*]]) { -; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] +; COMPARE-LAA-MV-NEXT: [[ENTRY:.*]]: ; COMPARE-LAA-MV-NEXT: [[P_OUT:%.*]] = getelementptr i8, ptr [[P]], i64 [[OUT_OFFSET]] ; COMPARE-LAA-MV-NEXT: [[N:%.*]] = add i64 [[STRIDE]], 3 -; COMPARE-LAA-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_MEMCHECK]]: -; COMPARE-LAA-MV-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[OUT_OFFSET]], 32 -; COMPARE-LAA-MV-NEXT: br i1 [[DIFF_CHECK]], label 
%[[SCALAR_PH]], label %[[VECTOR_PH:.*]] -; COMPARE-LAA-MV: [[VECTOR_PH]]: -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] -; COMPARE-LAA-MV: [[VECTOR_BODY]]: -; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[P]], align 8 -; COMPARE-LAA-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[P_OUT]], align 8 -; COMPARE-LAA-MV-NEXT: br label %[[MIDDLE_BLOCK:.*]] -; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: -; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] -; COMPARE-LAA-MV: [[SCALAR_PH]]: -; COMPARE-LAA-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[VECTOR_SCEVCHECK]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] ; COMPARE-LAA-MV: [[HEADER]]: -; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] ; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 ; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] ; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] @@ -2855,7 +2788,7 @@ define void @stride_btc_memdep_triple_check(ptr %p, i64 %stride, i64 %out.offset ; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] ; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 ; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] -; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP36:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT:.*]] ; COMPARE-LAA-MV: [[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; @@ -2887,55 +2820,28 @@ exit: define void @stride_btc_independent_memdep_triple_check(ptr %p, ptr noalias %p2, i64 %stride, i64 %out.offset) { ; COMPARE-NO-MV-LABEL: define void @stride_btc_independent_memdep_triple_check( ; COMPARE-NO-MV-SAME: ptr [[P:%.*]], ptr noalias [[P2:%.*]], i64 [[STRIDE:%.*]], i64 
[[OUT_OFFSET:%.*]]) { -; COMPARE-NO-MV-NEXT: [[ENTRY:.*]]: +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] ; COMPARE-NO-MV-NEXT: [[P_OUT:%.*]] = getelementptr i8, ptr [[P2]], i64 [[OUT_OFFSET]] ; COMPARE-NO-MV-NEXT: [[N:%.*]] = add i64 [[STRIDE]], 3 -; COMPARE-NO-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) -; COMPARE-NO-MV-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SMAX]], 4 -; COMPARE-NO-MV-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; COMPARE-NO-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-NO-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-NO-MV: [[STRIDES_CHECK]]: +; COMPARE-NO-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] ; COMPARE-NO-MV: [[VECTOR_MEMCHECK]]: ; COMPARE-NO-MV-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[OUT_OFFSET]], 32 ; COMPARE-NO-MV-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; COMPARE-NO-MV: [[VECTOR_PH]]: -; COMPARE-NO-MV-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[SMAX]], 4 -; COMPARE-NO-MV-NEXT: [[N_VEC:%.*]] = sub i64 [[SMAX]], [[N_MOD_VF]] -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-NO-MV: [[VECTOR_BODY]]: -; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 -; 
COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] -; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] -; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P2]], i64 [[INDEX]] -; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP17]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = add <4 x i64> [[TMP16]], [[WIDE_LOAD]] -; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP18]], ptr [[TMP19]], align 8 -; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) -; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; COMPARE-NO-MV-NEXT: br i1 [[TMP20]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP31:![0-9]+]] +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[P]], align 8 +; COMPARE-NO-MV-NEXT: 
[[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[P2]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = add <4 x i64> [[WIDE_LOAD]], [[WIDE_LOAD1]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP0]], ptr [[P_OUT]], align 8 +; COMPARE-NO-MV-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: -; COMPARE-NO-MV-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SMAX]], [[N_VEC]] -; COMPARE-NO-MV-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; COMPARE-NO-MV-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]] ; COMPARE-NO-MV: [[SCALAR_PH]]: -; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ], [ 0, %[[VECTOR_MEMCHECK]] ] +; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4, %[[MIDDLE_BLOCK]] ], [ 0, %[[STRIDES_CHECK]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] ; COMPARE-NO-MV: [[HEADER]]: ; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] @@ -2949,7 +2855,7 @@ define void @stride_btc_independent_memdep_triple_check(ptr %p, ptr noalias %p2, ; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] ; COMPARE-NO-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8 ; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] -; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP32:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP38:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; @@ -2958,11 +2864,10 @@ define void @stride_btc_independent_memdep_triple_check(ptr %p, ptr noalias %p2, ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] ; COMPARE-LAA-MV-NEXT: [[P_OUT:%.*]] = getelementptr i8, ptr [[P2]], i64 [[OUT_OFFSET]] ; COMPARE-LAA-MV-NEXT: [[N:%.*]] = add i64 [[STRIDE]], 3 -; COMPARE-LAA-MV-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) -; 
COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] +; COMPARE-LAA-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-LAA-MV: [[STRIDES_CHECK]]: +; COMPARE-LAA-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_MEMCHECK:.*]] ; COMPARE-LAA-MV: [[VECTOR_MEMCHECK]]: ; COMPARE-LAA-MV-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[OUT_OFFSET]], 32 ; COMPARE-LAA-MV-NEXT: br i1 [[DIFF_CHECK]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] @@ -2975,9 +2880,9 @@ define void @stride_btc_independent_memdep_triple_check(ptr %p, ptr noalias %p2, ; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP0]], ptr [[P_OUT]], align 8 ; COMPARE-LAA-MV-NEXT: br label %[[MIDDLE_BLOCK:.*]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: -; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-LAA-MV-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]] ; COMPARE-LAA-MV: [[SCALAR_PH]]: -; COMPARE-LAA-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, %[[VECTOR_SCEVCHECK]] ], [ 0, %[[VECTOR_MEMCHECK]] ] +; COMPARE-LAA-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4, %[[MIDDLE_BLOCK]] ], [ 0, %[[STRIDES_CHECK]] ], [ 0, %[[VECTOR_MEMCHECK]] ] ; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] ; COMPARE-LAA-MV: [[HEADER]]: ; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] @@ -2991,7 +2896,7 @@ define void @stride_btc_independent_memdep_triple_check(ptr %p, ptr noalias %p2, ; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] ; COMPARE-LAA-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8 ; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], [[N]] -; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label 
%[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP37:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP38:![0-9]+]] ; COMPARE-LAA-MV: [[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; @@ -3062,7 +2967,7 @@ define void @actual_stride_not_in_ir(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-NO-MV-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP33:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-NO-MV: [[EXIT]]: @@ -3105,7 +3010,7 @@ define void @actual_stride_not_in_ir(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-LAA-MV-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-LAA-MV: [[EXIT]]: @@ -3141,49 +3046,45 @@ define void @nd_array_last_idx(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-NO-MV-LABEL: define void @nd_array_last_idx( ; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; 
COMPARE-NO-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-NO-MV: [[STRIDES_CHECK]]: +; COMPARE-NO-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-NO-MV: [[VECTOR_PH]]: -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-NO-MV: [[VECTOR_BODY]]: ; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 42, i64 [[TMP1]] -; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 42, i64 [[TMP2]] -; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 42, i64 [[TMP3]] -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 42, i64 [[TMP4]] -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = 
insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 42, i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP1]], align 8 ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) -; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]] +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[GEP_LD:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 42, i64 [[IDX]] +; COMPARE-NO-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; 
COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP41:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; ; COMPARE-LAA-MV-LABEL: define void @nd_array_last_idx( ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-LAA-MV: [[STRIDES_CHECK]]: +; COMPARE-LAA-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: @@ -3194,7 +3095,7 @@ define void @nd_array_last_idx(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-LAA-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP1]], align 8 ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP39:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-LAA-MV: [[SCALAR_PH]]: @@ -3208,7 +3109,7 @@ define void @nd_array_last_idx(ptr noalias %p.out, ptr %p, i64 %stride) { ; 
COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] ; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 ; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP40:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP41:![0-9]+]] ; COMPARE-LAA-MV: [[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; @@ -3271,7 +3172,7 @@ define void @nd_array_non_last_idx(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP35:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-NO-MV: [[EXIT]]: @@ -3280,48 +3181,39 @@ define void @nd_array_non_last_idx(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-LAA-MV-LABEL: define void @nd_array_non_last_idx( ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> 
[[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: ; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 -; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 -; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3 -; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[INDEX]], i64 42 +; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 ; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[TMP1]], i64 42 ; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[TMP2]], i64 42 ; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[TMP3]], i64 42 -; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = load i64, ptr [[TMP4]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[TMP4]], i64 42 ; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 ; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 ; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 -; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = insertelement <4 x i64> poison, i64 [[TMP8]], i32 0 -; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = insertelement <4 
x i64> [[TMP12]], i64 [[TMP9]], i32 1 -; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 2 -; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 3 -; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP15]], ptr [[TMP16]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i64> [[TMP16]], ptr [[TMP17]], align 8 ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP17]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP41:![0-9]+]] +; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) +; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] -; COMPARE-LAA-MV: [[SCALAR_PH]]: -; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] -; COMPARE-LAA-MV: [[HEADER]]: -; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] -; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 -; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] -; COMPARE-LAA-MV-NEXT: 
[[GEP_LD:%.*]] = getelementptr [256 x [256 x i64]], ptr [[P]], i64 1, i64 [[IDX]], i64 42 -; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 -; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] -; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 -; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP42:![0-9]+]] ; COMPARE-LAA-MV: [[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; @@ -3382,7 +3274,7 @@ define void @nd_array_multiple_idxs(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP43:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-NO-MV: [[EXIT]]: @@ -3454,50 +3346,46 @@ define void @sext_stride(ptr noalias %p.out, ptr %p, i32 %stride.i32) { ; COMPARE-NO-MV-LABEL: define void @sext_stride( ; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i32 [[STRIDE_I32:%.*]]) { ; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i32 [[STRIDE_I32]], 1 +; COMPARE-NO-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-NO-MV: [[STRIDES_CHECK]]: +; COMPARE-NO-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-NO-MV: [[VECTOR_PH]]: -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[STRIDE_I32]], i64 0 -; COMPARE-NO-MV-NEXT: 
[[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer -; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = sext <4 x i32> [[BROADCAST_SPLAT]] to <4 x i64> ; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-NO-MV: [[VECTOR_BODY]]: ; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[TMP0]] -; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] -; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP5]] -; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P_OUT]], 
i64 [[INDEX]] -; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP18]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP1]], align 8 ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) -; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP37:![0-9]+]] +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[STRIDE:%.*]] = sext i32 [[STRIDE_I32]] to i64 +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-NO-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP45:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; ; COMPARE-LAA-MV-LABEL: 
define void @sext_stride( ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i32 [[STRIDE_I32:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[STRIDE_I32]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i32 [[STRIDE_I32]], 1 +; COMPARE-LAA-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-LAA-MV: [[STRIDES_CHECK]]: +; COMPARE-LAA-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: @@ -3555,7 +3443,10 @@ define void @trunc_stride(ptr noalias %p.out, ptr %p, i64 %stride.i64) { ; COMPARE-NO-MV-LABEL: define void @trunc_stride( ; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE_I64:%.*]]) { ; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-NO-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-NO-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE_I64]], 1 +; COMPARE-NO-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-NO-MV: [[STRIDES_CHECK]]: +; COMPARE-NO-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]] ; COMPARE-NO-MV: [[VECTOR_SCEVCHECK]]: ; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = trunc i64 [[STRIDE_I64]] to i32 ; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = sub i32 0, [[TMP0]] @@ -3569,44 +3460,25 @@ define void @trunc_stride(ptr noalias %p.out, ptr %p, i64 %stride.i64) { ; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = icmp sgt i32 [[TMP4]], 0 ; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = select i1 [[TMP2]], i1 [[TMP6]], i1 [[TMP5]] ; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]] -; COMPARE-NO-MV-NEXT: br i1 [[TMP8]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; 
COMPARE-NO-MV-NEXT: br i1 [[TMP8]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; COMPARE-NO-MV: [[VECTOR_PH]]: -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE_I64]], i64 0 -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = trunc <4 x i64> [[BROADCAST_SPLAT]] to <4 x i32> ; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-NO-MV: [[VECTOR_BODY]]: ; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = mul <4 x i32> [[VEC_IND]], [[TMP9]] -; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = extractelement <4 x i32> [[TMP10]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = extractelement <4 x i32> [[TMP10]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = extractelement <4 x i32> [[TMP10]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = extractelement <4 x i32> [[TMP10]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P]], i32 [[TMP11]] -; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[P]], i32 [[TMP12]] -; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[P]], i32 [[TMP13]] -; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[P]], i32 [[TMP14]] -; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP15]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP16]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP17]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP18]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP23:%.*]] = insertelement <4 x i32> poison, i32 [[TMP19]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> [[TMP23]], i32 
[[TMP20]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP25:%.*]] = insertelement <4 x i32> [[TMP24]], i32 [[TMP21]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP26:%.*]] = insertelement <4 x i32> [[TMP25]], i32 [[TMP22]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP27:%.*]] = getelementptr i32, ptr [[P_OUT]], i32 [[INDEX]] -; COMPARE-NO-MV-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[P]], i32 [[INDEX]] +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP9]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[P_OUT]], i32 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i32> [[WIDE_LOAD]], ptr [[TMP10]], align 8 ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 -; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i32> [[VEC_IND]], splat (i32 4) -; COMPARE-NO-MV-NEXT: [[TMP28:%.*]] = icmp eq i32 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP28]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]] +; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP46:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[STRIDES_CHECK]] ], [ 0, %[[VECTOR_SCEVCHECK]] ] ; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] ; COMPARE-NO-MV: [[HEADER]]: -; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] ; COMPARE-NO-MV-NEXT: [[STRIDE:%.*]] = trunc i64 [[STRIDE_I64]] to i32 ; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 ; COMPARE-NO-MV-NEXT: [[IDX:%.*]] = mul i32 [[IV]], [[STRIDE]] @@ -3615,34 +3487,49 @@ define void @trunc_stride(ptr noalias 
%p.out, ptr %p, i64 %stride.i64) { ; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i32, ptr [[P_OUT]], i32 [[IV]] ; COMPARE-NO-MV-NEXT: store i32 [[LD]], ptr [[GEP_ST]], align 8 ; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i32 [[IV_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP39:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP47:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; ; COMPARE-LAA-MV-LABEL: define void @trunc_stride( ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE_I64:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-LAA-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE_I64]], 1 +; COMPARE-LAA-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-LAA-MV: [[STRIDES_CHECK]]: +; COMPARE-LAA-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]] ; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE_I64]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = trunc i64 [[STRIDE_I64]] to i32 +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = sub i32 0, [[TMP0]] +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = icmp slt i32 [[TMP0]], 0 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 [[TMP1]], i32 [[TMP0]] +; COMPARE-LAA-MV-NEXT: [[MUL:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[TMP3]], i32 127) +; COMPARE-LAA-MV-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL]], 0 +; COMPARE-LAA-MV-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL]], 1 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = sub i32 0, [[MUL_RESULT]] +; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = icmp slt i32 [[MUL_RESULT]], 0 +; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = icmp sgt 
i32 [[TMP4]], 0 +; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = select i1 [[TMP2]], i1 [[TMP6]], i1 [[TMP5]] +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = or i1 [[TMP7]], [[MUL_OVERFLOW]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP8]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: ; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = getelementptr i32, ptr [[P]], i32 [[INDEX]] -; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 8 -; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = getelementptr i32, ptr [[P_OUT]], i32 [[INDEX]] -; COMPARE-LAA-MV-NEXT: store <4 x i32> [[WIDE_LOAD]], ptr [[TMP1]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[P]], i32 [[INDEX]] +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP9]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = getelementptr i32, ptr [[P_OUT]], i32 [[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i32> [[WIDE_LOAD]], ptr [[TMP10]], align 8 ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 -; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP46:![0-9]+]] +; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP11]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP46:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[STRIDES_CHECK]] ], [ 0, %[[VECTOR_SCEVCHECK]] ] ; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] ; COMPARE-LAA-MV: [[HEADER]]: -; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], 
%[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] ; COMPARE-LAA-MV-NEXT: [[STRIDE:%.*]] = trunc i64 [[STRIDE_I64]] to i32 ; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 ; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i32 [[IV]], [[STRIDE]] @@ -3685,7 +3572,10 @@ define void @trunc_ext_stride(ptr noalias %p.out, ptr %p0, ptr %p1, i32 %stride) ; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] ; COMPARE-NO-MV-NEXT: [[STRIDE_TRUNC:%.*]] = trunc i32 [[STRIDE]] to i16 ; COMPARE-NO-MV-NEXT: [[STRIDE_EXT:%.*]] = sext i32 [[STRIDE]] to i64 -; COMPARE-NO-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-NO-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i32 [[STRIDE]], 1 +; COMPARE-NO-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-NO-MV: [[STRIDES_CHECK]]: +; COMPARE-NO-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]] ; COMPARE-NO-MV: [[VECTOR_SCEVCHECK]]: ; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = sub i16 0, [[STRIDE_TRUNC]] ; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = icmp slt i16 [[STRIDE_TRUNC]], 0 @@ -3698,66 +3588,31 @@ define void @trunc_ext_stride(ptr noalias %p.out, ptr %p0, ptr %p1, i32 %stride) ; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = icmp sgt i16 [[TMP3]], 0 ; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = select i1 [[TMP1]], i1 [[TMP5]], i1 [[TMP4]] ; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = or i1 [[TMP6]], [[MUL_OVERFLOW]] -; COMPARE-NO-MV-NEXT: br i1 [[TMP7]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP7]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; COMPARE-NO-MV: [[VECTOR_PH]]: -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i16> poison, i16 [[STRIDE_TRUNC]], i64 0 -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i16> [[BROADCAST_SPLATINSERT]], <4 x i16> poison, <4 x i32> zeroinitializer -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i64> poison, i64 
[[STRIDE_EXT]], i64 0 -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT1]], <4 x i64> poison, <4 x i32> zeroinitializer ; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-NO-MV: [[VECTOR_BODY]]: ; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[VEC_IND3:%.*]] = phi <4 x i16> [ <i16 0, i16 1, i16 2, i16 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT4:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = sext <4 x i32> [[VEC_IND]] to <4 x i64> -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = mul <4 x i16> [[VEC_IND3]], [[BROADCAST_SPLAT]] -; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = extractelement <4 x i16> [[TMP9]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = extractelement <4 x i16> [[TMP9]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = extractelement <4 x i16> [[TMP9]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = extractelement <4 x i16> [[TMP9]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = mul <4 x i64> [[TMP8]], [[BROADCAST_SPLAT2]] -; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = extractelement <4 x i64> [[TMP14]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = extractelement <4 x i64> [[TMP14]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = extractelement <4 x i64> [[TMP14]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = extractelement <4 x i64> [[TMP14]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[P0]], i16 [[TMP10]] -; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = getelementptr i32, ptr [[P0]], i16 [[TMP11]] -; COMPARE-NO-MV-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[P0]], i16 [[TMP12]] -; COMPARE-NO-MV-NEXT: [[TMP22:%.*]] = getelementptr i32, ptr [[P0]], i16 [[TMP13]] -; COMPARE-NO-MV-NEXT: [[TMP23:%.*]] = getelementptr i32, ptr [[P0]], i64 [[TMP15]] -; COMPARE-NO-MV-NEXT: 
[[TMP24:%.*]] = getelementptr i32, ptr [[P0]], i64 [[TMP16]] -; COMPARE-NO-MV-NEXT: [[TMP25:%.*]] = getelementptr i32, ptr [[P0]], i64 [[TMP17]] -; COMPARE-NO-MV-NEXT: [[TMP26:%.*]] = getelementptr i32, ptr [[P0]], i64 [[TMP18]] -; COMPARE-NO-MV-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP19]], align 4 -; COMPARE-NO-MV-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP20]], align 4 -; COMPARE-NO-MV-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP21]], align 4 -; COMPARE-NO-MV-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP22]], align 4 -; COMPARE-NO-MV-NEXT: [[TMP31:%.*]] = insertelement <4 x i32> poison, i32 [[TMP27]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP32:%.*]] = insertelement <4 x i32> [[TMP31]], i32 [[TMP28]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP33:%.*]] = insertelement <4 x i32> [[TMP32]], i32 [[TMP29]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP34:%.*]] = insertelement <4 x i32> [[TMP33]], i32 [[TMP30]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP23]], align 4 -; COMPARE-NO-MV-NEXT: [[TMP36:%.*]] = load i32, ptr [[TMP24]], align 4 -; COMPARE-NO-MV-NEXT: [[TMP37:%.*]] = load i32, ptr [[TMP25]], align 4 -; COMPARE-NO-MV-NEXT: [[TMP38:%.*]] = load i32, ptr [[TMP26]], align 4 -; COMPARE-NO-MV-NEXT: [[TMP39:%.*]] = insertelement <4 x i32> poison, i32 [[TMP35]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP40:%.*]] = insertelement <4 x i32> [[TMP39]], i32 [[TMP36]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP41:%.*]] = insertelement <4 x i32> [[TMP40]], i32 [[TMP37]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP42:%.*]] = insertelement <4 x i32> [[TMP41]], i32 [[TMP38]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP43:%.*]] = add <4 x i32> [[TMP34]], [[TMP42]] -; COMPARE-NO-MV-NEXT: [[TMP44:%.*]] = getelementptr i32, ptr [[P_OUT]], i32 [[INDEX]] -; COMPARE-NO-MV-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP44]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = trunc i32 [[INDEX]] to i16 +; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = sext i32 [[INDEX]] to i64 +; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = mul i16 [[TMP9]], [[STRIDE_TRUNC]] +; 
COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[P0]], i16 [[TMP10]] +; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[P0]], i64 [[TMP12]] +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 +; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = add <4 x i32> [[WIDE_LOAD]], [[WIDE_LOAD2]] +; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P_OUT]], i32 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 8 ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 -; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i32> [[VEC_IND]], splat (i32 4) -; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT4]] = add <4 x i16> [[VEC_IND3]], splat (i16 4) -; COMPARE-NO-MV-NEXT: [[TMP45:%.*]] = icmp eq i32 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP45]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]] +; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = icmp eq i32 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP48:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[STRIDES_CHECK]] ], [ 0, %[[VECTOR_SCEVCHECK]] ] ; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] ; COMPARE-NO-MV: [[HEADER]]: -; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] ; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 ; COMPARE-NO-MV-NEXT: [[IV_TRUNC:%.*]] = trunc i32 [[IV]] to i16 ; COMPARE-NO-MV-NEXT: [[IV_EXT:%.*]] = sext i32 [[IV]] to i64 @@ -3771,7 +3626,7 @@ define void @trunc_ext_stride(ptr noalias %p.out, ptr %p0, ptr %p1, i32 %stride) ; 
COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i32, ptr [[P_OUT]], i32 [[IV]] ; COMPARE-NO-MV-NEXT: store i32 [[VAL]], ptr [[GEP_ST]], align 8 ; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i32 [[IV_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP41:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP49:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; @@ -3780,33 +3635,47 @@ define void @trunc_ext_stride(ptr noalias %p.out, ptr %p0, ptr %p1, i32 %stride) ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] ; COMPARE-LAA-MV-NEXT: [[STRIDE_TRUNC:%.*]] = trunc i32 [[STRIDE]] to i16 ; COMPARE-LAA-MV-NEXT: [[STRIDE_EXT:%.*]] = sext i32 [[STRIDE]] to i64 -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] +; COMPARE-LAA-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i32 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-LAA-MV: [[STRIDES_CHECK]]: +; COMPARE-LAA-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_SCEVCHECK:.*]] ; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = sub i16 0, [[STRIDE_TRUNC]] +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = icmp slt i16 [[STRIDE_TRUNC]], 0 +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i16 [[TMP0]], i16 [[STRIDE_TRUNC]] +; COMPARE-LAA-MV-NEXT: [[MUL:%.*]] = call { i16, i1 } @llvm.umul.with.overflow.i16(i16 [[TMP2]], i16 127) +; COMPARE-LAA-MV-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i16, i1 } [[MUL]], 0 +; COMPARE-LAA-MV-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i16, i1 } [[MUL]], 1 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = sub i16 0, [[MUL_RESULT]] +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = icmp slt i16 [[MUL_RESULT]], 0 +; COMPARE-LAA-MV-NEXT: 
[[TMP5:%.*]] = icmp sgt i16 [[TMP3]], 0 +; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = select i1 [[TMP1]], i1 [[TMP5]], i1 [[TMP4]] +; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = or i1 [[TMP6]], [[MUL_OVERFLOW]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP7]], label %[[SCALAR_PH]], label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: ; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = trunc i32 [[INDEX]] to i16 -; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = sext i32 [[INDEX]] to i64 -; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = mul i16 [[TMP0]], [[STRIDE_TRUNC]] -; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = getelementptr i32, ptr [[P0]], i16 [[TMP2]] -; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = getelementptr i32, ptr [[P0]], i64 [[TMP1]] -; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP3]], align 4 -; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = add <4 x i32> [[WIDE_LOAD]], [[WIDE_LOAD1]] -; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[P_OUT]], i32 [[INDEX]] -; COMPARE-LAA-MV-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = trunc i32 [[INDEX]] to i16 +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = sext i32 [[INDEX]] to i64 +; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = mul i16 [[TMP9]], [[STRIDE_TRUNC]] +; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[P0]], i16 [[TMP10]] +; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[P0]], i64 [[TMP12]] +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 +; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = add <4 x i32> [[WIDE_LOAD]], [[WIDE_LOAD2]] +; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P_OUT]], i32 
[[INDEX]] +; COMPARE-LAA-MV-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 8 ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 -; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = icmp eq i32 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP7]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP48:![0-9]+]] +; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = icmp eq i32 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP48:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-LAA-MV: [[SCALAR_PH]]: +; COMPARE-LAA-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 0, %[[STRIDES_CHECK]] ], [ 0, %[[VECTOR_SCEVCHECK]] ] ; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] ; COMPARE-LAA-MV: [[HEADER]]: -; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i32 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] ; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i32 [[IV]], 1 ; COMPARE-LAA-MV-NEXT: [[IV_TRUNC:%.*]] = trunc i32 [[IV]] to i16 ; COMPARE-LAA-MV-NEXT: [[IV_EXT:%.*]] = sext i32 [[IV]] to i64 @@ -3922,7 +3791,7 @@ define void @basic_masked(ptr noalias %p.out, ptr %p, i64 %stride, i64 %x) { ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-NO-MV-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP50:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-NO-MV: [[EXIT]]: @@ -3931,81 +3800,68 @@ define void @basic_masked(ptr noalias %p.out, ptr %p, i64 
%stride, i64 %x) { ; COMPARE-LAA-MV-LABEL: define void @basic_masked( ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]], i64 [[X:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: ; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[X]], i64 0 ; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 +; COMPARE-LAA-MV-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT1]], <4 x i64> poison, <4 x i32> zeroinitializer ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: -; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE6:.*]] ] -; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE6]] ] +; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[PRED_STORE_CONTINUE8:.*]] ] +; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[PRED_STORE_CONTINUE8]] ] ; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = icmp sge <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i1> [[TMP0]], i32 0 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP1]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] +; 
COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT2]] +; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i1> [[TMP0]], i32 0 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP2]], label %[[PRED_STORE_IF:.*]], label %[[PRED_STORE_CONTINUE:.*]] ; COMPARE-LAA-MV: [[PRED_STORE_IF]]: -; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 0 -; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] -; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = load i64, ptr [[TMP3]], align 8 -; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP2]] -; COMPARE-LAA-MV-NEXT: store i64 [[TMP4]], ptr [[TMP5]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0 +; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] +; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = load i64, ptr [[TMP4]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 0 +; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP6]] +; COMPARE-LAA-MV-NEXT: store i64 [[TMP5]], ptr [[TMP7]], align 8 ; COMPARE-LAA-MV-NEXT: br label %[[PRED_STORE_CONTINUE]] ; COMPARE-LAA-MV: [[PRED_STORE_CONTINUE]]: -; COMPARE-LAA-MV-NEXT: [[TMP6:%.*]] = extractelement <4 x i1> [[TMP0]], i32 1 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP6]], label %[[PRED_STORE_IF1:.*]], label %[[PRED_STORE_CONTINUE2:.*]] -; COMPARE-LAA-MV: [[PRED_STORE_IF1]]: -; COMPARE-LAA-MV-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 1 -; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP7]] -; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP8]], align 8 -; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP7]] -; COMPARE-LAA-MV-NEXT: store i64 [[TMP9]], ptr [[TMP10]], align 8 -; COMPARE-LAA-MV-NEXT: br label %[[PRED_STORE_CONTINUE2]] -; COMPARE-LAA-MV: [[PRED_STORE_CONTINUE2]]: -; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = extractelement <4 x i1> [[TMP0]], i32 2 -; 
COMPARE-LAA-MV-NEXT: br i1 [[TMP11]], label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4:.*]] +; COMPARE-LAA-MV-NEXT: [[TMP8:%.*]] = extractelement <4 x i1> [[TMP0]], i32 1 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP8]], label %[[PRED_STORE_IF3:.*]], label %[[PRED_STORE_CONTINUE4:.*]] ; COMPARE-LAA-MV: [[PRED_STORE_IF3]]: -; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 2 -; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP12]] -; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = load i64, ptr [[TMP13]], align 8 -; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP12]] -; COMPARE-LAA-MV-NEXT: store i64 [[TMP14]], ptr [[TMP15]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP9:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1 +; COMPARE-LAA-MV-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP9]] +; COMPARE-LAA-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP10]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 1 +; COMPARE-LAA-MV-NEXT: [[TMP13:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP12]] +; COMPARE-LAA-MV-NEXT: store i64 [[TMP11]], ptr [[TMP13]], align 8 ; COMPARE-LAA-MV-NEXT: br label %[[PRED_STORE_CONTINUE4]] ; COMPARE-LAA-MV: [[PRED_STORE_CONTINUE4]]: -; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = extractelement <4 x i1> [[TMP0]], i32 3 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP16]], label %[[PRED_STORE_IF5:.*]], label %[[PRED_STORE_CONTINUE6]] +; COMPARE-LAA-MV-NEXT: [[TMP14:%.*]] = extractelement <4 x i1> [[TMP0]], i32 2 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP14]], label %[[PRED_STORE_IF5:.*]], label %[[PRED_STORE_CONTINUE6:.*]] ; COMPARE-LAA-MV: [[PRED_STORE_IF5]]: -; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 3 -; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP17]] -; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = load i64, ptr [[TMP18]], align 8 -; COMPARE-LAA-MV-NEXT: [[TMP20:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP17]] -; COMPARE-LAA-MV-NEXT: store 
i64 [[TMP19]], ptr [[TMP20]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP15:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2 +; COMPARE-LAA-MV-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP15]] +; COMPARE-LAA-MV-NEXT: [[TMP17:%.*]] = load i64, ptr [[TMP16]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], 2 +; COMPARE-LAA-MV-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP18]] +; COMPARE-LAA-MV-NEXT: store i64 [[TMP17]], ptr [[TMP19]], align 8 ; COMPARE-LAA-MV-NEXT: br label %[[PRED_STORE_CONTINUE6]] ; COMPARE-LAA-MV: [[PRED_STORE_CONTINUE6]]: +; COMPARE-LAA-MV-NEXT: [[TMP20:%.*]] = extractelement <4 x i1> [[TMP0]], i32 3 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP20]], label %[[PRED_STORE_IF7:.*]], label %[[PRED_STORE_CONTINUE8]] +; COMPARE-LAA-MV: [[PRED_STORE_IF7]]: +; COMPARE-LAA-MV-NEXT: [[TMP21:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3 +; COMPARE-LAA-MV-NEXT: [[TMP22:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP21]] +; COMPARE-LAA-MV-NEXT: [[TMP23:%.*]] = load i64, ptr [[TMP22]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP24:%.*]] = add i64 [[INDEX]], 3 +; COMPARE-LAA-MV-NEXT: [[TMP25:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP24]] +; COMPARE-LAA-MV-NEXT: store i64 [[TMP23]], ptr [[TMP25]], align 8 +; COMPARE-LAA-MV-NEXT: br label %[[PRED_STORE_CONTINUE8]] +; COMPARE-LAA-MV: [[PRED_STORE_CONTINUE8]]: ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) -; COMPARE-LAA-MV-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP21]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP50:![0-9]+]] +; COMPARE-LAA-MV-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-LAA-MV-NEXT: br i1 [[TMP26]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP50:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] 
-; COMPARE-LAA-MV: [[SCALAR_PH]]: -; COMPARE-LAA-MV-NEXT: br label %[[HEADER:.*]] -; COMPARE-LAA-MV: [[HEADER]]: -; COMPARE-LAA-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ] -; COMPARE-LAA-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 -; COMPARE-LAA-MV-NEXT: [[C:%.*]] = icmp sge i64 [[IV]], [[X]] -; COMPARE-LAA-MV-NEXT: br i1 [[C]], label %[[IF:.*]], label %[[LATCH]] -; COMPARE-LAA-MV: [[IF]]: -; COMPARE-LAA-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] -; COMPARE-LAA-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] -; COMPARE-LAA-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 -; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] -; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 -; COMPARE-LAA-MV-NEXT: br label %[[LATCH]] -; COMPARE-LAA-MV: [[LATCH]]: -; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP51:![0-9]+]] ; COMPARE-LAA-MV: [[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; @@ -4069,7 +3925,7 @@ define void @stride_poison(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP43:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP51:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-NO-MV: [[EXIT]]: @@ -4106,7 +3962,7 @@ define void @stride_poison(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 
x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-LAA-MV-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP52:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP18]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP51:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-LAA-MV: [[EXIT]]: @@ -4139,49 +3995,45 @@ define void @basic_strided_store(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-NO-MV-LABEL: define void @basic_strided_store( ; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-NO-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-NO-MV: [[STRIDES_CHECK]]: +; COMPARE-NO-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-NO-MV: [[VECTOR_PH]]: -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-NO-MV: [[VECTOR_BODY]]: ; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], 
i32 2 -; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[INDEX]] -; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP5]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = extractelement <4 x i64> [[WIDE_LOAD]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = extractelement <4 x i64> [[WIDE_LOAD]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = extractelement <4 x i64> [[WIDE_LOAD]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = extractelement <4 x i64> [[WIDE_LOAD]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP1]] -; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP2]] -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP3]] -; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[TMP4]] -; COMPARE-NO-MV-NEXT: store i64 [[TMP6]], ptr [[TMP10]], align 8 -; COMPARE-NO-MV-NEXT: store i64 [[TMP7]], ptr [[TMP11]], align 8 -; COMPARE-NO-MV-NEXT: store i64 [[TMP8]], ptr [[TMP12]], align 8 -; COMPARE-NO-MV-NEXT: store i64 [[TMP9]], ptr [[TMP13]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP1]], align 8 ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) -; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP44:![0-9]+]] +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], 
label %[[VECTOR_BODY]], !llvm.loop [[LOOP52:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IDX]] +; COMPARE-NO-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP53:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; ; COMPARE-LAA-MV-LABEL: define void @basic_strided_store( ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-LAA-MV: [[STRIDES_CHECK]]: +; COMPARE-LAA-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: @@ -4192,7 +4044,7 @@ define void @basic_strided_store(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-LAA-MV-NEXT: store <4 x i64> 
[[WIDE_LOAD]], ptr [[TMP1]], align 8 ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP53:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP52:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-LAA-MV: [[SCALAR_PH]]: @@ -4206,7 +4058,7 @@ define void @basic_strided_store(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IDX]] ; COMPARE-LAA-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 ; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP54:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP53:![0-9]+]] ; COMPARE-LAA-MV: [[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; @@ -4238,64 +4090,68 @@ define void @ptr_vec_use(ptr noalias %p.out, ptr noalias %p.ptr.out, ptr %p, i64 ; COMPARE-NO-MV-LABEL: define void @ptr_vec_use( ; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr noalias [[P_PTR_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-NO-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-NO-MV: [[STRIDES_CHECK]]: +; COMPARE-NO-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-NO-MV: [[VECTOR_PH]]: -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], 
<4 x i64> poison, <4 x i32> zeroinitializer ; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-NO-MV: [[VECTOR_BODY]]: ; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P]], <4 x i64> [[TMP0]] -; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x ptr> [[TMP1]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x ptr> [[TMP1]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x ptr> [[TMP1]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x ptr> [[TMP1]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = load i64, ptr [[TMP2]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = load i64, ptr [[TMP3]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = load i64, ptr [[TMP4]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = insertelement <4 x i64> poison, i64 [[TMP6]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = insertelement <4 x i64> [[TMP10]], i64 [[TMP7]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = insertelement <4 x i64> [[TMP11]], i64 [[TMP8]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> [[TMP12]], i64 [[TMP9]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP13]], ptr [[TMP14]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = getelementptr ptr, ptr [[P_PTR_OUT]], i64 [[INDEX]] -; COMPARE-NO-MV-NEXT: store <4 x ptr> [[TMP1]], ptr [[TMP15]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P]], <4 x i64> [[VEC_IND]] +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = extractelement <4 x 
ptr> [[TMP1]], i32 0 +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP2]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = getelementptr ptr, ptr [[P_PTR_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x ptr> [[TMP1]], ptr [[TMP3]], align 8 ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) -; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP45:![0-9]+]] +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP54:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-NO-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: store i64 [[LD]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[GEP_PTR_ST:%.*]] = getelementptr ptr, ptr [[P_PTR_OUT]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: store ptr [[GEP_LD]], ptr [[GEP_PTR_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], 
!llvm.loop [[LOOP55:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; ; COMPARE-LAA-MV-LABEL: define void @ptr_vec_use( ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr noalias [[P_PTR_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-LAA-MV: [[STRIDES_CHECK]]: +; COMPARE-LAA-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: ; COMPARE-LAA-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; COMPARE-LAA-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P]], <4 x i64> [[VEC_IND]] -; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x ptr> [[TMP0]], i32 0 -; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP1]], align 8 +; COMPARE-LAA-MV-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[P]], <4 x i64> [[VEC_IND]] +; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = extractelement <4 x ptr> [[TMP1]], i32 0 +; COMPARE-LAA-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8 ; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] ; COMPARE-LAA-MV-NEXT: store <4 x i64> [[WIDE_LOAD]], ptr [[TMP2]], align 8 ; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = getelementptr ptr, ptr [[P_PTR_OUT]], i64 [[INDEX]] -; COMPARE-LAA-MV-NEXT: store 
<4 x ptr> [[TMP0]], ptr [[TMP3]], align 8 +; COMPARE-LAA-MV-NEXT: store <4 x ptr> [[TMP1]], ptr [[TMP3]], align 8 ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-LAA-MV-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP55:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP54:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-LAA-MV: [[SCALAR_PH]]: @@ -4311,7 +4167,7 @@ define void @ptr_vec_use(ptr noalias %p.out, ptr noalias %p.ptr.out, ptr %p, i64 ; COMPARE-LAA-MV-NEXT: [[GEP_PTR_ST:%.*]] = getelementptr ptr, ptr [[P_PTR_OUT]], i64 [[IV]] ; COMPARE-LAA-MV-NEXT: store ptr [[GEP_LD]], ptr [[GEP_PTR_ST]], align 8 ; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP56:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP55:![0-9]+]] ; COMPARE-LAA-MV: [[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; @@ -4346,50 +4202,49 @@ define void @stride_idx_vec_use(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-NO-MV-LABEL: define void @stride_idx_vec_use( ; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-NO-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-NO-MV: [[STRIDES_CHECK]]: +; COMPARE-NO-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-NO-MV: [[VECTOR_PH]]: -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = 
insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-NO-MV: [[VECTOR_BODY]]: ; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP0]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP0]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP0]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] -; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP2]] -; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = load i64, ptr [[TMP5]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = insertelement <4 x i64> poison, i64 [[TMP9]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> [[TMP13]], i64 [[TMP10]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = mul <4 x i64> [[TMP16]], [[TMP0]] -; 
COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP17]], ptr [[TMP18]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[P]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP0]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = mul <4 x i64> [[WIDE_LOAD]], [[VEC_IND]] +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP1]], ptr [[TMP2]], align 8 ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) -; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP19]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP46:![0-9]+]] +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP56:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[IDX:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-NO-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-NO-MV-NEXT: [[VAL:%.*]] = mul i64 [[LD]], [[IDX]] +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], 
label %[[EXIT]], !llvm.loop [[LOOP57:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; ; COMPARE-LAA-MV-LABEL: define void @stride_idx_vec_use( ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-LAA-MV: [[STRIDES_CHECK]]: +; COMPARE-LAA-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: @@ -4403,7 +4258,7 @@ define void @stride_idx_vec_use(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-LAA-MV-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP57:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP56:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-LAA-MV: [[SCALAR_PH]]: @@ -4418,7 +4273,7 @@ define void @stride_idx_vec_use(ptr noalias %p.out, ptr %p, i64 %stride) { ; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] ; COMPARE-LAA-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8 ; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label 
%[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP58:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP57:![0-9]+]] ; COMPARE-LAA-MV: [[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; @@ -4451,51 +4306,52 @@ define void @offset_stride_idx_vec_use(ptr noalias %p.out, ptr %p, i64 %stride) ; COMPARE-NO-MV-LABEL: define void @offset_stride_idx_vec_use( ; COMPARE-NO-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-NO-MV-NEXT: br label %[[VECTOR_PH:.*]] +; COMPARE-NO-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-NO-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-NO-MV: [[STRIDES_CHECK]]: +; COMPARE-NO-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-NO-MV: [[VECTOR_PH]]: -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[STRIDE]], i64 0 -; COMPARE-NO-MV-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer ; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-NO-MV: [[VECTOR_BODY]]: ; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] ; COMPARE-NO-MV-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = mul <4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]] -; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = add <4 x i64> [[TMP0]], splat (i64 42) -; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = extractelement <4 x i64> [[TMP1]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = extractelement <4 x i64> [[TMP1]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = extractelement <4 x i64> [[TMP1]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = extractelement <4 x i64> [[TMP1]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = getelementptr i64, 
ptr [[P]], i64 [[TMP2]] -; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP3]] -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP4]] -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP5]] -; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = load i64, ptr [[TMP6]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = load i64, ptr [[TMP7]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP8]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = load i64, ptr [[TMP9]], align 8 -; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = insertelement <4 x i64> poison, i64 [[TMP10]], i32 0 -; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = insertelement <4 x i64> [[TMP14]], i64 [[TMP11]], i32 1 -; COMPARE-NO-MV-NEXT: [[TMP16:%.*]] = insertelement <4 x i64> [[TMP15]], i64 [[TMP12]], i32 2 -; COMPARE-NO-MV-NEXT: [[TMP17:%.*]] = insertelement <4 x i64> [[TMP16]], i64 [[TMP13]], i32 3 -; COMPARE-NO-MV-NEXT: [[TMP18:%.*]] = mul <4 x i64> [[TMP17]], [[TMP1]] -; COMPARE-NO-MV-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] -; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP18]], ptr [[TMP19]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = add <4 x i64> [[VEC_IND]], splat (i64 42) +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = extractelement <4 x i64> [[TMP0]], i32 0 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP1]] +; COMPARE-NO-MV-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8 +; COMPARE-NO-MV-NEXT: [[TMP3:%.*]] = mul <4 x i64> [[WIDE_LOAD]], [[TMP0]] +; COMPARE-NO-MV-NEXT: [[TMP4:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[INDEX]] +; COMPARE-NO-MV-NEXT: store <4 x i64> [[TMP3]], ptr [[TMP4]], align 8 ; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-NO-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) -; COMPARE-NO-MV-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-NO-MV-NEXT: br i1 [[TMP20]], label 
%[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP47:![0-9]+]] +; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP58:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: ; COMPARE-NO-MV-NEXT: br label %[[EXIT:.*]] +; COMPARE-NO-MV: [[SCALAR_PH]]: +; COMPARE-NO-MV-NEXT: br label %[[HEADER:.*]] +; COMPARE-NO-MV: [[HEADER]]: +; COMPARE-NO-MV-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[HEADER]] ] +; COMPARE-NO-MV-NEXT: [[IV_NEXT]] = add nsw i64 [[IV]], 1 +; COMPARE-NO-MV-NEXT: [[IV_TIMES_STRIDE:%.*]] = mul i64 [[IV]], [[STRIDE]] +; COMPARE-NO-MV-NEXT: [[IDX:%.*]] = add i64 [[IV_TIMES_STRIDE]], 42 +; COMPARE-NO-MV-NEXT: [[GEP_LD:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]] +; COMPARE-NO-MV-NEXT: [[LD:%.*]] = load i64, ptr [[GEP_LD]], align 8 +; COMPARE-NO-MV-NEXT: [[VAL:%.*]] = mul i64 [[LD]], [[IDX]] +; COMPARE-NO-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] +; COMPARE-NO-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8 +; COMPARE-NO-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 +; COMPARE-NO-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP59:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; ; COMPARE-LAA-MV-LABEL: define void @offset_stride_idx_vec_use( ; COMPARE-LAA-MV-SAME: ptr noalias [[P_OUT:%.*]], ptr [[P:%.*]], i64 [[STRIDE:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] -; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] -; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 -; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; COMPARE-LAA-MV-NEXT: [[STRIDES_MV_CHECK:%.*]] = icmp ne i64 [[STRIDE]], 1 +; COMPARE-LAA-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-LAA-MV: [[STRIDES_CHECK]]: +; 
COMPARE-LAA-MV-NEXT: br i1 [[STRIDES_MV_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-LAA-MV: [[VECTOR_BODY]]: @@ -4511,7 +4367,7 @@ define void @offset_stride_idx_vec_use(ptr noalias %p.out, ptr %p, i64 %stride) ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-LAA-MV-NEXT: [[VEC_IND_NEXT]] = add nsw <4 x i64> [[VEC_IND]], splat (i64 4) ; COMPARE-LAA-MV-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP59:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP58:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: ; COMPARE-LAA-MV-NEXT: br label %[[EXIT:.*]] ; COMPARE-LAA-MV: [[SCALAR_PH]]: @@ -4527,7 +4383,7 @@ define void @offset_stride_idx_vec_use(ptr noalias %p.out, ptr %p, i64 %stride) ; COMPARE-LAA-MV-NEXT: [[GEP_ST:%.*]] = getelementptr i64, ptr [[P_OUT]], i64 [[IV]] ; COMPARE-LAA-MV-NEXT: store i64 [[VAL]], ptr [[GEP_ST]], align 8 ; COMPARE-LAA-MV-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[IV_NEXT]], 128 -; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP60:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[EXITCOND]], label %[[HEADER]], label %[[EXIT]], !llvm.loop [[LOOP59:![0-9]+]] ; COMPARE-LAA-MV: [[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; @@ -4623,43 +4479,26 @@ exit: define void @test_rewrite_iv_scevs(i32 %start, ptr %dst) { ; COMPARE-NO-MV-LABEL: define void @test_rewrite_iv_scevs( ; COMPARE-NO-MV-SAME: i32 [[START:%.*]], ptr [[DST:%.*]]) { -; COMPARE-NO-MV-NEXT: [[ENTRY:.*]]: +; COMPARE-NO-MV-NEXT: [[ENTRY:.*:]] ; COMPARE-NO-MV-NEXT: [[START_EXT:%.*]] = zext i32 [[START]] to i64 -; COMPARE-NO-MV-NEXT: [[TMP0:%.*]] = sub i64 100, [[START_EXT]] -; COMPARE-NO-MV-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 4 +; 
COMPARE-NO-MV-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ne i32 [[START]], 1 +; COMPARE-NO-MV-NEXT: br label %[[STRIDES_CHECK:.*]] +; COMPARE-NO-MV: [[STRIDES_CHECK]]: ; COMPARE-NO-MV-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-NO-MV: [[VECTOR_PH]]: -; COMPARE-NO-MV-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 4 -; COMPARE-NO-MV-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] -; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = add i64 [[START_EXT]], [[N_VEC]] -; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = mul i64 [[N_VEC]], [[START_EXT]] ; COMPARE-NO-MV-NEXT: br label %[[VECTOR_BODY:.*]] ; COMPARE-NO-MV: [[VECTOR_BODY]]: -; COMPARE-NO-MV-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] -; COMPARE-NO-MV-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], [[START_EXT]] -; COMPARE-NO-MV-NEXT: [[TMP5:%.*]] = mul i64 1, [[START_EXT]] -; COMPARE-NO-MV-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], [[TMP5]] -; COMPARE-NO-MV-NEXT: [[TMP7:%.*]] = mul i64 2, [[START_EXT]] -; COMPARE-NO-MV-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], [[TMP7]] -; COMPARE-NO-MV-NEXT: [[TMP9:%.*]] = mul i64 3, [[START_EXT]] -; COMPARE-NO-MV-NEXT: [[TMP10:%.*]] = add i64 [[OFFSET_IDX]], [[TMP9]] -; COMPARE-NO-MV-NEXT: [[TMP11:%.*]] = getelementptr float, ptr [[DST]], i64 [[OFFSET_IDX]] -; COMPARE-NO-MV-NEXT: [[TMP12:%.*]] = getelementptr float, ptr [[DST]], i64 [[TMP6]] -; COMPARE-NO-MV-NEXT: [[TMP13:%.*]] = getelementptr float, ptr [[DST]], i64 [[TMP8]] -; COMPARE-NO-MV-NEXT: [[TMP14:%.*]] = getelementptr float, ptr [[DST]], i64 [[TMP10]] -; COMPARE-NO-MV-NEXT: store float 0.000000e+00, ptr [[TMP11]], align 4 -; COMPARE-NO-MV-NEXT: store float 0.000000e+00, ptr [[TMP12]], align 4 -; COMPARE-NO-MV-NEXT: store float 0.000000e+00, ptr [[TMP13]], align 4 -; COMPARE-NO-MV-NEXT: store float 0.000000e+00, ptr [[TMP14]], align 4 -; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; COMPARE-NO-MV-NEXT: [[TMP15:%.*]] = icmp eq i64 
[[INDEX_NEXT]], [[N_VEC]] -; COMPARE-NO-MV-NEXT: br i1 [[TMP15]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP48:![0-9]+]] +; COMPARE-NO-MV-NEXT: [[OFFSET_IDX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; COMPARE-NO-MV-NEXT: [[TMP1:%.*]] = getelementptr float, ptr [[DST]], i64 [[OFFSET_IDX]] +; COMPARE-NO-MV-NEXT: store <4 x float> zeroinitializer, ptr [[TMP1]], align 4 +; COMPARE-NO-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[OFFSET_IDX]], 4 +; COMPARE-NO-MV-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 96 +; COMPARE-NO-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP60:![0-9]+]] ; COMPARE-NO-MV: [[MIDDLE_BLOCK]]: -; COMPARE-NO-MV-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] -; COMPARE-NO-MV-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]] +; COMPARE-NO-MV-NEXT: br i1 false, label %[[EXIT:.*]], label %[[SCALAR_PH]] ; COMPARE-NO-MV: [[SCALAR_PH]]: -; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[TMP1]], %[[MIDDLE_BLOCK]] ], [ [[START_EXT]], %[[ENTRY]] ] -; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ] +; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 97, %[[MIDDLE_BLOCK]] ], [ [[START_EXT]], %[[STRIDES_CHECK]] ] +; COMPARE-NO-MV-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 96, %[[MIDDLE_BLOCK]] ], [ 0, %[[STRIDES_CHECK]] ] ; COMPARE-NO-MV-NEXT: br label %[[LOOP:.*]] ; COMPARE-NO-MV: [[LOOP]]: ; COMPARE-NO-MV-NEXT: [[IV_0:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_0_NEXT:%.*]], %[[LOOP]] ] @@ -4669,7 +4508,7 @@ define void @test_rewrite_iv_scevs(i32 %start, ptr %dst) { ; COMPARE-NO-MV-NEXT: [[IV_1_NEXT]] = add i64 [[IV_1]], [[START_EXT]] ; COMPARE-NO-MV-NEXT: [[IV_0_NEXT]] = add i64 [[IV_0]], 1 ; COMPARE-NO-MV-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_0_NEXT]], 100 -; COMPARE-NO-MV-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop 
[[LOOP49:![0-9]+]] +; COMPARE-NO-MV-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP61:![0-9]+]] ; COMPARE-NO-MV: [[EXIT]]: ; COMPARE-NO-MV-NEXT: ret void ; @@ -4677,10 +4516,9 @@ define void @test_rewrite_iv_scevs(i32 %start, ptr %dst) { ; COMPARE-LAA-MV-SAME: i32 [[START:%.*]], ptr [[DST:%.*]]) { ; COMPARE-LAA-MV-NEXT: [[ENTRY:.*:]] ; COMPARE-LAA-MV-NEXT: [[START_EXT:%.*]] = zext i32 [[START]] to i64 -; COMPARE-LAA-MV-NEXT: [[TMP0:%.*]] = sub i64 100, [[START_EXT]] +; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[START]], 1 ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_SCEVCHECK:.*]] ; COMPARE-LAA-MV: [[VECTOR_SCEVCHECK]]: -; COMPARE-LAA-MV-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i32 [[START]], 1 ; COMPARE-LAA-MV-NEXT: br i1 [[IDENT_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] ; COMPARE-LAA-MV: [[VECTOR_PH]]: ; COMPARE-LAA-MV-NEXT: br label %[[VECTOR_BODY:.*]] @@ -4690,9 +4528,9 @@ define void @test_rewrite_iv_scevs(i32 %start, ptr %dst) { ; COMPARE-LAA-MV-NEXT: store <4 x float> zeroinitializer, ptr [[TMP1]], align 4 ; COMPARE-LAA-MV-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 ; COMPARE-LAA-MV-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 96 -; COMPARE-LAA-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP61:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP60:![0-9]+]] ; COMPARE-LAA-MV: [[MIDDLE_BLOCK]]: -; COMPARE-LAA-MV-NEXT: br label %[[SCALAR_PH]] +; COMPARE-LAA-MV-NEXT: br i1 false, label %[[EXIT:.*]], label %[[SCALAR_PH]] ; COMPARE-LAA-MV: [[SCALAR_PH]]: ; COMPARE-LAA-MV-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 97, %[[MIDDLE_BLOCK]] ], [ [[START_EXT]], %[[VECTOR_SCEVCHECK]] ] ; COMPARE-LAA-MV-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ 96, %[[MIDDLE_BLOCK]] ], [ 0, %[[VECTOR_SCEVCHECK]] ] @@ -4705,7 +4543,7 @@ define void @test_rewrite_iv_scevs(i32 %start, ptr %dst) { ; COMPARE-LAA-MV-NEXT: 
[[IV_1_NEXT]] = add i64 [[IV_1]], [[START_EXT]] ; COMPARE-LAA-MV-NEXT: [[IV_0_NEXT]] = add i64 [[IV_0]], 1 ; COMPARE-LAA-MV-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_0_NEXT]], 100 -; COMPARE-LAA-MV-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]], !llvm.loop [[LOOP62:![0-9]+]] +; COMPARE-LAA-MV-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP61:![0-9]+]] ; COMPARE-LAA-MV: [[EXIT]]: ; COMPARE-LAA-MV-NEXT: ret void ; |
