Diffstat (limited to 'llvm/lib/Target')
36 files changed, 373 insertions, 234 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td index 639ddcb..ecaeff7 100644 --- a/llvm/lib/Target/AArch64/AArch64Combine.td +++ b/llvm/lib/Target/AArch64/AArch64Combine.td @@ -350,7 +350,7 @@ def AArch64PostLegalizerLowering // Post-legalization combines which are primarily optimizations. def AArch64PostLegalizerCombiner : GICombiner<"AArch64PostLegalizerCombinerImpl", - [copy_prop, cast_of_cast_combines, + [copy_prop, cast_of_cast_combines, constant_fold_fp_ops, buildvector_of_truncate, integer_of_truncate, mutate_anyext_to_zext, combines_for_extload, combine_indexed_load_store, sext_trunc_sextload, diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index 31b3d18..fbce3b0 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -16249,7 +16249,9 @@ SDValue AArch64TargetLowering::LowerDIV(SDValue Op, SelectionDAG &DAG) const { bool Negated; uint64_t SplatVal; - if (Signed && isPow2Splat(Op.getOperand(1), SplatVal, Negated)) { + // NOTE: SRAD cannot be used to represent sdiv-by-one. + if (Signed && isPow2Splat(Op.getOperand(1), SplatVal, Negated) && + SplatVal > 1) { SDValue Pg = getPredicateForScalableVector(DAG, DL, VT); SDValue Res = DAG.getNode(AArch64ISD::SRAD_MERGE_OP1, DL, VT, Pg, Op->getOperand(0), @@ -18638,7 +18640,7 @@ bool AArch64TargetLowering::isDesirableToCommuteXorWithShift( } bool AArch64TargetLowering::shouldFoldConstantShiftPairToMask( - const SDNode *N, CombineLevel Level) const { + const SDNode *N) const { assert(((N->getOpcode() == ISD::SHL && N->getOperand(0).getOpcode() == ISD::SRL) || (N->getOpcode() == ISD::SRL && @@ -30034,7 +30036,9 @@ SDValue AArch64TargetLowering::LowerFixedLengthVectorIntDivideToSVE( bool Negated; uint64_t SplatVal; - if (Signed && isPow2Splat(Op.getOperand(1), SplatVal, Negated)) { + // NOTE: SRAD cannot be used to represent sdiv-by-one. + if (Signed && isPow2Splat(Op.getOperand(1), SplatVal, Negated) && + SplatVal > 1) { EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT); SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0)); SDValue Op2 = DAG.getTargetConstant(Log2_64(SplatVal), DL, MVT::i32); @@ -30606,6 +30610,43 @@ AArch64TargetLowering::LowerVECTOR_DEINTERLEAVE(SDValue Op, assert(OpVT.isScalableVector() && "Expected scalable vector in LowerVECTOR_DEINTERLEAVE."); + if (Op->getNumOperands() == 3) { + // aarch64_sve_ld3 only supports packed datatypes. + EVT PackedVT = getPackedSVEVectorVT(OpVT.getVectorElementCount()); + Align Alignment = DAG.getReducedAlign(PackedVT, /*UseABI=*/false); + SDValue StackPtr = + DAG.CreateStackTemporary(PackedVT.getStoreSize() * 3, Alignment); + + // Write out unmodified operands. + SmallVector<SDValue, 3> Chains; + for (unsigned I = 0; I < 3; ++I) { + SDValue Ptr = + DAG.getMemBasePlusOffset(StackPtr, PackedVT.getStoreSize() * I, DL); + SDValue V = getSVESafeBitCast(PackedVT, Op.getOperand(I), DAG); + Chains.push_back( + DAG.getStore(DAG.getEntryNode(), DL, V, Ptr, MachinePointerInfo())); + } + + Intrinsic::ID IntID = Intrinsic::aarch64_sve_ld3_sret; + EVT PredVT = PackedVT.changeVectorElementType(MVT::i1); + + SmallVector<SDValue, 7> Ops; + Ops.push_back(DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains)); + Ops.push_back(DAG.getTargetConstant(IntID, DL, MVT::i64)); + Ops.push_back(DAG.getConstant(1, DL, PredVT)); + Ops.push_back(StackPtr); + + // Read back and deinterleave data. 
+ SDVTList VTs = DAG.getVTList(PackedVT, PackedVT, PackedVT, MVT::Other); + SDValue LD3 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops); + + SmallVector<SDValue, 3> Results; + Results.push_back(getSVESafeBitCast(OpVT, LD3.getValue(0), DAG)); + Results.push_back(getSVESafeBitCast(OpVT, LD3.getValue(1), DAG)); + Results.push_back(getSVESafeBitCast(OpVT, LD3.getValue(2), DAG)); + return DAG.getMergeValues(Results, DL); + } + // Are multi-register uzp instructions available? if (Subtarget->hasSME2() && Subtarget->isStreaming() && OpVT.getVectorElementType() != MVT::i1) { @@ -30647,6 +30688,42 @@ SDValue AArch64TargetLowering::LowerVECTOR_INTERLEAVE(SDValue Op, assert(OpVT.isScalableVector() && "Expected scalable vector in LowerVECTOR_INTERLEAVE."); + if (Op->getNumOperands() == 3) { + // aarch64_sve_st3 only supports packed datatypes. + EVT PackedVT = getPackedSVEVectorVT(OpVT.getVectorElementCount()); + SmallVector<SDValue, 3> InVecs; + for (SDValue V : Op->ops()) + InVecs.push_back(getSVESafeBitCast(PackedVT, V, DAG)); + + Align Alignment = DAG.getReducedAlign(PackedVT, /*UseABI=*/false); + SDValue StackPtr = + DAG.CreateStackTemporary(PackedVT.getStoreSize() * 3, Alignment); + + Intrinsic::ID IntID = Intrinsic::aarch64_sve_st3; + EVT PredVT = PackedVT.changeVectorElementType(MVT::i1); + + SmallVector<SDValue, 7> Ops; + Ops.push_back(DAG.getEntryNode()); + Ops.push_back(DAG.getTargetConstant(IntID, DL, MVT::i64)); + Ops.append(InVecs); + Ops.push_back(DAG.getConstant(1, DL, PredVT)); + Ops.push_back(StackPtr); + + // Interleave operands and store. + SDValue Chain = DAG.getNode(ISD::INTRINSIC_VOID, DL, MVT::Other, Ops); + + // Read back the interleaved data. + SmallVector<SDValue, 3> Results; + for (unsigned I = 0; I < 3; ++I) { + SDValue Ptr = + DAG.getMemBasePlusOffset(StackPtr, PackedVT.getStoreSize() * I, DL); + SDValue L = DAG.getLoad(PackedVT, DL, Chain, Ptr, MachinePointerInfo()); + Results.push_back(getSVESafeBitCast(OpVT, L, DAG)); + } + + return DAG.getMergeValues(Results, DL); + } + // Are multi-register zip instructions available? if (Subtarget->hasSME2() && Subtarget->isStreaming() && OpVT.getVectorElementType() != MVT::i1) { diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h index e472e7d..00956fd 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -300,8 +300,7 @@ public: bool isDesirableToCommuteXorWithShift(const SDNode *N) const override; /// Return true if it is profitable to fold a pair of shifts into a mask. - bool shouldFoldConstantShiftPairToMask(const SDNode *N, - CombineLevel Level) const override; + bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override; /// Return true if it is profitable to fold a pair of shifts into a mask. 
bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override { diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp index 24bef82..8e35ba7 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp @@ -15,6 +15,7 @@ #include "AMDGPU.h" #include "AMDGPUTargetMachine.h" #include "SIModeRegisterDefaults.h" +#include "llvm/ADT/SetVector.h" #include "llvm/Analysis/AssumptionCache.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/TargetLibraryInfo.h" @@ -27,6 +28,7 @@ #include "llvm/IR/InstVisitor.h" #include "llvm/IR/IntrinsicsAMDGPU.h" #include "llvm/IR/PatternMatch.h" +#include "llvm/IR/ValueHandle.h" #include "llvm/InitializePasses.h" #include "llvm/Pass.h" #include "llvm/Support/KnownBits.h" @@ -106,6 +108,7 @@ public: bool FlowChanged = false; mutable Function *SqrtF32 = nullptr; mutable Function *LdexpF32 = nullptr; + mutable SmallVector<WeakVH> DeadVals; DenseMap<const PHINode *, bool> BreakPhiNodesCache; @@ -242,6 +245,8 @@ public: Value *emitSqrtIEEE2ULP(IRBuilder<> &Builder, Value *Src, FastMathFlags FMF) const; + bool tryNarrowMathIfNoOverflow(Instruction *I); + public: bool visitFDiv(BinaryOperator &I); @@ -281,28 +286,21 @@ bool AMDGPUCodeGenPrepareImpl::run() { BreakPhiNodesCache.clear(); bool MadeChange = false; - Function::iterator NextBB; - for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; FI = NextBB) { - BasicBlock *BB = &*FI; - NextBB = std::next(FI); - - BasicBlock::iterator Next; - for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; - I = Next) { - Next = std::next(I); - - MadeChange |= visit(*I); - - if (Next != E) { // Control flow changed - BasicBlock *NextInstBB = Next->getParent(); - if (NextInstBB != BB) { - BB = NextInstBB; - E = BB->end(); - FE = F.end(); - } - } + // Need to use make_early_inc_range because integer division expansion is + // handled by Transform/Utils, and it can delete instructions such as the + // terminator of the BB. + for (BasicBlock &BB : reverse(F)) { + for (Instruction &I : make_early_inc_range(reverse(BB))) { + if (!isInstructionTriviallyDead(&I, TLI)) + MadeChange |= visit(I); } } + + while (!DeadVals.empty()) { + if (auto *I = dyn_cast_or_null<Instruction>(DeadVals.pop_back_val())) + RecursivelyDeleteTriviallyDeadInstructions(I, TLI); + } + return MadeChange; } @@ -422,7 +420,7 @@ bool AMDGPUCodeGenPrepareImpl::replaceMulWithMul24(BinaryOperator &I) const { Value *NewVal = insertValues(Builder, Ty, ResultVals); NewVal->takeName(&I); I.replaceAllUsesWith(NewVal); - I.eraseFromParent(); + DeadVals.push_back(&I); return true; } @@ -496,10 +494,10 @@ bool AMDGPUCodeGenPrepareImpl::foldBinOpIntoSelect(BinaryOperator &BO) const { FoldedT, FoldedF); NewSelect->takeName(&BO); BO.replaceAllUsesWith(NewSelect); - BO.eraseFromParent(); + DeadVals.push_back(&BO); if (CastOp) - CastOp->eraseFromParent(); - Sel->eraseFromParent(); + DeadVals.push_back(CastOp); + DeadVals.push_back(Sel); return true; } @@ -895,7 +893,7 @@ bool AMDGPUCodeGenPrepareImpl::visitFDiv(BinaryOperator &FDiv) { if (NewVal) { FDiv.replaceAllUsesWith(NewVal); NewVal->takeName(&FDiv); - RecursivelyDeleteTriviallyDeadInstructions(&FDiv, TLI); + DeadVals.push_back(&FDiv); } return true; @@ -1302,10 +1300,7 @@ it will create `s_and_b32 s0, s0, 0xff`. We accept this change since the non-byte load assumes the upper bits within the byte are all 0. 
*/ -static bool tryNarrowMathIfNoOverflow(Instruction *I, - const SITargetLowering *TLI, - const TargetTransformInfo &TTI, - const DataLayout &DL) { +bool AMDGPUCodeGenPrepareImpl::tryNarrowMathIfNoOverflow(Instruction *I) { unsigned Opc = I->getOpcode(); Type *OldType = I->getType(); @@ -1330,6 +1325,7 @@ static bool tryNarrowMathIfNoOverflow(Instruction *I, NewType = I->getType()->getWithNewBitWidth(NewBit); // Old cost + const TargetTransformInfo &TTI = TM.getTargetTransformInfo(F); InstructionCost OldCost = TTI.getArithmeticInstrCost(Opc, OldType, TTI::TCK_RecipThroughput); // New cost of new op @@ -1360,7 +1356,7 @@ static bool tryNarrowMathIfNoOverflow(Instruction *I, Value *Zext = Builder.CreateZExt(Arith, OldType); I->replaceAllUsesWith(Zext); - I->eraseFromParent(); + DeadVals.push_back(I); return true; } @@ -1370,8 +1366,7 @@ bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) { if (UseMul24Intrin && replaceMulWithMul24(I)) return true; - if (tryNarrowMathIfNoOverflow(&I, ST.getTargetLowering(), - TM.getTargetTransformInfo(F), DL)) + if (tryNarrowMathIfNoOverflow(&I)) return true; bool Changed = false; @@ -1436,7 +1431,7 @@ bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) { if (NewDiv) { I.replaceAllUsesWith(NewDiv); - I.eraseFromParent(); + DeadVals.push_back(&I); Changed = true; } } @@ -1492,7 +1487,7 @@ bool AMDGPUCodeGenPrepareImpl::visitLoadInst(LoadInst &I) { Value *ValTrunc = Builder.CreateTrunc(WidenLoad, IntNTy); Value *ValOrig = Builder.CreateBitCast(ValTrunc, I.getType()); I.replaceAllUsesWith(ValOrig); - I.eraseFromParent(); + DeadVals.push_back(&I); return true; } @@ -1534,7 +1529,7 @@ bool AMDGPUCodeGenPrepareImpl::visitSelectInst(SelectInst &I) { Fract->takeName(&I); I.replaceAllUsesWith(Fract); - RecursivelyDeleteTriviallyDeadInstructions(&I, TLI); + DeadVals.push_back(&I); return true; } @@ -1822,7 +1817,7 @@ bool AMDGPUCodeGenPrepareImpl::visitPHINode(PHINode &I) { } I.replaceAllUsesWith(Vec); - I.eraseFromParent(); + DeadVals.push_back(&I); return true; } @@ -1903,7 +1898,7 @@ bool AMDGPUCodeGenPrepareImpl::visitAddrSpaceCastInst(AddrSpaceCastInst &I) { auto *Intrin = B.CreateIntrinsic( I.getType(), Intrinsic::amdgcn_addrspacecast_nonnull, {I.getOperand(0)}); I.replaceAllUsesWith(Intrin); - I.eraseFromParent(); + DeadVals.push_back(&I); return true; } @@ -2000,16 +1995,10 @@ bool AMDGPUCodeGenPrepareImpl::visitFMinLike(IntrinsicInst &I) { Value *Fract = applyFractPat(Builder, FractArg); Fract->takeName(&I); I.replaceAllUsesWith(Fract); - - RecursivelyDeleteTriviallyDeadInstructions(&I, TLI); + DeadVals.push_back(&I); return true; } -static bool isOneOrNegOne(const Value *Val) { - const APFloat *C; - return match(Val, m_APFloat(C)) && C->getExactLog2Abs() == 0; -} - // Expand llvm.sqrt.f32 calls with !fpmath metadata in a semi-fast way. bool AMDGPUCodeGenPrepareImpl::visitSqrt(IntrinsicInst &Sqrt) { Type *Ty = Sqrt.getType()->getScalarType(); @@ -2030,18 +2019,6 @@ bool AMDGPUCodeGenPrepareImpl::visitSqrt(IntrinsicInst &Sqrt) { if (ReqdAccuracy < 1.0f) return false; - // FIXME: This is an ugly hack for this pass using forward iteration instead - // of reverse. If it worked like a normal combiner, the rsq would form before - // we saw a sqrt call. 
- auto *FDiv = - dyn_cast_or_null<FPMathOperator>(Sqrt.getUniqueUndroppableUser()); - if (FDiv && FDiv->getOpcode() == Instruction::FDiv && - FDiv->getFPAccuracy() >= 1.0f && - canOptimizeWithRsq(FPOp, FDiv->getFastMathFlags(), SqrtFMF) && - // TODO: We should also handle the arcp case for the fdiv with non-1 value - isOneOrNegOne(FDiv->getOperand(0))) - return false; - Value *SrcVal = Sqrt.getOperand(0); bool CanTreatAsDAZ = canIgnoreDenormalInput(SrcVal, &Sqrt); @@ -2065,7 +2042,7 @@ bool AMDGPUCodeGenPrepareImpl::visitSqrt(IntrinsicInst &Sqrt) { Value *NewSqrt = insertValues(Builder, Sqrt.getType(), ResultVals); NewSqrt->takeName(&Sqrt); Sqrt.replaceAllUsesWith(NewSqrt); - Sqrt.eraseFromParent(); + DeadVals.push_back(&Sqrt); return true; } diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp index 73b2660..5407566 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp @@ -468,6 +468,38 @@ void RegBankLegalizeHelper::lowerUnpackBitShift(MachineInstr &MI) { MI.eraseFromParent(); } +void RegBankLegalizeHelper::lowerUnpackMinMax(MachineInstr &MI) { + Register Lo, Hi; + switch (MI.getOpcode()) { + case AMDGPU::G_SMIN: + case AMDGPU::G_SMAX: { + // For signed operations, use sign extension + auto [Val0_Lo, Val0_Hi] = unpackSExt(MI.getOperand(1).getReg()); + auto [Val1_Lo, Val1_Hi] = unpackSExt(MI.getOperand(2).getReg()); + Lo = B.buildInstr(MI.getOpcode(), {SgprRB_S32}, {Val0_Lo, Val1_Lo}) + .getReg(0); + Hi = B.buildInstr(MI.getOpcode(), {SgprRB_S32}, {Val0_Hi, Val1_Hi}) + .getReg(0); + break; + } + case AMDGPU::G_UMIN: + case AMDGPU::G_UMAX: { + // For unsigned operations, use zero extension + auto [Val0_Lo, Val0_Hi] = unpackZExt(MI.getOperand(1).getReg()); + auto [Val1_Lo, Val1_Hi] = unpackZExt(MI.getOperand(2).getReg()); + Lo = B.buildInstr(MI.getOpcode(), {SgprRB_S32}, {Val0_Lo, Val1_Lo}) + .getReg(0); + Hi = B.buildInstr(MI.getOpcode(), {SgprRB_S32}, {Val0_Hi, Val1_Hi}) + .getReg(0); + break; + } + default: + llvm_unreachable("Unpack min/max lowering not implemented"); + } + B.buildBuildVectorTrunc(MI.getOperand(0).getReg(), {Lo, Hi}); + MI.eraseFromParent(); +} + static bool isSignedBFE(MachineInstr &MI) { if (GIntrinsic *GI = dyn_cast<GIntrinsic>(&MI)) return (GI->is(Intrinsic::amdgcn_sbfe)); @@ -654,6 +686,8 @@ void RegBankLegalizeHelper::lower(MachineInstr &MI, } case UnpackBitShift: return lowerUnpackBitShift(MI); + case UnpackMinMax: + return lowerUnpackMinMax(MI); case Ext32To64: { const RegisterBank *RB = MRI.getRegBank(MI.getOperand(0).getReg()); MachineInstrBuilder Hi; diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h index 7affe5a..d937815 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h +++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h @@ -123,6 +123,7 @@ private: void lowerSplitTo32(MachineInstr &MI); void lowerSplitTo32Select(MachineInstr &MI); void lowerSplitTo32SExtInReg(MachineInstr &MI); + void lowerUnpackMinMax(MachineInstr &MI); }; } // end namespace AMDGPU diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp index f413bbc..bfe2c80 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp @@ -522,6 +522,22 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST, .Uni(S64, 
{{Sgpr64}, {Sgpr64, Sgpr32, Sgpr32}, S_BFE}) .Div(S64, {{Vgpr64}, {Vgpr64, Vgpr32, Vgpr32}, V_BFE}); + addRulesForGOpcs({G_SMIN, G_SMAX}, Standard) + .Uni(S16, {{Sgpr32Trunc}, {Sgpr32SExt, Sgpr32SExt}}) + .Div(S16, {{Vgpr16}, {Vgpr16, Vgpr16}}) + .Uni(S32, {{Sgpr32}, {Sgpr32, Sgpr32}}) + .Div(S32, {{Vgpr32}, {Vgpr32, Vgpr32}}) + .Uni(V2S16, {{SgprV2S16}, {SgprV2S16, SgprV2S16}, UnpackMinMax}) + .Div(V2S16, {{VgprV2S16}, {VgprV2S16, VgprV2S16}}); + + addRulesForGOpcs({G_UMIN, G_UMAX}, Standard) + .Uni(S16, {{Sgpr32Trunc}, {Sgpr32ZExt, Sgpr32ZExt}}) + .Div(S16, {{Vgpr16}, {Vgpr16, Vgpr16}}) + .Uni(S32, {{Sgpr32}, {Sgpr32, Sgpr32}}) + .Div(S32, {{Vgpr32}, {Vgpr32, Vgpr32}}) + .Uni(V2S16, {{SgprV2S16}, {SgprV2S16, SgprV2S16}, UnpackMinMax}) + .Div(V2S16, {{VgprV2S16}, {VgprV2S16, VgprV2S16}}); + // Note: we only write S1 rules for G_IMPLICIT_DEF, G_CONSTANT, G_FCONSTANT // and G_FREEZE here, rest is trivially regbankselected earlier addRulesForGOpcs({G_IMPLICIT_DEF}).Any({{UniS1}, {{Sgpr32Trunc}, {}}}); @@ -617,6 +633,12 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST, .Any({{UniS64, S64}, {{Sgpr64}, {Sgpr64}}}) .Any({{DivS64, S64}, {{Vgpr64}, {Vgpr64}, SplitTo32SExtInReg}}); + addRulesForGOpcs({G_ASSERT_ZEXT, G_ASSERT_SEXT}, Standard) + .Uni(S32, {{Sgpr32}, {Sgpr32, Imm}}) + .Div(S32, {{Vgpr32}, {Vgpr32, Imm}}) + .Uni(S64, {{Sgpr64}, {Sgpr64, Imm}}) + .Div(S64, {{Vgpr64}, {Vgpr64, Imm}}); + bool hasSMRDx3 = ST->hasScalarDwordx3Loads(); bool hasSMRDSmall = ST->hasScalarSubwordLoads(); bool usesTrue16 = ST->useRealTrue16Insts(); diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h index d0c6910..93e0efd 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h +++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h @@ -212,6 +212,7 @@ enum LoweringMethodID { VccExtToSel, UniExtToSel, UnpackBitShift, + UnpackMinMax, S_BFE, V_BFE, VgprToVccCopy, diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp index 557d87f..56807a4 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp @@ -5053,16 +5053,18 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const { // // vdst, srcA, srcB, srcC const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); + + bool UseAGPRForm = !Subtarget.hasGFX90AInsts() || + Info->selectAGPRFormMFMA(MinNumRegsRequired); + OpdsMapping[0] = - Info->getMinNumAGPRs() >= MinNumRegsRequired - ? getAGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI) - : getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI); + UseAGPRForm ? getAGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI) + : getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI); OpdsMapping[2] = getVGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI); OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI); OpdsMapping[4] = - Info->getMinNumAGPRs() >= MinNumRegsRequired - ? getAGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI) - : getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI); + UseAGPRForm ? 
getAGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI) + : getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI); break; } case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4: @@ -5115,11 +5117,21 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const { case Intrinsic::amdgcn_smfmac_f32_32x32x64_bf8_fp8: case Intrinsic::amdgcn_smfmac_f32_32x32x64_fp8_bf8: case Intrinsic::amdgcn_smfmac_f32_32x32x64_fp8_fp8: { + Register DstReg = MI.getOperand(0).getReg(); + unsigned DstSize = MRI.getType(DstReg).getSizeInBits(); + unsigned MinNumRegsRequired = DstSize / 32; + const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>(); + bool UseAGPRForm = Info->selectAGPRFormMFMA(MinNumRegsRequired); + // vdst, srcA, srcB, srcC, idx - OpdsMapping[0] = getAGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI); + OpdsMapping[0] = UseAGPRForm ? getAGPROpMapping(DstReg, MRI, *TRI) + : getVGPROpMapping(DstReg, MRI, *TRI); + OpdsMapping[2] = getVGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI); OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI); - OpdsMapping[4] = getAGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI); + OpdsMapping[4] = + UseAGPRForm ? getAGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI) + : getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI); OpdsMapping[5] = getVGPROpMapping(MI.getOperand(5).getReg(), MRI, *TRI); break; } diff --git a/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp b/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp index ef63acc..71494be 100644 --- a/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp +++ b/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp @@ -905,7 +905,7 @@ bool GCNRegPressurePrinter::runOnMachineFunction(MachineFunction &MF) { OS << ":\n"; SlotIndex MBBStartSlot = LIS.getSlotIndexes()->getMBBStartIdx(&MBB); - SlotIndex MBBEndSlot = LIS.getSlotIndexes()->getMBBEndIdx(&MBB); + SlotIndex MBBLastSlot = LIS.getSlotIndexes()->getMBBLastIdx(&MBB); GCNRPTracker::LiveRegSet LiveIn, LiveOut; GCNRegPressure RPAtMBBEnd; @@ -931,7 +931,7 @@ bool GCNRegPressurePrinter::runOnMachineFunction(MachineFunction &MF) { } } else { GCNUpwardRPTracker RPT(LIS); - RPT.reset(MRI, MBBEndSlot); + RPT.reset(MRI, MBBLastSlot); LiveOut = RPT.getLiveRegs(); RPAtMBBEnd = RPT.getPressure(); @@ -966,14 +966,14 @@ bool GCNRegPressurePrinter::runOnMachineFunction(MachineFunction &MF) { OS << PFX " Live-out:" << llvm::print(LiveOut, MRI); if (UseDownwardTracker) - ReportLISMismatchIfAny(LiveOut, getLiveRegs(MBBEndSlot, LIS, MRI)); + ReportLISMismatchIfAny(LiveOut, getLiveRegs(MBBLastSlot, LIS, MRI)); GCNRPTracker::LiveRegSet LiveThrough; for (auto [Reg, Mask] : LiveIn) { LaneBitmask MaskIntersection = Mask & LiveOut.lookup(Reg); if (MaskIntersection.any()) { LaneBitmask LTMask = getRegLiveThroughMask( - MRI, LIS, Reg, MBBStartSlot, MBBEndSlot, MaskIntersection); + MRI, LIS, Reg, MBBStartSlot, MBBLastSlot, MaskIntersection); if (LTMask.any()) LiveThrough[Reg] = LTMask; } diff --git a/llvm/lib/Target/AMDGPU/GCNRegPressure.h b/llvm/lib/Target/AMDGPU/GCNRegPressure.h index a9c58bb..898d1ff 100644 --- a/llvm/lib/Target/AMDGPU/GCNRegPressure.h +++ b/llvm/lib/Target/AMDGPU/GCNRegPressure.h @@ -313,8 +313,8 @@ public: /// reset tracker to the end of the \p MBB. void reset(const MachineBasicBlock &MBB) { - reset(MBB.getParent()->getRegInfo(), - LIS.getSlotIndexes()->getMBBEndIdx(&MBB)); + SlotIndex MBBLastSlot = LIS.getSlotIndexes()->getMBBLastIdx(&MBB); + reset(MBB.getParent()->getRegInfo(), MBBLastSlot); } /// reset tracker to the point just after \p MI (in program order). 
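The AMDGPUCodeGenPrepare hunks above stop erasing instructions mid-walk and instead queue them in a `DeadVals` list of `WeakVH`, draining it once the reverse traversal of the function is done. A minimal sketch of that deferred-deletion idiom, assuming a hypothetical `runDeferredDeletion` helper and `VisitOne` callback (neither is part of the patch); the LLVM utilities it calls are the same ones the patch uses:

```cpp
// Sketch only: collect candidates while walking, delete after the walk.
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

// VisitOne rewrites a single instruction and, instead of erasing the old one,
// pushes it onto DeadVals (hypothetical callback, mirroring the pass's visit()).
static bool runDeferredDeletion(
    Function &F, const TargetLibraryInfo *TLI,
    function_ref<bool(Instruction &, SmallVectorImpl<WeakVH> &)> VisitOne) {
  SmallVector<WeakVH> DeadVals;
  bool Changed = false;

  // Reverse walk; make_early_inc_range tolerates the current instruction
  // being replaced or removed by expansion utilities.
  for (BasicBlock &BB : reverse(F))
    for (Instruction &I : make_early_inc_range(reverse(BB)))
      if (!isInstructionTriviallyDead(&I, TLI))
        Changed |= VisitOne(I, DeadVals);

  // A WeakVH goes null if something else already deleted the instruction,
  // so stale entries are skipped rather than erased twice.
  while (!DeadVals.empty())
    if (auto *I = dyn_cast_or_null<Instruction>(DeadVals.pop_back_val()))
      RecursivelyDeleteTriviallyDeadInstructions(I, TLI);

  return Changed;
}
```

Using `WeakVH` matters here because, as the patch's comment notes, the integer-division expansion in Transforms/Utils can delete instructions behind the visitor's back; a nulled handle is simply skipped.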
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index 730be69..80e985d 100644 --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -103,52 +103,52 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM, addRegisterClass(MVT::Untyped, V64RegClass); addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass); - addRegisterClass(MVT::v3f32, TRI->getVGPRClassForBitWidth(96)); + addRegisterClass(MVT::v3f32, &AMDGPU::VReg_96RegClass); addRegisterClass(MVT::v2i64, &AMDGPU::SGPR_128RegClass); addRegisterClass(MVT::v2f64, &AMDGPU::SGPR_128RegClass); addRegisterClass(MVT::v4i32, &AMDGPU::SGPR_128RegClass); - addRegisterClass(MVT::v4f32, TRI->getVGPRClassForBitWidth(128)); + addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass); addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass); - addRegisterClass(MVT::v5f32, TRI->getVGPRClassForBitWidth(160)); + addRegisterClass(MVT::v5f32, &AMDGPU::VReg_160RegClass); addRegisterClass(MVT::v6i32, &AMDGPU::SGPR_192RegClass); - addRegisterClass(MVT::v6f32, TRI->getVGPRClassForBitWidth(192)); + addRegisterClass(MVT::v6f32, &AMDGPU::VReg_192RegClass); addRegisterClass(MVT::v3i64, &AMDGPU::SGPR_192RegClass); - addRegisterClass(MVT::v3f64, TRI->getVGPRClassForBitWidth(192)); + addRegisterClass(MVT::v3f64, &AMDGPU::VReg_192RegClass); addRegisterClass(MVT::v7i32, &AMDGPU::SGPR_224RegClass); - addRegisterClass(MVT::v7f32, TRI->getVGPRClassForBitWidth(224)); + addRegisterClass(MVT::v7f32, &AMDGPU::VReg_224RegClass); addRegisterClass(MVT::v8i32, &AMDGPU::SGPR_256RegClass); - addRegisterClass(MVT::v8f32, TRI->getVGPRClassForBitWidth(256)); + addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass); addRegisterClass(MVT::v4i64, &AMDGPU::SGPR_256RegClass); - addRegisterClass(MVT::v4f64, TRI->getVGPRClassForBitWidth(256)); + addRegisterClass(MVT::v4f64, &AMDGPU::VReg_256RegClass); addRegisterClass(MVT::v9i32, &AMDGPU::SGPR_288RegClass); - addRegisterClass(MVT::v9f32, TRI->getVGPRClassForBitWidth(288)); + addRegisterClass(MVT::v9f32, &AMDGPU::VReg_288RegClass); addRegisterClass(MVT::v10i32, &AMDGPU::SGPR_320RegClass); - addRegisterClass(MVT::v10f32, TRI->getVGPRClassForBitWidth(320)); + addRegisterClass(MVT::v10f32, &AMDGPU::VReg_320RegClass); addRegisterClass(MVT::v11i32, &AMDGPU::SGPR_352RegClass); - addRegisterClass(MVT::v11f32, TRI->getVGPRClassForBitWidth(352)); + addRegisterClass(MVT::v11f32, &AMDGPU::VReg_352RegClass); addRegisterClass(MVT::v12i32, &AMDGPU::SGPR_384RegClass); - addRegisterClass(MVT::v12f32, TRI->getVGPRClassForBitWidth(384)); + addRegisterClass(MVT::v12f32, &AMDGPU::VReg_384RegClass); addRegisterClass(MVT::v16i32, &AMDGPU::SGPR_512RegClass); - addRegisterClass(MVT::v16f32, TRI->getVGPRClassForBitWidth(512)); + addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass); addRegisterClass(MVT::v8i64, &AMDGPU::SGPR_512RegClass); - addRegisterClass(MVT::v8f64, TRI->getVGPRClassForBitWidth(512)); + addRegisterClass(MVT::v8f64, &AMDGPU::VReg_512RegClass); addRegisterClass(MVT::v16i64, &AMDGPU::SGPR_1024RegClass); - addRegisterClass(MVT::v16f64, TRI->getVGPRClassForBitWidth(1024)); + addRegisterClass(MVT::v16f64, &AMDGPU::VReg_1024RegClass); if (Subtarget->has16BitInsts()) { if (Subtarget->useRealTrue16Insts()) { @@ -180,7 +180,7 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM, } addRegisterClass(MVT::v32i32, &AMDGPU::VReg_1024RegClass); - addRegisterClass(MVT::v32f32, TRI->getVGPRClassForBitWidth(1024)); + addRegisterClass(MVT::v32f32, 
&AMDGPU::VReg_1024RegClass); computeRegisterProperties(Subtarget->getRegisterInfo()); diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h index b7dbb59..2c1a13c 100644 --- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h +++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h @@ -1202,6 +1202,12 @@ public: unsigned getMinNumAGPRs() const { return MinNumAGPRs; } + /// Return true if an MFMA that requires at least \p NumRegs should select to + /// the AGPR form, instead of the VGPR form. + bool selectAGPRFormMFMA(unsigned NumRegs) const { + return !MFMAVGPRForm && getMinNumAGPRs() >= NumRegs; + } + // \returns true if a function has a use of AGPRs via inline asm or // has a call which may use it. bool mayUseAGPRs(const Function &F) const; diff --git a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp index 4d3331a..c684f9e 100644 --- a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp +++ b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp @@ -674,15 +674,9 @@ void SIPreEmitPeephole::performF32Unpacking(MachineInstr &I) { createUnpackedMI(I, UnpackedOpcode, /*IsHiBits=*/true); MachineOperand HiDstOp = Op0HOp1H->getOperand(0); - if (I.getFlag(MachineInstr::MIFlag::NoFPExcept)) { - Op0LOp1L->setFlag(MachineInstr::MIFlag::NoFPExcept); - Op0HOp1H->setFlag(MachineInstr::MIFlag::NoFPExcept); - } - if (I.getFlag(MachineInstr::MIFlag::FmContract)) { - Op0LOp1L->setFlag(MachineInstr::MIFlag::FmContract); - Op0HOp1H->setFlag(MachineInstr::MIFlag::FmContract); - } - + uint32_t IFlags = I.getFlags(); + Op0LOp1L->setFlags(IFlags); + Op0HOp1H->setFlags(IFlags); LoDstOp.setIsRenamable(DstOp.isRenamable()); HiDstOp.setIsRenamable(DstOp.isRenamable()); diff --git a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td index 7cfd059..6500fce 100644 --- a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td +++ b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td @@ -964,14 +964,12 @@ class MAIFrag<SDPatternOperator Op, bit HasAbid = true, bit Scaled = false> : Pa class CanUseAGPR_MAI<ValueType vt> { code PredicateCode = [{ return !Subtarget->hasGFX90AInsts() || - (!SIMachineFunctionInfo::MFMAVGPRForm && - MF->getInfo<SIMachineFunctionInfo>()->getMinNumAGPRs() >= - }] # !srl(vt.Size, 5) # ");"; + MF->getInfo<SIMachineFunctionInfo>()->selectAGPRFormMFMA( + }] # !srl(vt.Size, 5) # ");"; code GISelPredicateCode = [{ return !Subtarget->hasGFX90AInsts() || - (!SIMachineFunctionInfo::MFMAVGPRForm && - MF.getInfo<SIMachineFunctionInfo>()->getMinNumAGPRs() >= + MF.getInfo<SIMachineFunctionInfo>()->selectAGPRFormMFMA( }] # !srl(vt.Size, 5) # ");"; } diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index 2a40fb9..67ea2dd 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -42,7 +42,6 @@ #include "llvm/CodeGen/CallingConvLower.h" #include "llvm/CodeGen/ComplexDeinterleavingPass.h" #include "llvm/CodeGen/ISDOpcodes.h" -#include "llvm/CodeGen/IntrinsicLowering.h" #include "llvm/CodeGen/MachineBasicBlock.h" #include "llvm/CodeGen/MachineConstantPool.h" #include "llvm/CodeGen/MachineFrameInfo.h" @@ -13817,7 +13816,7 @@ bool ARMTargetLowering::isDesirableToCommuteXorWithShift( } bool ARMTargetLowering::shouldFoldConstantShiftPairToMask( - const SDNode *N, CombineLevel Level) const { + const SDNode *N) const { assert(((N->getOpcode() == ISD::SHL && N->getOperand(0).getOpcode() == ISD::SRL) || (N->getOpcode() == ISD::SRL && @@ 
-13827,7 +13826,8 @@ bool ARMTargetLowering::shouldFoldConstantShiftPairToMask( if (!Subtarget->isThumb1Only()) return true; - if (Level == BeforeLegalizeTypes) + EVT VT = N->getValueType(0); + if (VT.getScalarSizeInBits() > 32) return true; return false; diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h index 26ff54c..70aa001 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.h +++ b/llvm/lib/Target/ARM/ARMISelLowering.h @@ -772,8 +772,7 @@ class VectorType; bool isDesirableToCommuteXorWithShift(const SDNode *N) const override; - bool shouldFoldConstantShiftPairToMask(const SDNode *N, - CombineLevel Level) const override; + bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override; /// Return true if it is profitable to fold a pair of shifts into a mask. bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override { diff --git a/llvm/lib/Target/Hexagon/HexagonPatterns.td b/llvm/lib/Target/Hexagon/HexagonPatterns.td index a0acfcf..85ce944 100644 --- a/llvm/lib/Target/Hexagon/HexagonPatterns.td +++ b/llvm/lib/Target/Hexagon/HexagonPatterns.td @@ -699,35 +699,20 @@ def: OpR_RR_pat<C2_cmpgtp, setgt, i1, I64>; def: OpR_RR_pat<C2_cmpgtup, setugt, i1, I64>; def: OpR_RR_pat<C2_cmpgtp, RevCmp<setlt>, i1, I64>; def: OpR_RR_pat<C2_cmpgtup, RevCmp<setult>, i1, I64>; -def: OpR_RR_pat<A2_vcmpbeq, seteq, i1, V8I8>; def: OpR_RR_pat<A2_vcmpbeq, seteq, v8i1, V8I8>; -def: OpR_RR_pat<A4_vcmpbgt, RevCmp<setlt>, i1, V8I8>; def: OpR_RR_pat<A4_vcmpbgt, RevCmp<setlt>, v8i1, V8I8>; -def: OpR_RR_pat<A4_vcmpbgt, setgt, i1, V8I8>; def: OpR_RR_pat<A4_vcmpbgt, setgt, v8i1, V8I8>; -def: OpR_RR_pat<A2_vcmpbgtu, RevCmp<setult>, i1, V8I8>; def: OpR_RR_pat<A2_vcmpbgtu, RevCmp<setult>, v8i1, V8I8>; -def: OpR_RR_pat<A2_vcmpbgtu, setugt, i1, V8I8>; def: OpR_RR_pat<A2_vcmpbgtu, setugt, v8i1, V8I8>; -def: OpR_RR_pat<A2_vcmpheq, seteq, i1, V4I16>; def: OpR_RR_pat<A2_vcmpheq, seteq, v4i1, V4I16>; -def: OpR_RR_pat<A2_vcmphgt, RevCmp<setlt>, i1, V4I16>; def: OpR_RR_pat<A2_vcmphgt, RevCmp<setlt>, v4i1, V4I16>; -def: OpR_RR_pat<A2_vcmphgt, setgt, i1, V4I16>; def: OpR_RR_pat<A2_vcmphgt, setgt, v4i1, V4I16>; -def: OpR_RR_pat<A2_vcmphgtu, RevCmp<setult>, i1, V4I16>; def: OpR_RR_pat<A2_vcmphgtu, RevCmp<setult>, v4i1, V4I16>; -def: OpR_RR_pat<A2_vcmphgtu, setugt, i1, V4I16>; def: OpR_RR_pat<A2_vcmphgtu, setugt, v4i1, V4I16>; -def: OpR_RR_pat<A2_vcmpweq, seteq, i1, V2I32>; def: OpR_RR_pat<A2_vcmpweq, seteq, v2i1, V2I32>; -def: OpR_RR_pat<A2_vcmpwgt, RevCmp<setlt>, i1, V2I32>; def: OpR_RR_pat<A2_vcmpwgt, RevCmp<setlt>, v2i1, V2I32>; -def: OpR_RR_pat<A2_vcmpwgt, setgt, i1, V2I32>; def: OpR_RR_pat<A2_vcmpwgt, setgt, v2i1, V2I32>; -def: OpR_RR_pat<A2_vcmpwgtu, RevCmp<setult>, i1, V2I32>; def: OpR_RR_pat<A2_vcmpwgtu, RevCmp<setult>, v2i1, V2I32>; -def: OpR_RR_pat<A2_vcmpwgtu, setugt, i1, V2I32>; def: OpR_RR_pat<A2_vcmpwgtu, setugt, v2i1, V2I32>; def: OpR_RR_pat<F2_sfcmpeq, seteq, i1, F32>; @@ -1213,12 +1198,6 @@ def: OpR_RI_pat<S2_asl_i_r, Shl, i32, I32, u5_0ImmPred>; def: OpR_RI_pat<S2_asr_i_p, Sra, i64, I64, u6_0ImmPred>; def: OpR_RI_pat<S2_lsr_i_p, Srl, i64, I64, u6_0ImmPred>; def: OpR_RI_pat<S2_asl_i_p, Shl, i64, I64, u6_0ImmPred>; -def: OpR_RI_pat<S2_asr_i_vh, Sra, v4i16, V4I16, u4_0ImmPred>; -def: OpR_RI_pat<S2_lsr_i_vh, Srl, v4i16, V4I16, u4_0ImmPred>; -def: OpR_RI_pat<S2_asl_i_vh, Shl, v4i16, V4I16, u4_0ImmPred>; -def: OpR_RI_pat<S2_asr_i_vh, Sra, v2i32, V2I32, u5_0ImmPred>; -def: OpR_RI_pat<S2_lsr_i_vh, Srl, v2i32, V2I32, u5_0ImmPred>; -def: OpR_RI_pat<S2_asl_i_vh, Shl, v2i32, 
V2I32, u5_0ImmPred>; def: OpR_RR_pat<S2_asr_r_r, Sra, i32, I32, I32>; def: OpR_RR_pat<S2_lsr_r_r, Srl, i32, I32, I32>; diff --git a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp index ba70c9e..97379d7 100644 --- a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp +++ b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp @@ -3677,7 +3677,7 @@ bool MipsAsmParser::expandBranchImm(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out, Out, STI)) return true; - if (IsLikely) { + if (IsLikely && MemOffsetOp.isExpr()) { TOut.emitRRX(OpCode, DstRegOp.getReg(), ATReg, MCOperand::createExpr(MemOffsetOp.getExpr()), IDLoc, STI); TOut.emitRRI(Mips::SLL, Mips::ZERO, Mips::ZERO, 0, IDLoc, STI); diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp index b05de49..7f1ff45 100644 --- a/llvm/lib/Target/Mips/MipsISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp @@ -1306,7 +1306,7 @@ bool MipsTargetLowering::hasBitTest(SDValue X, SDValue Y) const { } bool MipsTargetLowering::shouldFoldConstantShiftPairToMask( - const SDNode *N, CombineLevel Level) const { + const SDNode *N) const { assert(((N->getOpcode() == ISD::SHL && N->getOperand(0).getOpcode() == ISD::SRL) || (N->getOpcode() == ISD::SRL && diff --git a/llvm/lib/Target/Mips/MipsISelLowering.h b/llvm/lib/Target/Mips/MipsISelLowering.h index c65c76c..25a0bf9 100644 --- a/llvm/lib/Target/Mips/MipsISelLowering.h +++ b/llvm/lib/Target/Mips/MipsISelLowering.h @@ -290,8 +290,7 @@ class TargetRegisterClass; bool isCheapToSpeculateCttz(Type *Ty) const override; bool isCheapToSpeculateCtlz(Type *Ty) const override; bool hasBitTest(SDValue X, SDValue Y) const override; - bool shouldFoldConstantShiftPairToMask(const SDNode *N, - CombineLevel Level) const override; + bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override; /// Return the register type for a given MVT, ensuring vectors are treated /// as a series of gpr sized integers. 
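Across AArch64, ARM, Mips, and X86 above, `shouldFoldConstantShiftPairToMask` loses its `CombineLevel` parameter, and ARM now decides based on the scalar bit width of the node's type rather than on the combine phase. The hook gates a DAGCombine fold that replaces a constant shift pair with a masking AND (plus a single shift when the two amounts differ). A small standalone check of the identity involved for the equal-amount case (illustration only, not code from the patch):

```cpp
// (x >> c) << c keeps only the bits at or above position c, which is exactly
// x & (-1 << c); the srl-of-shl form works the same way with the low mask.
#include <cassert>
#include <cstdint>

static uint32_t shiftPair(uint32_t X, unsigned C) { return (X >> C) << C; }
static uint32_t shiftMask(uint32_t X, unsigned C) { return X & (~0u << C); }

int main() {
  for (unsigned C = 0; C < 32; ++C)
    for (uint32_t X : {0u, 1u, 0x80000000u, 0xDEADBEEFu, ~0u})
      assert(shiftPair(X, C) == shiftMask(X, C));
  return 0;
}
```

On Thumb1 the patch now keeps the shift pair for types of 32 bits or fewer, presumably because materializing the mask constant there is comparatively expensive.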
diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.td b/llvm/lib/Target/Mips/MipsInstrInfo.td index eff80e5..21d8ded 100644 --- a/llvm/lib/Target/Mips/MipsInstrInfo.td +++ b/llvm/lib/Target/Mips/MipsInstrInfo.td @@ -855,6 +855,16 @@ def calltarget : Operand<iPTR> { def imm64: Operand<i64>; +def ConstantImmAsmOperandClass : AsmOperandClass { + let Name = "ConstantImm"; + let PredicateMethod = "isConstantImm"; + let RenderMethod = "addImmOperands"; +} + +def ConstantImm64: Operand<i64> { + let ParserMatchClass = ConstantImmAsmOperandClass; +} + def simm19_lsl2 : Operand<i32> { let EncoderMethod = "getSimm19Lsl2Encoding"; let DecoderMethod = "DecodeSimm19Lsl2"; @@ -2947,10 +2957,10 @@ def : MipsInstAlias<"nor\t$rs, $imm", (NORImm GPR32Opnd:$rs, GPR32Opnd:$rs, let hasDelaySlot = 1, isCTI = 1 in { def BneImm : MipsAsmPseudoInst<(outs GPR32Opnd:$rt), - (ins imm64:$imm64, brtarget:$offset), + (ins ConstantImm64:$imm64, brtarget:$offset), "bne\t$rt, $imm64, $offset">; def BeqImm : MipsAsmPseudoInst<(outs GPR32Opnd:$rt), - (ins imm64:$imm64, brtarget:$offset), + (ins ConstantImm64:$imm64, brtarget:$offset), "beq\t$rt, $imm64, $offset">; class CondBranchPseudo<string instr_asm> : @@ -2978,7 +2988,7 @@ def BGTUL: CondBranchPseudo<"bgtul">, ISA_MIPS2_NOT_32R6_64R6; let isCTI = 1 in class CondBranchImmPseudo<string instr_asm> : - MipsAsmPseudoInst<(outs), (ins GPR32Opnd:$rs, imm64:$imm, brtarget:$offset), + MipsAsmPseudoInst<(outs), (ins GPR32Opnd:$rs, ConstantImm64:$imm, brtarget:$offset), !strconcat(instr_asm, "\t$rs, $imm, $offset")>; def BEQLImmMacro : CondBranchImmPseudo<"beql">, ISA_MIPS2_NOT_32R6_64R6; diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td index 333b693..5ceb477 100644 --- a/llvm/lib/Target/RISCV/RISCVFeatures.td +++ b/llvm/lib/Target/RISCV/RISCVFeatures.td @@ -1520,6 +1520,8 @@ def HasVendorXqcics : Predicate<"Subtarget->hasVendorXqcics()">, AssemblerPredicate<(all_of FeatureVendorXqcics), "'Xqcics' (Qualcomm uC Conditional Select Extension)">; +def NoVendorXqcics + : Predicate<"!Subtarget->hasVendorXqcics()">; def FeatureVendorXqcicsr : RISCVExperimentalExtension<0, 4, "Qualcomm uC CSR Extension">; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td index f2724c41..5e1d07a 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXqci.td @@ -1571,35 +1571,42 @@ def : QCIMVCCIPat<SETUGE, QC_MVGEUI, uimm5nonzero>; } let Predicates = [HasVendorXqcicli, IsRV32] in { -def : QCILICCPat<SETEQ, QC_LIEQ>; -def : QCILICCPat<SETNE, QC_LINE>; def : QCILICCPat<SETLT, QC_LILT>; def : QCILICCPat<SETGE, QC_LIGE>; def : QCILICCPat<SETULT, QC_LILTU>; def : QCILICCPat<SETUGE, QC_LIGEU>; -def : QCILICCIPat<SETEQ, QC_LIEQI, simm5>; -def : QCILICCIPat<SETNE, QC_LINEI, simm5>; def : QCILICCIPat<SETLT, QC_LILTI, simm5>; def : QCILICCIPat<SETGE, QC_LIGEI, simm5>; def : QCILICCIPat<SETULT, QC_LILTUI, uimm5>; def : QCILICCIPat<SETUGE, QC_LIGEUI, uimm5>; -def : QCILICCPatInv<SETNE, QC_LIEQ>; -def : QCILICCPatInv<SETEQ, QC_LINE>; def : QCILICCPatInv<SETGE, QC_LILT>; def : QCILICCPatInv<SETLT, QC_LIGE>; def : QCILICCPatInv<SETUGE, QC_LILTU>; def : QCILICCPatInv<SETULT, QC_LIGEU>; -def : QCILICCIPatInv<SETNE, QC_LIEQI, simm5>; -def : QCILICCIPatInv<SETEQ, QC_LINEI, simm5>; def : QCILICCIPatInv<SETGE, QC_LILTI, simm5>; def : QCILICCIPatInv<SETLT, QC_LIGEI, simm5>; def : QCILICCIPatInv<SETUGE, QC_LILTUI, uimm5>; def : QCILICCIPatInv<SETULT, QC_LIGEUI, uimm5>; } // Predicates = 
[HasVendorXqcicli, IsRV32] +// Prioritize Xqcics over these patterns. +let Predicates = [HasVendorXqcicli, NoVendorXqcics, IsRV32] in { +def : QCILICCPat<SETEQ, QC_LIEQ>; +def : QCILICCPat<SETNE, QC_LINE>; + +def : QCILICCIPat<SETEQ, QC_LIEQI, simm5>; +def : QCILICCIPat<SETNE, QC_LINEI, simm5>; + +def : QCILICCPatInv<SETNE, QC_LIEQ>; +def : QCILICCPatInv<SETEQ, QC_LINE>; + +def : QCILICCIPatInv<SETNE, QC_LIEQI, simm5>; +def : QCILICCIPatInv<SETEQ, QC_LINEI, simm5>; +} // Predicates = [HasVendorXqcicli, NoVendorXqcics, IsRV32] + let Predicates = [HasVendorXqcics, IsRV32] in { // (SELECT X, Y, Z) is canonicalised to `(riscv_selectcc x, 0, NE, y, z)`. // These exist to prioritise over the `Select_GPR_Using_CC_GPR` pattern. diff --git a/llvm/lib/Target/SPIRV/CMakeLists.txt b/llvm/lib/Target/SPIRV/CMakeLists.txt index 46afe03..eab7b21 100644 --- a/llvm/lib/Target/SPIRV/CMakeLists.txt +++ b/llvm/lib/Target/SPIRV/CMakeLists.txt @@ -36,6 +36,7 @@ add_llvm_target(SPIRVCodeGen SPIRVMetadata.cpp SPIRVModuleAnalysis.cpp SPIRVStructurizer.cpp + SPIRVCombinerHelper.cpp SPIRVPreLegalizer.cpp SPIRVPreLegalizerCombiner.cpp SPIRVPostLegalizer.cpp diff --git a/llvm/lib/Target/SPIRV/SPIRVCombine.td b/llvm/lib/Target/SPIRV/SPIRVCombine.td index 6f726e0..fde56c4 100644 --- a/llvm/lib/Target/SPIRV/SPIRVCombine.td +++ b/llvm/lib/Target/SPIRV/SPIRVCombine.td @@ -11,8 +11,8 @@ include "llvm/Target/GlobalISel/Combine.td" def vector_length_sub_to_distance_lowering : GICombineRule < (defs root:$root), (match (wip_match_opcode G_INTRINSIC):$root, - [{ return matchLengthToDistance(*${root}, MRI); }]), - (apply [{ applySPIRVDistance(*${root}, MRI, B); }]) + [{ return Helper.matchLengthToDistance(*${root}); }]), + (apply [{ Helper.applySPIRVDistance(*${root}); }]) >; def SPIRVPreLegalizerCombiner diff --git a/llvm/lib/Target/SPIRV/SPIRVCombinerHelper.cpp b/llvm/lib/Target/SPIRV/SPIRVCombinerHelper.cpp new file mode 100644 index 0000000..267794c --- /dev/null +++ b/llvm/lib/Target/SPIRV/SPIRVCombinerHelper.cpp @@ -0,0 +1,60 @@ +//===-- SPIRVCombinerHelper.cpp -------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "SPIRVCombinerHelper.h" +#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h" +#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" +#include "llvm/IR/IntrinsicsSPIRV.h" +#include "llvm/Target/TargetMachine.h" + +using namespace llvm; +using namespace MIPatternMatch; + +SPIRVCombinerHelper::SPIRVCombinerHelper( + GISelChangeObserver &Observer, MachineIRBuilder &B, bool IsPreLegalize, + GISelValueTracking *VT, MachineDominatorTree *MDT, const LegalizerInfo *LI, + const SPIRVSubtarget &STI) + : CombinerHelper(Observer, B, IsPreLegalize, VT, MDT, LI), STI(STI) {} + +/// This match is part of a combine that +/// rewrites length(X - Y) to distance(X, Y) +/// (f32 (g_intrinsic length +/// (g_fsub (vXf32 X) (vXf32 Y)))) +/// -> +/// (f32 (g_intrinsic distance +/// (vXf32 X) (vXf32 Y))) +/// +bool SPIRVCombinerHelper::matchLengthToDistance(MachineInstr &MI) const { + if (MI.getOpcode() != TargetOpcode::G_INTRINSIC || + cast<GIntrinsic>(MI).getIntrinsicID() != Intrinsic::spv_length) + return false; + + // First operand of MI is `G_INTRINSIC` so start at operand 2. 
+ Register SubReg = MI.getOperand(2).getReg(); + MachineInstr *SubInstr = MRI.getVRegDef(SubReg); + if (SubInstr->getOpcode() != TargetOpcode::G_FSUB) + return false; + + return true; +} + +void SPIRVCombinerHelper::applySPIRVDistance(MachineInstr &MI) const { + // Extract the operands for X and Y from the match criteria. + Register SubDestReg = MI.getOperand(2).getReg(); + MachineInstr *SubInstr = MRI.getVRegDef(SubDestReg); + Register SubOperand1 = SubInstr->getOperand(1).getReg(); + Register SubOperand2 = SubInstr->getOperand(2).getReg(); + Register ResultReg = MI.getOperand(0).getReg(); + + Builder.setInstrAndDebugLoc(MI); + Builder.buildIntrinsic(Intrinsic::spv_distance, ResultReg) + .addUse(SubOperand1) + .addUse(SubOperand2); + + MI.eraseFromParent(); +} diff --git a/llvm/lib/Target/SPIRV/SPIRVCombinerHelper.h b/llvm/lib/Target/SPIRV/SPIRVCombinerHelper.h new file mode 100644 index 0000000..0b39d34 --- /dev/null +++ b/llvm/lib/Target/SPIRV/SPIRVCombinerHelper.h @@ -0,0 +1,38 @@ +//===-- SPIRVCombinerHelper.h -----------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// This contains common combine transformations that may be used in a combine +/// pass. +/// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_LIB_TARGET_SPIRV_SPIRVCOMBINERHELPER_H +#define LLVM_LIB_TARGET_SPIRV_SPIRVCOMBINERHELPER_H + +#include "SPIRVSubtarget.h" +#include "llvm/CodeGen/GlobalISel/CombinerHelper.h" + +namespace llvm { +class SPIRVCombinerHelper : public CombinerHelper { +protected: + const SPIRVSubtarget &STI; + +public: + using CombinerHelper::CombinerHelper; + SPIRVCombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B, + bool IsPreLegalize, GISelValueTracking *VT, + MachineDominatorTree *MDT, const LegalizerInfo *LI, + const SPIRVSubtarget &STI); + + bool matchLengthToDistance(MachineInstr &MI) const; + void applySPIRVDistance(MachineInstr &MI) const; +}; + +} // end namespace llvm + +#endif // LLVM_LIB_TARGET_SPIRV_SPIRVCOMBINERHELPER_H diff --git a/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp b/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp index e8c849e..28a1690 100644 --- a/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVLegalizePointerCast.cpp @@ -46,7 +46,6 @@ #include "SPIRVSubtarget.h" #include "SPIRVTargetMachine.h" #include "SPIRVUtils.h" -#include "llvm/CodeGen/IntrinsicLowering.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Intrinsics.h" diff --git a/llvm/lib/Target/SPIRV/SPIRVMergeRegionExitTargets.cpp b/llvm/lib/Target/SPIRV/SPIRVMergeRegionExitTargets.cpp index 20f03b0..60d39c9 100644 --- a/llvm/lib/Target/SPIRV/SPIRVMergeRegionExitTargets.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVMergeRegionExitTargets.cpp @@ -19,7 +19,6 @@ #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/Analysis/LoopInfo.h" -#include "llvm/CodeGen/IntrinsicLowering.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Intrinsics.h" diff --git a/llvm/lib/Target/SPIRV/SPIRVPreLegalizerCombiner.cpp b/llvm/lib/Target/SPIRV/SPIRVPreLegalizerCombiner.cpp index 8356751..48f4047 100644 --- 
a/llvm/lib/Target/SPIRV/SPIRVPreLegalizerCombiner.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVPreLegalizerCombiner.cpp @@ -1,4 +1,3 @@ - //===-- SPIRVPreLegalizerCombiner.cpp - combine legalization ----*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. @@ -13,24 +12,17 @@ //===----------------------------------------------------------------------===// #include "SPIRV.h" -#include "SPIRVTargetMachine.h" +#include "SPIRVCombinerHelper.h" #include "llvm/CodeGen/GlobalISel/CSEInfo.h" #include "llvm/CodeGen/GlobalISel/Combiner.h" -#include "llvm/CodeGen/GlobalISel/CombinerHelper.h" #include "llvm/CodeGen/GlobalISel/CombinerInfo.h" #include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h" #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" #include "llvm/CodeGen/GlobalISel/GISelValueTracking.h" -#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h" #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" -#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" -#include "llvm/CodeGen/GlobalISel/Utils.h" #include "llvm/CodeGen/MachineDominators.h" #include "llvm/CodeGen/MachineFunctionPass.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/TargetOpcodes.h" #include "llvm/CodeGen/TargetPassConfig.h" -#include "llvm/IR/IntrinsicsSPIRV.h" #define GET_GICOMBINER_DEPS #include "SPIRVGenPreLegalizeGICombiner.inc" @@ -47,72 +39,9 @@ namespace { #include "SPIRVGenPreLegalizeGICombiner.inc" #undef GET_GICOMBINER_TYPES -/// This match is part of a combine that -/// rewrites length(X - Y) to distance(X, Y) -/// (f32 (g_intrinsic length -/// (g_fsub (vXf32 X) (vXf32 Y)))) -/// -> -/// (f32 (g_intrinsic distance -/// (vXf32 X) (vXf32 Y))) -/// -bool matchLengthToDistance(MachineInstr &MI, MachineRegisterInfo &MRI) { - if (MI.getOpcode() != TargetOpcode::G_INTRINSIC || - cast<GIntrinsic>(MI).getIntrinsicID() != Intrinsic::spv_length) - return false; - - // First operand of MI is `G_INTRINSIC` so start at operand 2. - Register SubReg = MI.getOperand(2).getReg(); - MachineInstr *SubInstr = MRI.getVRegDef(SubReg); - if (!SubInstr || SubInstr->getOpcode() != TargetOpcode::G_FSUB) - return false; - - return true; -} -void applySPIRVDistance(MachineInstr &MI, MachineRegisterInfo &MRI, - MachineIRBuilder &B) { - - // Extract the operands for X and Y from the match criteria. - Register SubDestReg = MI.getOperand(2).getReg(); - MachineInstr *SubInstr = MRI.getVRegDef(SubDestReg); - Register SubOperand1 = SubInstr->getOperand(1).getReg(); - Register SubOperand2 = SubInstr->getOperand(2).getReg(); - - // Remove the original `spv_length` instruction. - - Register ResultReg = MI.getOperand(0).getReg(); - DebugLoc DL = MI.getDebugLoc(); - MachineBasicBlock &MBB = *MI.getParent(); - MachineBasicBlock::iterator InsertPt = MI.getIterator(); - - // Build the `spv_distance` intrinsic. - MachineInstrBuilder NewInstr = - BuildMI(MBB, InsertPt, DL, B.getTII().get(TargetOpcode::G_INTRINSIC)); - NewInstr - .addDef(ResultReg) // Result register - .addIntrinsicID(Intrinsic::spv_distance) // Intrinsic ID - .addUse(SubOperand1) // Operand X - .addUse(SubOperand2); // Operand Y - - SPIRVGlobalRegistry *GR = - MI.getMF()->getSubtarget<SPIRVSubtarget>().getSPIRVGlobalRegistry(); - auto RemoveAllUses = [&](Register Reg) { - SmallVector<MachineInstr *, 4> UsesToErase( - llvm::make_pointer_range(MRI.use_instructions(Reg))); - - // calling eraseFromParent to early invalidates the iterator. 
- for (auto *MIToErase : UsesToErase) { - GR->invalidateMachineInstr(MIToErase); - MIToErase->eraseFromParent(); - } - }; - RemoveAllUses(SubDestReg); // remove all uses of FSUB Result - GR->invalidateMachineInstr(SubInstr); - SubInstr->eraseFromParent(); // remove FSUB instruction -} - class SPIRVPreLegalizerCombinerImpl : public Combiner { protected: - const CombinerHelper Helper; + const SPIRVCombinerHelper Helper; const SPIRVPreLegalizerCombinerImplRuleConfig &RuleConfig; const SPIRVSubtarget &STI; @@ -147,7 +76,7 @@ SPIRVPreLegalizerCombinerImpl::SPIRVPreLegalizerCombinerImpl( const SPIRVSubtarget &STI, MachineDominatorTree *MDT, const LegalizerInfo *LI) : Combiner(MF, CInfo, TPC, &VT, CSEInfo), - Helper(Observer, B, /*IsPreLegalize*/ true, &VT, MDT, LI), + Helper(Observer, B, /*IsPreLegalize*/ true, &VT, MDT, LI, STI), RuleConfig(RuleConfig), STI(STI), #define GET_GICOMBINER_CONSTRUCTOR_INITS #include "SPIRVGenPreLegalizeGICombiner.inc" diff --git a/llvm/lib/Target/SPIRV/SPIRVStripConvergentIntrinsics.cpp b/llvm/lib/Target/SPIRV/SPIRVStripConvergentIntrinsics.cpp index 278ad7c..e621bcd44 100644 --- a/llvm/lib/Target/SPIRV/SPIRVStripConvergentIntrinsics.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVStripConvergentIntrinsics.cpp @@ -14,7 +14,6 @@ #include "SPIRV.h" #include "SPIRVSubtarget.h" #include "SPIRVUtils.h" -#include "llvm/CodeGen/IntrinsicLowering.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Intrinsics.h" #include "llvm/Transforms/Utils/Cloning.h" diff --git a/llvm/lib/Target/SPIRV/SPIRVStructurizer.cpp b/llvm/lib/Target/SPIRV/SPIRVStructurizer.cpp index 1811492..5b149f8 100644 --- a/llvm/lib/Target/SPIRV/SPIRVStructurizer.cpp +++ b/llvm/lib/Target/SPIRV/SPIRVStructurizer.cpp @@ -16,7 +16,6 @@ #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/Analysis/LoopInfo.h" -#include "llvm/CodeGen/IntrinsicLowering.h" #include "llvm/IR/CFG.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/IRBuilder.h" diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 9580ade..eea84a2 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -28,7 +28,6 @@ #include "llvm/Analysis/BlockFrequencyInfo.h" #include "llvm/Analysis/ProfileSummaryInfo.h" #include "llvm/Analysis/VectorUtils.h" -#include "llvm/CodeGen/IntrinsicLowering.h" #include "llvm/CodeGen/LivePhysRegs.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineFunction.h" @@ -3634,7 +3633,7 @@ bool X86TargetLowering::preferScalarizeSplat(SDNode *N) const { } bool X86TargetLowering::shouldFoldConstantShiftPairToMask( - const SDNode *N, CombineLevel Level) const { + const SDNode *N) const { assert(((N->getOpcode() == ISD::SHL && N->getOperand(0).getOpcode() == ISD::SRL) || (N->getOpcode() == ISD::SRL && @@ -3649,7 +3648,7 @@ bool X86TargetLowering::shouldFoldConstantShiftPairToMask( // the fold for non-splats yet. 
return N->getOperand(1) == N->getOperand(0).getOperand(1); } - return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N, Level); + return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N); } bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const { diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h index b55556a..e28b9c1 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.h +++ b/llvm/lib/Target/X86/X86ISelLowering.h @@ -1244,8 +1244,7 @@ namespace llvm { getJumpConditionMergingParams(Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs) const override; - bool shouldFoldConstantShiftPairToMask(const SDNode *N, - CombineLevel Level) const override; + bool shouldFoldConstantShiftPairToMask(const SDNode *N) const override; bool shouldFoldMaskToVariableShiftPair(SDValue Y) const override; diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp index 3bc46af..6dd43b2 100644 --- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp +++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp @@ -547,7 +547,7 @@ unsigned X86TargetLowering::getAddressSpace() const { static bool hasStackGuardSlotTLS(const Triple &TargetTriple) { return TargetTriple.isOSGlibc() || TargetTriple.isOSFuchsia() || - (TargetTriple.isAndroid() && !TargetTriple.isAndroidVersionLT(17)); + TargetTriple.isAndroid(); } static Constant* SegmentOffset(IRBuilderBase &IRB, |
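The SPIR-V changes move the length(X - Y) to distance(X, Y) rewrite out of SPIRVPreLegalizerCombiner.cpp into the new SPIRVCombinerHelper, but the combine itself is unchanged: it relies on the two intrinsics computing the same value. A throwaway host-side check of that equivalence (plain C++ with our own Vec3 helpers, nothing from the SPIR-V backend):

```cpp
// length(X - Y) and distance(X, Y) agree, which is what lets the combiner
// replace the G_FSUB + spv_length pair with a single spv_distance call.
#include <array>
#include <cassert>
#include <cmath>

using Vec3 = std::array<float, 3>;

static float length(const Vec3 &V) {
  return std::sqrt(V[0] * V[0] + V[1] * V[1] + V[2] * V[2]);
}

static float distance(const Vec3 &X, const Vec3 &Y) {
  float D0 = X[0] - Y[0], D1 = X[1] - Y[1], D2 = X[2] - Y[2];
  return std::sqrt(D0 * D0 + D1 * D1 + D2 * D2);
}

int main() {
  Vec3 X{1.0f, 2.0f, 3.0f}, Y{4.0f, 6.0f, 3.0f};
  Vec3 Diff{X[0] - Y[0], X[1] - Y[1], X[2] - Y[2]};
  assert(std::fabs(length(Diff) - distance(X, Y)) < 1e-6f); // both are 5.0
  return 0;
}
```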