Diffstat (limited to 'llvm/lib/Target/AMDGPU')
 llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp          | 91
 llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp   | 34
 llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h     |  1
 llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp    | 22
 llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h      |  1
 llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp        | 28
 llvm/lib/Target/AMDGPU/GCNRegPressure.cpp                |  8
 llvm/lib/Target/AMDGPU/GCNRegPressure.h                  |  4
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp                | 32
 llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h           |  6
 llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp             | 12
 llvm/lib/Target/AMDGPU/VOP3PInstructions.td              |  8
 12 files changed, 146 insertions(+), 101 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 24bef82..8e35ba7 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -15,6 +15,7 @@
 #include "AMDGPU.h"
 #include "AMDGPUTargetMachine.h"
 #include "SIModeRegisterDefaults.h"
+#include "llvm/ADT/SetVector.h"
 #include "llvm/Analysis/AssumptionCache.h"
 #include "llvm/Analysis/ConstantFolding.h"
 #include "llvm/Analysis/TargetLibraryInfo.h"
@@ -27,6 +28,7 @@
 #include "llvm/IR/InstVisitor.h"
 #include "llvm/IR/IntrinsicsAMDGPU.h"
 #include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/ValueHandle.h"
 #include "llvm/InitializePasses.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/KnownBits.h"
@@ -106,6 +108,7 @@ public:
   bool FlowChanged = false;
   mutable Function *SqrtF32 = nullptr;
   mutable Function *LdexpF32 = nullptr;
+  mutable SmallVector<WeakVH> DeadVals;
 
   DenseMap<const PHINode *, bool> BreakPhiNodesCache;
 
@@ -242,6 +245,8 @@ public:
   Value *emitSqrtIEEE2ULP(IRBuilder<> &Builder, Value *Src,
                           FastMathFlags FMF) const;
 
+  bool tryNarrowMathIfNoOverflow(Instruction *I);
+
 public:
   bool visitFDiv(BinaryOperator &I);
 
@@ -281,28 +286,21 @@ bool AMDGPUCodeGenPrepareImpl::run() {
   BreakPhiNodesCache.clear();
   bool MadeChange = false;
 
-  Function::iterator NextBB;
-  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; FI = NextBB) {
-    BasicBlock *BB = &*FI;
-    NextBB = std::next(FI);
-
-    BasicBlock::iterator Next;
-    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
-         I = Next) {
-      Next = std::next(I);
-
-      MadeChange |= visit(*I);
-
-      if (Next != E) { // Control flow changed
-        BasicBlock *NextInstBB = Next->getParent();
-        if (NextInstBB != BB) {
-          BB = NextInstBB;
-          E = BB->end();
-          FE = F.end();
-        }
-      }
+  // Need to use make_early_inc_range because integer division expansion is
+  // handled by Transform/Utils, and it can delete instructions such as the
+  // terminator of the BB.
+  for (BasicBlock &BB : reverse(F)) {
+    for (Instruction &I : make_early_inc_range(reverse(BB))) {
+      if (!isInstructionTriviallyDead(&I, TLI))
+        MadeChange |= visit(I);
     }
   }
+
+  while (!DeadVals.empty()) {
+    if (auto *I = dyn_cast_or_null<Instruction>(DeadVals.pop_back_val()))
+      RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
+  }
+
   return MadeChange;
 }
 
@@ -422,7 +420,7 @@ bool AMDGPUCodeGenPrepareImpl::replaceMulWithMul24(BinaryOperator &I) const {
   Value *NewVal = insertValues(Builder, Ty, ResultVals);
   NewVal->takeName(&I);
   I.replaceAllUsesWith(NewVal);
-  I.eraseFromParent();
+  DeadVals.push_back(&I);
 
   return true;
 }
@@ -496,10 +494,10 @@ bool AMDGPUCodeGenPrepareImpl::foldBinOpIntoSelect(BinaryOperator &BO) const {
                                           FoldedT, FoldedF);
   NewSelect->takeName(&BO);
   BO.replaceAllUsesWith(NewSelect);
-  BO.eraseFromParent();
+  DeadVals.push_back(&BO);
   if (CastOp)
-    CastOp->eraseFromParent();
-  Sel->eraseFromParent();
+    DeadVals.push_back(CastOp);
+  DeadVals.push_back(Sel);
   return true;
 }
 
@@ -895,7 +893,7 @@ bool AMDGPUCodeGenPrepareImpl::visitFDiv(BinaryOperator &FDiv) {
   if (NewVal) {
     FDiv.replaceAllUsesWith(NewVal);
     NewVal->takeName(&FDiv);
-    RecursivelyDeleteTriviallyDeadInstructions(&FDiv, TLI);
+    DeadVals.push_back(&FDiv);
   }
 
   return true;
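
The hunks above trade eager erasure for deferred deletion: each visitor records the replaced instruction in DeadVals, and run() flushes the list once iteration is done, which is what makes the reverse make_early_inc_range walk safe. A minimal standalone sketch of the pattern (the helpers replaceAndRecord and flushDeadVals are invented for illustration; the LLVM calls themselves are real):

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

// Record a replaced instruction instead of erasing it on the spot. WeakVH
// nulls itself if something else deletes the instruction first, so the
// flush below never touches freed memory.
static void replaceAndRecord(Instruction &Old, Value *New,
                             SmallVectorImpl<WeakVH> &DeadVals) {
  Old.replaceAllUsesWith(New);
  DeadVals.push_back(&Old);
}

// Delete everything at once after the traversal, mirroring the new run().
static void flushDeadVals(SmallVectorImpl<WeakVH> &DeadVals,
                          const TargetLibraryInfo *TLI) {
  while (!DeadVals.empty())
    if (auto *I = dyn_cast_or_null<Instruction>(DeadVals.pop_back_val()))
      RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
}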
@@ -1302,10 +1300,7 @@ it will create `s_and_b32 s0, s0, 0xff`. We accept
 this change since the non-byte load assumes the upper bits within the byte
 are all 0.
 */
-static bool tryNarrowMathIfNoOverflow(Instruction *I,
-                                      const SITargetLowering *TLI,
-                                      const TargetTransformInfo &TTI,
-                                      const DataLayout &DL) {
+bool AMDGPUCodeGenPrepareImpl::tryNarrowMathIfNoOverflow(Instruction *I) {
   unsigned Opc = I->getOpcode();
   Type *OldType = I->getType();
 
@@ -1330,6 +1325,7 @@ static bool tryNarrowMathIfNoOverflow(Instruction *I,
   NewType = I->getType()->getWithNewBitWidth(NewBit);
 
   // Old cost
+  const TargetTransformInfo &TTI = TM.getTargetTransformInfo(F);
   InstructionCost OldCost =
       TTI.getArithmeticInstrCost(Opc, OldType, TTI::TCK_RecipThroughput);
   // New cost of new op
@@ -1360,7 +1356,7 @@ static bool tryNarrowMathIfNoOverflow(Instruction *I,
 
   Value *Zext = Builder.CreateZExt(Arith, OldType);
   I->replaceAllUsesWith(Zext);
-  I->eraseFromParent();
+  DeadVals.push_back(I);
   return true;
 }
 
@@ -1370,8 +1366,7 @@ bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {
   if (UseMul24Intrin && replaceMulWithMul24(I))
     return true;
-  if (tryNarrowMathIfNoOverflow(&I, ST.getTargetLowering(),
-                                TM.getTargetTransformInfo(F), DL))
+  if (tryNarrowMathIfNoOverflow(&I))
     return true;
 
   bool Changed = false;
@@ -1436,7 +1431,7 @@ bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {
 
     if (NewDiv) {
       I.replaceAllUsesWith(NewDiv);
-      I.eraseFromParent();
+      DeadVals.push_back(&I);
       Changed = true;
     }
   }
@@ -1492,7 +1487,7 @@ bool AMDGPUCodeGenPrepareImpl::visitLoadInst(LoadInst &I) {
     Value *ValTrunc = Builder.CreateTrunc(WidenLoad, IntNTy);
     Value *ValOrig = Builder.CreateBitCast(ValTrunc, I.getType());
     I.replaceAllUsesWith(ValOrig);
-    I.eraseFromParent();
+    DeadVals.push_back(&I);
     return true;
   }
 
@@ -1534,7 +1529,7 @@ bool AMDGPUCodeGenPrepareImpl::visitSelectInst(SelectInst &I) {
 
   Fract->takeName(&I);
   I.replaceAllUsesWith(Fract);
-  RecursivelyDeleteTriviallyDeadInstructions(&I, TLI);
+  DeadVals.push_back(&I);
   return true;
 }
 
@@ -1822,7 +1817,7 @@ bool AMDGPUCodeGenPrepareImpl::visitPHINode(PHINode &I) {
   }
 
   I.replaceAllUsesWith(Vec);
-  I.eraseFromParent();
+  DeadVals.push_back(&I);
   return true;
 }
 
@@ -1903,7 +1898,7 @@ bool AMDGPUCodeGenPrepareImpl::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
   auto *Intrin = B.CreateIntrinsic(
       I.getType(), Intrinsic::amdgcn_addrspacecast_nonnull, {I.getOperand(0)});
   I.replaceAllUsesWith(Intrin);
-  I.eraseFromParent();
+  DeadVals.push_back(&I);
   return true;
 }
 
@@ -2000,16 +1995,10 @@ bool AMDGPUCodeGenPrepareImpl::visitFMinLike(IntrinsicInst &I) {
   Value *Fract = applyFractPat(Builder, FractArg);
   Fract->takeName(&I);
   I.replaceAllUsesWith(Fract);
-
-  RecursivelyDeleteTriviallyDeadInstructions(&I, TLI);
+  DeadVals.push_back(&I);
   return true;
 }
 
-static bool isOneOrNegOne(const Value *Val) {
-  const APFloat *C;
-  return match(Val, m_APFloat(C)) && C->getExactLog2Abs() == 0;
-}
-
 // Expand llvm.sqrt.f32 calls with !fpmath metadata in a semi-fast way.
 bool AMDGPUCodeGenPrepareImpl::visitSqrt(IntrinsicInst &Sqrt) {
   Type *Ty = Sqrt.getType()->getScalarType();
@@ -2030,18 +2019,6 @@ bool AMDGPUCodeGenPrepareImpl::visitSqrt(IntrinsicInst &Sqrt) {
   if (ReqdAccuracy < 1.0f)
     return false;
 
-  // FIXME: This is an ugly hack for this pass using forward iteration instead
-  // of reverse. If it worked like a normal combiner, the rsq would form before
-  // we saw a sqrt call.
-  auto *FDiv =
-      dyn_cast_or_null<FPMathOperator>(Sqrt.getUniqueUndroppableUser());
-  if (FDiv && FDiv->getOpcode() == Instruction::FDiv &&
-      FDiv->getFPAccuracy() >= 1.0f &&
-      canOptimizeWithRsq(FPOp, FDiv->getFastMathFlags(), SqrtFMF) &&
-      // TODO: We should also handle the arcp case for the fdiv with non-1 value
-      isOneOrNegOne(FDiv->getOperand(0)))
-    return false;
-
   Value *SrcVal = Sqrt.getOperand(0);
   bool CanTreatAsDAZ = canIgnoreDenormalInput(SrcVal, &Sqrt);
 
@@ -2065,7 +2042,7 @@ bool AMDGPUCodeGenPrepareImpl::visitSqrt(IntrinsicInst &Sqrt) {
   Value *NewSqrt = insertValues(Builder, Sqrt.getType(), ResultVals);
   NewSqrt->takeName(&Sqrt);
   Sqrt.replaceAllUsesWith(NewSqrt);
-  Sqrt.eraseFromParent();
+  DeadVals.push_back(&Sqrt);
   return true;
 }
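
The narrowing done by tryNarrowMathIfNoOverflow is only sound when the arithmetic provably cannot overflow the narrow type, so that zext of the narrow result equals the wide operation. A self-contained scalar model of that equivalence (illustrative only, not pass code):

#include <cassert>
#include <cstdint>

// Model of the rewrite: a 64-bit add whose operands and result are known
// to fit in 32 bits can run as a 32-bit add followed by a zero-extension,
// matching zext(i32 add) == i64 add under the no-overflow precondition.
static uint64_t addNarrowed(uint64_t A, uint64_t B) {
  assert(A <= UINT32_MAX && B <= UINT32_MAX && A + B <= UINT32_MAX &&
         "narrowing is only valid when the 32-bit result cannot overflow");
  uint32_t Narrow = static_cast<uint32_t>(A) + static_cast<uint32_t>(B);
  return static_cast<uint64_t>(Narrow); // the CreateZExt step
}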
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
index 73b2660..5407566 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.cpp
@@ -468,6 +468,38 @@ void RegBankLegalizeHelper::lowerUnpackBitShift(MachineInstr &MI) {
   MI.eraseFromParent();
 }
 
+void RegBankLegalizeHelper::lowerUnpackMinMax(MachineInstr &MI) {
+  Register Lo, Hi;
+  switch (MI.getOpcode()) {
+  case AMDGPU::G_SMIN:
+  case AMDGPU::G_SMAX: {
+    // For signed operations, use sign extension
+    auto [Val0_Lo, Val0_Hi] = unpackSExt(MI.getOperand(1).getReg());
+    auto [Val1_Lo, Val1_Hi] = unpackSExt(MI.getOperand(2).getReg());
+    Lo = B.buildInstr(MI.getOpcode(), {SgprRB_S32}, {Val0_Lo, Val1_Lo})
+             .getReg(0);
+    Hi = B.buildInstr(MI.getOpcode(), {SgprRB_S32}, {Val0_Hi, Val1_Hi})
+             .getReg(0);
+    break;
+  }
+  case AMDGPU::G_UMIN:
+  case AMDGPU::G_UMAX: {
+    // For unsigned operations, use zero extension
+    auto [Val0_Lo, Val0_Hi] = unpackZExt(MI.getOperand(1).getReg());
+    auto [Val1_Lo, Val1_Hi] = unpackZExt(MI.getOperand(2).getReg());
+    Lo = B.buildInstr(MI.getOpcode(), {SgprRB_S32}, {Val0_Lo, Val1_Lo})
+             .getReg(0);
+    Hi = B.buildInstr(MI.getOpcode(), {SgprRB_S32}, {Val0_Hi, Val1_Hi})
+             .getReg(0);
+    break;
+  }
+  default:
+    llvm_unreachable("Unpack min/max lowering not implemented");
+  }
+  B.buildBuildVectorTrunc(MI.getOperand(0).getReg(), {Lo, Hi});
+  MI.eraseFromParent();
+}
+
 static bool isSignedBFE(MachineInstr &MI) {
   if (GIntrinsic *GI = dyn_cast<GIntrinsic>(&MI))
     return (GI->is(Intrinsic::amdgcn_sbfe));
@@ -654,6 +686,8 @@ void RegBankLegalizeHelper::lower(MachineInstr &MI,
   }
   case UnpackBitShift:
     return lowerUnpackBitShift(MI);
+  case UnpackMinMax:
+    return lowerUnpackMinMax(MI);
   case Ext32To64: {
     const RegisterBank *RB = MRI.getRegBank(MI.getOperand(0).getReg());
     MachineInstrBuilder Hi;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h
index 7affe5a..d937815 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeHelper.h
@@ -123,6 +123,7 @@ private:
   void lowerSplitTo32(MachineInstr &MI);
   void lowerSplitTo32Select(MachineInstr &MI);
   void lowerSplitTo32SExtInReg(MachineInstr &MI);
+  void lowerUnpackMinMax(MachineInstr &MI);
 };
 } // end namespace AMDGPU
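
As a plain-C++ model of what the new lowerUnpackMinMax computes for a packed 2x16-bit signed min (each half sign-extended via unpackSExt, the 32-bit min taken per half, results repacked by buildBuildVectorTrunc) — a sketch of the semantics, not the GMIR itself:

#include <algorithm>
#include <cstdint>

// Scalar model of the unpacked lowering for uniform v2s16 G_SMIN: the two
// 16-bit halves live in one 32-bit SGPR, are widened with sign extension,
// compared at 32 bits, then truncated and repacked.
static uint32_t smin_v2i16(uint32_t A, uint32_t B) {
  int32_t ALo = static_cast<int16_t>(A & 0xFFFFu); // unpackSExt, low half
  int32_t AHi = static_cast<int16_t>(A >> 16);     // unpackSExt, high half
  int32_t BLo = static_cast<int16_t>(B & 0xFFFFu);
  int32_t BHi = static_cast<int16_t>(B >> 16);
  uint32_t Lo = static_cast<uint16_t>(std::min(ALo, BLo)); // 32-bit G_SMIN
  uint32_t Hi = static_cast<uint16_t>(std::min(AHi, BHi));
  return (Hi << 16) | Lo; // G_BUILD_VECTOR_TRUNC
}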
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
index f413bbc..bfe2c80 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.cpp
@@ -522,6 +522,22 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST,
       .Uni(S64, {{Sgpr64}, {Sgpr64, Sgpr32, Sgpr32}, S_BFE})
       .Div(S64, {{Vgpr64}, {Vgpr64, Vgpr32, Vgpr32}, V_BFE});
 
+  addRulesForGOpcs({G_SMIN, G_SMAX}, Standard)
+      .Uni(S16, {{Sgpr32Trunc}, {Sgpr32SExt, Sgpr32SExt}})
+      .Div(S16, {{Vgpr16}, {Vgpr16, Vgpr16}})
+      .Uni(S32, {{Sgpr32}, {Sgpr32, Sgpr32}})
+      .Div(S32, {{Vgpr32}, {Vgpr32, Vgpr32}})
+      .Uni(V2S16, {{SgprV2S16}, {SgprV2S16, SgprV2S16}, UnpackMinMax})
+      .Div(V2S16, {{VgprV2S16}, {VgprV2S16, VgprV2S16}});
+
+  addRulesForGOpcs({G_UMIN, G_UMAX}, Standard)
+      .Uni(S16, {{Sgpr32Trunc}, {Sgpr32ZExt, Sgpr32ZExt}})
+      .Div(S16, {{Vgpr16}, {Vgpr16, Vgpr16}})
+      .Uni(S32, {{Sgpr32}, {Sgpr32, Sgpr32}})
+      .Div(S32, {{Vgpr32}, {Vgpr32, Vgpr32}})
+      .Uni(V2S16, {{SgprV2S16}, {SgprV2S16, SgprV2S16}, UnpackMinMax})
+      .Div(V2S16, {{VgprV2S16}, {VgprV2S16, VgprV2S16}});
+
   // Note: we only write S1 rules for G_IMPLICIT_DEF, G_CONSTANT, G_FCONSTANT
   // and G_FREEZE here, rest is trivially regbankselected earlier
   addRulesForGOpcs({G_IMPLICIT_DEF}).Any({{UniS1}, {{Sgpr32Trunc}, {}}});
@@ -617,6 +633,12 @@ RegBankLegalizeRules::RegBankLegalizeRules(const GCNSubtarget &_ST,
       .Any({{UniS64, S64}, {{Sgpr64}, {Sgpr64}}})
      .Any({{DivS64, S64}, {{Vgpr64}, {Vgpr64}, SplitTo32SExtInReg}});
 
+  addRulesForGOpcs({G_ASSERT_ZEXT, G_ASSERT_SEXT}, Standard)
+      .Uni(S32, {{Sgpr32}, {Sgpr32, Imm}})
+      .Div(S32, {{Vgpr32}, {Vgpr32, Imm}})
+      .Uni(S64, {{Sgpr64}, {Sgpr64, Imm}})
+      .Div(S64, {{Vgpr64}, {Vgpr64, Imm}});
+
   bool hasSMRDx3 = ST->hasScalarDwordx3Loads();
   bool hasSMRDSmall = ST->hasScalarSubwordLoads();
   bool usesTrue16 = ST->useRealTrue16Insts();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h
index d0c6910..93e0efd 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegBankLegalizeRules.h
@@ -212,6 +212,7 @@ enum LoweringMethodID {
   VccExtToSel,
   UniExtToSel,
   UnpackBitShift,
+  UnpackMinMax,
   S_BFE,
   V_BFE,
   VgprToVccCopy,
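
The uniform S16 rules above widen to the 32-bit scalar unit in the same spirit: operands come in as Sgpr32SExt (or Sgpr32ZExt for the unsigned opcodes) and the result goes out through Sgpr32Trunc. A scalar model of the signed case (illustrative only):

#include <algorithm>
#include <cstdint>

// Model of .Uni(S16, {{Sgpr32Trunc}, {Sgpr32SExt, Sgpr32SExt}}): the i16
// min runs at full 32-bit precision after sign extension; for min/max the
// final truncation never discards significant bits.
static int16_t uniformSMin16(int16_t A, int16_t B) {
  int32_t AExt = A; // Sgpr32SExt
  int32_t BExt = B; // Sgpr32SExt
  return static_cast<int16_t>(std::min(AExt, BExt)); // Sgpr32Trunc
}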
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 557d87f..56807a4 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -5053,16 +5053,18 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
       //
       // vdst, srcA, srcB, srcC
       const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
+
+      bool UseAGPRForm = !Subtarget.hasGFX90AInsts() ||
+                         Info->selectAGPRFormMFMA(MinNumRegsRequired);
+
       OpdsMapping[0] =
-          Info->getMinNumAGPRs() >= MinNumRegsRequired
-              ? getAGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI)
-              : getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
+          UseAGPRForm ? getAGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI)
+                      : getVGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
       OpdsMapping[2] = getVGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
       OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI);
       OpdsMapping[4] =
-          Info->getMinNumAGPRs() >= MinNumRegsRequired
-              ? getAGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI)
-              : getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
+          UseAGPRForm ? getAGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI)
+                      : getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
       break;
     }
     case Intrinsic::amdgcn_mfma_scale_f32_16x16x128_f8f6f4:
@@ -5115,11 +5117,21 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     case Intrinsic::amdgcn_smfmac_f32_32x32x64_bf8_fp8:
     case Intrinsic::amdgcn_smfmac_f32_32x32x64_fp8_bf8:
     case Intrinsic::amdgcn_smfmac_f32_32x32x64_fp8_fp8: {
+      Register DstReg = MI.getOperand(0).getReg();
+      unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
+      unsigned MinNumRegsRequired = DstSize / 32;
+      const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
+      bool UseAGPRForm = Info->selectAGPRFormMFMA(MinNumRegsRequired);
+
       // vdst, srcA, srcB, srcC, idx
-      OpdsMapping[0] = getAGPROpMapping(MI.getOperand(0).getReg(), MRI, *TRI);
+      OpdsMapping[0] = UseAGPRForm ? getAGPROpMapping(DstReg, MRI, *TRI)
+                                   : getVGPROpMapping(DstReg, MRI, *TRI);
+
       OpdsMapping[2] = getVGPROpMapping(MI.getOperand(2).getReg(), MRI, *TRI);
       OpdsMapping[3] = getVGPROpMapping(MI.getOperand(3).getReg(), MRI, *TRI);
-      OpdsMapping[4] = getAGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
+      OpdsMapping[4] =
+          UseAGPRForm ? getAGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI)
+                      : getVGPROpMapping(MI.getOperand(4).getReg(), MRI, *TRI);
       OpdsMapping[5] = getVGPROpMapping(MI.getOperand(5).getReg(), MRI, *TRI);
       break;
     }
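
A worked model of the AGPR/VGPR decision used above, assuming the selectAGPRFormMFMA definition added in SIMachineFunctionInfo.h further down (NumAGPRsBudget and VGPRFormForced are stand-in names for getMinNumAGPRs() and the MFMAVGPRForm override):

// For an smfmac whose result is 16 x f32, the destination LLT is 512 bits,
// so the accumulator occupies 512 / 32 = 16 registers; the AGPR form is
// chosen only when the function's AGPR budget covers them.
static bool selectAGPRForm(unsigned DstSizeInBits, unsigned NumAGPRsBudget,
                           bool VGPRFormForced) {
  unsigned MinNumRegsRequired = DstSizeInBits / 32; // e.g. 512 / 32 = 16
  return !VGPRFormForced && NumAGPRsBudget >= MinNumRegsRequired;
}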
diff --git a/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp b/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
index ef63acc..71494be 100644
--- a/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNRegPressure.cpp
@@ -905,7 +905,7 @@ bool GCNRegPressurePrinter::runOnMachineFunction(MachineFunction &MF) {
     OS << ":\n";
 
     SlotIndex MBBStartSlot = LIS.getSlotIndexes()->getMBBStartIdx(&MBB);
-    SlotIndex MBBEndSlot = LIS.getSlotIndexes()->getMBBEndIdx(&MBB);
+    SlotIndex MBBLastSlot = LIS.getSlotIndexes()->getMBBLastIdx(&MBB);
 
     GCNRPTracker::LiveRegSet LiveIn, LiveOut;
     GCNRegPressure RPAtMBBEnd;
@@ -931,7 +931,7 @@ bool GCNRegPressurePrinter::runOnMachineFunction(MachineFunction &MF) {
       }
     } else {
       GCNUpwardRPTracker RPT(LIS);
-      RPT.reset(MRI, MBBEndSlot);
+      RPT.reset(MRI, MBBLastSlot);
 
       LiveOut = RPT.getLiveRegs();
       RPAtMBBEnd = RPT.getPressure();
@@ -966,14 +966,14 @@ bool GCNRegPressurePrinter::runOnMachineFunction(MachineFunction &MF) {
       OS << PFX "  Live-out:" << llvm::print(LiveOut, MRI);
       if (UseDownwardTracker)
-        ReportLISMismatchIfAny(LiveOut, getLiveRegs(MBBEndSlot, LIS, MRI));
+        ReportLISMismatchIfAny(LiveOut, getLiveRegs(MBBLastSlot, LIS, MRI));
 
       GCNRPTracker::LiveRegSet LiveThrough;
       for (auto [Reg, Mask] : LiveIn) {
         LaneBitmask MaskIntersection = Mask & LiveOut.lookup(Reg);
         if (MaskIntersection.any()) {
           LaneBitmask LTMask = getRegLiveThroughMask(
-              MRI, LIS, Reg, MBBStartSlot, MBBEndSlot, MaskIntersection);
+              MRI, LIS, Reg, MBBStartSlot, MBBLastSlot, MaskIntersection);
           if (LTMask.any())
             LiveThrough[Reg] = LTMask;
         }
diff --git a/llvm/lib/Target/AMDGPU/GCNRegPressure.h b/llvm/lib/Target/AMDGPU/GCNRegPressure.h
index a9c58bb..898d1ff 100644
--- a/llvm/lib/Target/AMDGPU/GCNRegPressure.h
+++ b/llvm/lib/Target/AMDGPU/GCNRegPressure.h
@@ -313,8 +313,8 @@ public:
 
   /// reset tracker to the end of the \p MBB.
   void reset(const MachineBasicBlock &MBB) {
-    reset(MBB.getParent()->getRegInfo(),
-          LIS.getSlotIndexes()->getMBBEndIdx(&MBB));
+    SlotIndex MBBLastSlot = LIS.getSlotIndexes()->getMBBLastIdx(&MBB);
+    reset(MBB.getParent()->getRegInfo(), MBBLastSlot);
   }
 
   /// reset tracker to the point just after \p MI (in program order).
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 730be69..80e985d 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -103,52 +103,52 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
     addRegisterClass(MVT::Untyped, V64RegClass);
 
     addRegisterClass(MVT::v3i32, &AMDGPU::SGPR_96RegClass);
-    addRegisterClass(MVT::v3f32, TRI->getVGPRClassForBitWidth(96));
+    addRegisterClass(MVT::v3f32, &AMDGPU::VReg_96RegClass);
 
     addRegisterClass(MVT::v2i64, &AMDGPU::SGPR_128RegClass);
     addRegisterClass(MVT::v2f64, &AMDGPU::SGPR_128RegClass);
 
     addRegisterClass(MVT::v4i32, &AMDGPU::SGPR_128RegClass);
-    addRegisterClass(MVT::v4f32, TRI->getVGPRClassForBitWidth(128));
+    addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
 
     addRegisterClass(MVT::v5i32, &AMDGPU::SGPR_160RegClass);
-    addRegisterClass(MVT::v5f32, TRI->getVGPRClassForBitWidth(160));
+    addRegisterClass(MVT::v5f32, &AMDGPU::VReg_160RegClass);
 
     addRegisterClass(MVT::v6i32, &AMDGPU::SGPR_192RegClass);
-    addRegisterClass(MVT::v6f32, TRI->getVGPRClassForBitWidth(192));
+    addRegisterClass(MVT::v6f32, &AMDGPU::VReg_192RegClass);
 
     addRegisterClass(MVT::v3i64, &AMDGPU::SGPR_192RegClass);
-    addRegisterClass(MVT::v3f64, TRI->getVGPRClassForBitWidth(192));
+    addRegisterClass(MVT::v3f64, &AMDGPU::VReg_192RegClass);
 
     addRegisterClass(MVT::v7i32, &AMDGPU::SGPR_224RegClass);
-    addRegisterClass(MVT::v7f32, TRI->getVGPRClassForBitWidth(224));
+    addRegisterClass(MVT::v7f32, &AMDGPU::VReg_224RegClass);
 
     addRegisterClass(MVT::v8i32, &AMDGPU::SGPR_256RegClass);
-    addRegisterClass(MVT::v8f32, TRI->getVGPRClassForBitWidth(256));
+    addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);
 
     addRegisterClass(MVT::v4i64, &AMDGPU::SGPR_256RegClass);
-    addRegisterClass(MVT::v4f64, TRI->getVGPRClassForBitWidth(256));
+    addRegisterClass(MVT::v4f64, &AMDGPU::VReg_256RegClass);
 
     addRegisterClass(MVT::v9i32, &AMDGPU::SGPR_288RegClass);
-    addRegisterClass(MVT::v9f32, TRI->getVGPRClassForBitWidth(288));
+    addRegisterClass(MVT::v9f32, &AMDGPU::VReg_288RegClass);
 
     addRegisterClass(MVT::v10i32, &AMDGPU::SGPR_320RegClass);
-    addRegisterClass(MVT::v10f32, TRI->getVGPRClassForBitWidth(320));
+    addRegisterClass(MVT::v10f32, &AMDGPU::VReg_320RegClass);
 
     addRegisterClass(MVT::v11i32, &AMDGPU::SGPR_352RegClass);
-    addRegisterClass(MVT::v11f32, TRI->getVGPRClassForBitWidth(352));
+    addRegisterClass(MVT::v11f32, &AMDGPU::VReg_352RegClass);
 
     addRegisterClass(MVT::v12i32, &AMDGPU::SGPR_384RegClass);
-    addRegisterClass(MVT::v12f32, TRI->getVGPRClassForBitWidth(384));
+    addRegisterClass(MVT::v12f32, &AMDGPU::VReg_384RegClass);
 
     addRegisterClass(MVT::v16i32, &AMDGPU::SGPR_512RegClass);
-    addRegisterClass(MVT::v16f32, TRI->getVGPRClassForBitWidth(512));
+    addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);
 
     addRegisterClass(MVT::v8i64, &AMDGPU::SGPR_512RegClass);
-    addRegisterClass(MVT::v8f64, TRI->getVGPRClassForBitWidth(512));
+    addRegisterClass(MVT::v8f64, &AMDGPU::VReg_512RegClass);
 
     addRegisterClass(MVT::v16i64, &AMDGPU::SGPR_1024RegClass);
-    addRegisterClass(MVT::v16f64, TRI->getVGPRClassForBitWidth(1024));
+    addRegisterClass(MVT::v16f64, &AMDGPU::VReg_1024RegClass);
 
     if (Subtarget->has16BitInsts()) {
       if (Subtarget->useRealTrue16Insts()) {
@@ -180,7 +180,7 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
     }
 
     addRegisterClass(MVT::v32i32, &AMDGPU::VReg_1024RegClass);
-    addRegisterClass(MVT::v32f32, TRI->getVGPRClassForBitWidth(1024));
+    addRegisterClass(MVT::v32f32, &AMDGPU::VReg_1024RegClass);
 
     computeRegisterProperties(Subtarget->getRegisterInfo());
diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
index b7dbb59..2c1a13c 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
@@ -1202,6 +1202,12 @@ public:
 
   unsigned getMinNumAGPRs() const { return MinNumAGPRs; }
 
+  /// Return true if an MFMA that requires at least \p NumRegs should select to
+  /// the AGPR form, instead of the VGPR form.
+  bool selectAGPRFormMFMA(unsigned NumRegs) const {
+    return !MFMAVGPRForm && getMinNumAGPRs() >= NumRegs;
+  }
+
   // \returns true if a function has a use of AGPRs via inline asm or
   // has a call which may use it.
   bool mayUseAGPRs(const Function &F) const;
diff --git a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
index 4d3331a..c684f9e 100644
--- a/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPreEmitPeephole.cpp
@@ -674,15 +674,9 @@ void SIPreEmitPeephole::performF32Unpacking(MachineInstr &I) {
       createUnpackedMI(I, UnpackedOpcode, /*IsHiBits=*/true);
   MachineOperand HiDstOp = Op0HOp1H->getOperand(0);
 
-  if (I.getFlag(MachineInstr::MIFlag::NoFPExcept)) {
-    Op0LOp1L->setFlag(MachineInstr::MIFlag::NoFPExcept);
-    Op0HOp1H->setFlag(MachineInstr::MIFlag::NoFPExcept);
-  }
-  if (I.getFlag(MachineInstr::MIFlag::FmContract)) {
-    Op0LOp1L->setFlag(MachineInstr::MIFlag::FmContract);
-    Op0HOp1H->setFlag(MachineInstr::MIFlag::FmContract);
-  }
-
+  uint32_t IFlags = I.getFlags();
+  Op0LOp1L->setFlags(IFlags);
+  Op0HOp1H->setFlags(IFlags);
 
   LoDstOp.setIsRenamable(DstOp.isRenamable());
   HiDstOp.setIsRenamable(DstOp.isRenamable());
diff --git a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
index 7cfd059..6500fce 100644
--- a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
@@ -964,14 +964,12 @@ class MAIFrag<SDPatternOperator Op, bit HasAbid = true, bit Scaled = false> : Pa
 class CanUseAGPR_MAI<ValueType vt> {
   code PredicateCode = [{
     return !Subtarget->hasGFX90AInsts() ||
-           (!SIMachineFunctionInfo::MFMAVGPRForm &&
-            MF->getInfo<SIMachineFunctionInfo>()->getMinNumAGPRs() >=
-  }] # !srl(vt.Size, 5) # ");";
+           MF->getInfo<SIMachineFunctionInfo>()->selectAGPRFormMFMA(
+  }] # !srl(vt.Size, 5) # ");";
 
   code GISelPredicateCode = [{
     return !Subtarget->hasGFX90AInsts() ||
-           (!SIMachineFunctionInfo::MFMAVGPRForm &&
-            MF.getInfo<SIMachineFunctionInfo>()->getMinNumAGPRs() >=
+           MF.getInfo<SIMachineFunctionInfo>()->selectAGPRFormMFMA(
   }] # !srl(vt.Size, 5) # ");";
 }
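
For concreteness, in CanUseAGPR_MAI<v16f32> the TableGen expression !srl(vt.Size, 5) computes 512 >> 5 = 16, so the body of the generated C++ predicate is effectively:

// Expansion of the predicate for a v16f32 accumulator (vt.Size = 512):
return !Subtarget->hasGFX90AInsts() ||
       MF->getInfo<SIMachineFunctionInfo>()->selectAGPRFormMFMA(16);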