Diffstat (limited to 'llvm/lib/CodeGen')
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/AIXException.cpp            |   4
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/ARMException.cpp            |   2
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp              |   9
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp           |   4
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp       |   6
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp        |   2
-rw-r--r--  llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp              |   7
-rw-r--r--  llvm/lib/CodeGen/BranchFolding.cpp                      |  47
-rw-r--r--  llvm/lib/CodeGen/CodeGen.cpp                            |   1
-rw-r--r--  llvm/lib/CodeGen/CodeGenPrepare.cpp                     |   4
-rw-r--r--  llvm/lib/CodeGen/CommandFlags.cpp                       |   7
-rw-r--r--  llvm/lib/CodeGen/ExpandFp.cpp                           |  33
-rw-r--r--  llvm/lib/CodeGen/InterleavedAccessPass.cpp              | 123
-rw-r--r--  llvm/lib/CodeGen/MIRParser/MIRParser.cpp                |  14
-rw-r--r--  llvm/lib/CodeGen/MIRPrinter.cpp                         |  15
-rw-r--r--  llvm/lib/CodeGen/MachineFunction.cpp                    |   5
-rw-r--r--  llvm/lib/CodeGen/MachineInstrBundle.cpp                 |  31
-rw-r--r--  llvm/lib/CodeGen/MachineLICM.cpp                        |   1
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp  |  54
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h           |   4
-rw-r--r--  llvm/lib/CodeGen/StackProtector.cpp                     |  10
-rw-r--r--  llvm/lib/CodeGen/TargetLoweringBase.cpp                 |  34
-rw-r--r--  llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp       |  22
23 files changed, 292 insertions(+), 147 deletions(-)
diff --git a/llvm/lib/CodeGen/AsmPrinter/AIXException.cpp b/llvm/lib/CodeGen/AsmPrinter/AIXException.cpp
index 5d7c97a..6356d71 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AIXException.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AIXException.cpp
@@ -37,8 +37,8 @@ void AIXException::emitExceptionInfoTable(const MCSymbol *LSDA,
// unsigned long personality; /* Pointer to the personality routine */
// }
- auto *EHInfo =
- cast<MCSectionXCOFF>(Asm->getObjFileLowering().getCompactUnwindSection());
+ auto *EHInfo = static_cast<MCSectionXCOFF *>(
+ Asm->getObjFileLowering().getCompactUnwindSection());
if (Asm->TM.getFunctionSections()) {
// If option -ffunction-sections is on, append the function name to the
// name of EH Info Table csect so that each function has its own EH Info
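This hunk, like the CodeViewDebug, AsmPrinter and TargetLoweringObjectFileImpl hunks further down, swaps a checked LLVM cast<>/dyn_cast<> on an MCSection pointer for a plain static_cast, relying on an explicit target or object-file-format check to pin the concrete section type. A minimal standalone sketch of that idea (the class and function names here are illustrative, not part of this patch):

    #include <cassert>

    struct Section { virtual ~Section() = default; };
    struct XCOFFSection : Section { int CsectKind = 42; };

    // When a prior target/format check already guarantees the dynamic type,
    // a checked downcast adds nothing; static_cast is sufficient and cheaper.
    static int useSection(const Section *S, bool TargetIsXCOFF) {
      if (!TargetIsXCOFF)
        return 0;
      return static_cast<const XCOFFSection *>(S)->CsectKind;
    }

    int main() {
      XCOFFSection S;
      assert(useSection(&S, /*TargetIsXCOFF=*/true) == 42);
      return 0;
    }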
diff --git a/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp b/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp
index de6ebcf..51342c6 100644
--- a/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/ARMException.cpp
@@ -39,7 +39,7 @@ void ARMException::beginFunction(const MachineFunction *MF) {
if (CFISecType == AsmPrinter::CFISection::Debug) {
if (!hasEmittedCFISections) {
if (Asm->getModuleCFISectionType() == AsmPrinter::CFISection::Debug)
- Asm->OutStreamer->emitCFISections(false, true);
+ Asm->OutStreamer->emitCFISections(false, true, false);
hasEmittedCFISections = true;
}
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index f1d3e96..6166271 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -4221,10 +4221,11 @@ MCSymbol *AsmPrinter::GetCPISymbol(unsigned CPID) const {
SectionKind Kind = CPE.getSectionKind(&DL);
const Constant *C = CPE.Val.ConstVal;
Align Alignment = CPE.Alignment;
- if (const MCSectionCOFF *S = dyn_cast<MCSectionCOFF>(
- getObjFileLowering().getSectionForConstant(DL, Kind, C,
- Alignment))) {
- if (MCSymbol *Sym = S->getCOMDATSymbol()) {
+ auto *S =
+ getObjFileLowering().getSectionForConstant(DL, Kind, C, Alignment);
+ if (S && TM.getTargetTriple().isOSBinFormatCOFF()) {
+ if (MCSymbol *Sym =
+ static_cast<const MCSectionCOFF *>(S)->getCOMDATSymbol()) {
if (Sym->isUndefined())
OutStreamer->emitSymbolAttribute(Sym, MCSA_Global);
return Sym;
diff --git a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
index 8abeb56..c5d6e40 100644
--- a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
@@ -1051,10 +1051,10 @@ void CodeViewDebug::switchToDebugSectionForSymbol(const MCSymbol *GVSym) {
// comdat key. A section may be comdat because of -ffunction-sections or
// because it is comdat in the IR.
MCSectionCOFF *GVSec =
- GVSym ? dyn_cast<MCSectionCOFF>(&GVSym->getSection()) : nullptr;
+ GVSym ? static_cast<MCSectionCOFF *>(&GVSym->getSection()) : nullptr;
const MCSymbol *KeySym = GVSec ? GVSec->getCOMDATSymbol() : nullptr;
- MCSectionCOFF *DebugSec = cast<MCSectionCOFF>(
+ auto *DebugSec = static_cast<MCSectionCOFF *>(
CompilerInfoAsm->getObjFileLowering().getCOFFDebugSymbolsSection());
DebugSec = OS.getContext().getAssociativeCOFFSection(DebugSec, KeySym);
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
index 4fac4bb..6b8d08c 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp
@@ -109,9 +109,11 @@ void DwarfCFIException::beginBasicBlockSection(const MachineBasicBlock &MBB) {
// chose not to be verbose in that case. And with `ForceDwarfFrameSection`,
// we should always emit .debug_frame.
if (CFISecType == AsmPrinter::CFISection::Debug ||
- Asm->TM.Options.ForceDwarfFrameSection)
+ Asm->TM.Options.ForceDwarfFrameSection ||
+ Asm->TM.Options.MCOptions.EmitSFrameUnwind)
Asm->OutStreamer->emitCFISections(
- CFISecType == AsmPrinter::CFISection::EH, true);
+ CFISecType == AsmPrinter::CFISection::EH, true,
+ Asm->TM.Options.MCOptions.EmitSFrameUnwind);
hasEmittedCFISections = true;
}
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
index 8e8cda4..5577a7d 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
@@ -1379,7 +1379,7 @@ void DwarfCompileUnit::constructCallSiteParmEntryDIEs(
DIE *DwarfCompileUnit::constructImportedEntityDIE(
const DIImportedEntity *Module) {
- DIE *IMDie = DIE::get(DIEValueAllocator, (dwarf::Tag)Module->getTag());
+ DIE *IMDie = DIE::get(DIEValueAllocator, Module->getTag());
insertDIE(Module, IMDie);
DIE *EntityDie;
auto *Entity = Module->getEntity();
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index 11b8576..7188833 100644
--- a/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -972,10 +972,9 @@ void DwarfDebug::constructCallSiteEntryDIEs(const DISubprogram &SP,
// the call graph which could lead to some target function. For tail
// calls, no return PC information is needed, unless tuning for GDB in
// DWARF4 mode in which case we fake a return PC for compatibility.
- const MCSymbol *PCAddr =
- (!IsTail || CU.useGNUAnalogForDwarf5Feature())
- ? const_cast<MCSymbol *>(getLabelAfterInsn(TopLevelCallMI))
- : nullptr;
+ const MCSymbol *PCAddr = (!IsTail || CU.useGNUAnalogForDwarf5Feature())
+ ? getLabelAfterInsn(TopLevelCallMI)
+ : nullptr;
// For tail calls, it's necessary to record the address of the branch
// instruction so that the debugger can show where the tail call occurred.
diff --git a/llvm/lib/CodeGen/BranchFolding.cpp b/llvm/lib/CodeGen/BranchFolding.cpp
index 3b3e7a4..a7c99b1 100644
--- a/llvm/lib/CodeGen/BranchFolding.cpp
+++ b/llvm/lib/CodeGen/BranchFolding.cpp
@@ -2083,22 +2083,54 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
if (TBB == FBB) {
MBB->splice(Loc, TBB, TBB->begin(), TIB);
} else {
+ // Merge the debug locations, and hoist and kill the debug instructions from
+ // both branches. FIXME: We could probably try harder to preserve some debug
+ // instructions (but at least this isn't producing wrong locations).
+ MachineInstrBuilder MIRBuilder(*MBB->getParent(), Loc);
+ auto HoistAndKillDbgInstr = [MBB, Loc](MachineBasicBlock::iterator DI) {
+ assert(DI->isDebugInstr() && "Expected a debug instruction");
+ if (DI->isDebugRef()) {
+ const TargetInstrInfo *TII =
+ MBB->getParent()->getSubtarget().getInstrInfo();
+ const MCInstrDesc &DBGV = TII->get(TargetOpcode::DBG_VALUE);
+ DI = BuildMI(*MBB->getParent(), DI->getDebugLoc(), DBGV, false, 0,
+ DI->getDebugVariable(), DI->getDebugExpression());
+ MBB->insert(Loc, &*DI);
+ return;
+ }
+ // Deleting a DBG_PHI results in an undef at the referenced DBG_INSTR_REF.
+ if (DI->isDebugPHI()) {
+ DI->eraseFromParent();
+ return;
+ }
+
+ DI->setDebugValueUndef();
+ DI->moveBefore(&*Loc);
+ };
+
// TIB and FIB point to the end of the regions to hoist/merge in TBB and
// FBB.
MachineBasicBlock::iterator FE = FIB;
MachineBasicBlock::iterator FI = FBB->begin();
for (MachineBasicBlock::iterator TI :
make_early_inc_range(make_range(TBB->begin(), TIB))) {
- // Move debug instructions and pseudo probes without modifying them.
- // FIXME: This is the wrong thing to do for debug locations, which
- // should at least be killed (and hoisted from BOTH blocks).
- if (TI->isDebugOrPseudoInstr()) {
- TI->moveBefore(&*Loc);
+ // Hoist and kill debug instructions from FBB. After this loop FI points
+ // to the next non-debug instruction to hoist (checked in assert after the
+ // TBB debug instruction handling code).
+ while (FI != FE && FI->isDebugInstr())
+ HoistAndKillDbgInstr(FI++);
+
+ // Kill debug instructions before moving.
+ if (TI->isDebugInstr()) {
+ HoistAndKillDbgInstr(TI);
continue;
}
- // Get the next non-meta instruction in FBB.
- FI = skipDebugInstructionsForward(FI, FE, false);
+ // FI and TI now point to identical non-debug instructions.
+ assert(FI != FE && "Unexpected end of FBB range");
+ // Pseudo probes are excluded from the range when identifying foldable
+ // instructions, so we don't expect to see one now.
+ assert(!TI->isPseudoProbe() && "Unexpected pseudo probe in range");
// NOTE: The loop above checks CheckKillDead but we can't do that here as
// it modifies some kill markers after the check.
assert(TI->isIdenticalTo(*FI, MachineInstr::CheckDefs) &&
@@ -2111,6 +2143,7 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
++FI;
}
}
+
FBB->erase(FBB->begin(), FIB);
if (UpdateLiveIns)
diff --git a/llvm/lib/CodeGen/CodeGen.cpp b/llvm/lib/CodeGen/CodeGen.cpp
index c3b4077..989cf4c4 100644
--- a/llvm/lib/CodeGen/CodeGen.cpp
+++ b/llvm/lib/CodeGen/CodeGen.cpp
@@ -45,7 +45,6 @@ void llvm::initializeCodeGen(PassRegistry &Registry) {
initializeExpandPostRALegacyPass(Registry);
initializeFEntryInserterLegacyPass(Registry);
initializeFinalizeISelPass(Registry);
- initializeFinalizeMachineBundlesPass(Registry);
initializeFixupStatepointCallerSavedLegacyPass(Registry);
initializeFuncletLayoutPass(Registry);
initializeGCMachineCodeAnalysisPass(Registry);
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index c21058c..416c56d 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -2095,6 +2095,10 @@ static bool isRemOfLoopIncrementWithLoopInvariant(
if (!L->isLoopInvariant(RemAmt))
return false;
+ // Only works if the AddOffset is loop invariant
+ if (AddOffset && !L->isLoopInvariant(AddOffset))
+ return false;
+
// Is the PHI a loop increment?
auto LoopIncrInfo = getIVIncrement(PN, LI);
if (!LoopIncrInfo)
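The added check closes a hole in the rem-of-loop-increment transform: rewriting (iv + AddOffset) % RemAmt into a wrapping counter is only sound when AddOffset, like RemAmt, does not change inside the loop. A standalone sketch of the counter rewrite under that assumption (plain C++, not LLVM code):

    #include <cstdio>

    int main() {
      const unsigned RemAmt = 5, AddOffset = 3;   // both loop-invariant
      unsigned Rem = AddOffset % RemAmt;          // counter form of (I + AddOffset) % RemAmt
      for (unsigned I = 0; I < 20; ++I) {
        if (Rem != (I + AddOffset) % RemAmt)
          return 1;
        if (++Rem == RemAmt)                      // increment and wrap instead of urem
          Rem = 0;
      }
      std::puts("counter form matches the urem form");
      return 0;
    }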
diff --git a/llvm/lib/CodeGen/CommandFlags.cpp b/llvm/lib/CodeGen/CommandFlags.cpp
index 9512f79..810dc29 100644
--- a/llvm/lib/CodeGen/CommandFlags.cpp
+++ b/llvm/lib/CodeGen/CommandFlags.cpp
@@ -101,6 +101,7 @@ CGOPT(EABI, EABIVersion)
CGOPT(DebuggerKind, DebuggerTuningOpt)
CGOPT(bool, EnableStackSizeSection)
CGOPT(bool, EnableAddrsig)
+CGOPT(bool, EnableCallGraphSection)
CGOPT(bool, EmitCallSiteInfo)
CGOPT(bool, EnableMachineFunctionSplitter)
CGOPT(bool, EnableStaticDataPartitioning)
@@ -461,6 +462,11 @@ codegen::RegisterCodeGenFlags::RegisterCodeGenFlags() {
cl::init(false));
CGBINDOPT(EnableAddrsig);
+ static cl::opt<bool> EnableCallGraphSection(
+ "call-graph-section", cl::desc("Emit a call graph section"),
+ cl::init(false));
+ CGBINDOPT(EnableCallGraphSection);
+
static cl::opt<bool> EmitCallSiteInfo(
"emit-call-site-info",
cl::desc(
@@ -595,6 +601,7 @@ codegen::InitTargetOptionsFromCodeGenFlags(const Triple &TheTriple) {
Options.EnableMachineFunctionSplitter = getEnableMachineFunctionSplitter();
Options.EnableStaticDataPartitioning = getEnableStaticDataPartitioning();
Options.EmitAddrsig = getEnableAddrsig();
+ Options.EmitCallGraphSection = getEnableCallGraphSection();
Options.EmitCallSiteInfo = getEmitCallSiteInfo();
Options.EnableDebugEntryValues = getEnableDebugEntryValues();
Options.ForceDwarfFrameSection = getForceDwarfFrameSection();
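A sketch of how a codegen tool would pick up the new option, assuming the usual llc-style flow (the driver code below is illustrative, not part of this patch): constructing codegen::RegisterCodeGenFlags registers -call-graph-section, and InitTargetOptionsFromCodeGenFlags copies it into TargetOptions::EmitCallGraphSection as added above.

    #include "llvm/CodeGen/CommandFlags.h"
    #include "llvm/Support/CommandLine.h"
    #include "llvm/Target/TargetOptions.h"
    #include "llvm/TargetParser/Triple.h"

    static llvm::codegen::RegisterCodeGenFlags CGF; // registers -call-graph-section et al.

    int main(int argc, char **argv) {
      llvm::cl::ParseCommandLineOptions(argc, argv);
      llvm::Triple T("x86_64-unknown-linux-gnu");
      llvm::TargetOptions Opts = llvm::codegen::InitTargetOptionsFromCodeGenFlags(T);
      return Opts.EmitCallGraphSection ? 0 : 1;   // true when -call-graph-section was passed
    }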
diff --git a/llvm/lib/CodeGen/ExpandFp.cpp b/llvm/lib/CodeGen/ExpandFp.cpp
index 714ec55..1c1047c 100644
--- a/llvm/lib/CodeGen/ExpandFp.cpp
+++ b/llvm/lib/CodeGen/ExpandFp.cpp
@@ -103,10 +103,10 @@ static void expandFPToI(Instruction *FPToI) {
Value *A1 = nullptr;
if (FloatVal->getType()->isHalfTy()) {
if (FPToI->getOpcode() == Instruction::FPToUI) {
- Value *A0 = Builder.CreateFPToUI(FloatVal, Builder.getIntNTy(32));
+ Value *A0 = Builder.CreateFPToUI(FloatVal, Builder.getInt32Ty());
A1 = Builder.CreateZExt(A0, IntTy);
} else { // FPToSI
- Value *A0 = Builder.CreateFPToSI(FloatVal, Builder.getIntNTy(32));
+ Value *A0 = Builder.CreateFPToSI(FloatVal, Builder.getInt32Ty());
A1 = Builder.CreateSExt(A0, IntTy);
}
FPToI->replaceAllUsesWith(A1);
@@ -425,8 +425,8 @@ static void expandIToFP(Instruction *IToFP) {
AAddr0->addIncoming(IsSigned ? Sub : IntVal, IfThen4);
AAddr0->addIncoming(Shl, SwBB);
Value *A0 = Builder.CreateTrunc(AAddr0, Builder.getInt32Ty());
- Value *A1 = Builder.CreateLShr(A0, Builder.getIntN(32, 2));
- Value *A2 = Builder.CreateAnd(A1, Builder.getIntN(32, 1));
+ Value *A1 = Builder.CreateLShr(A0, Builder.getInt32(2));
+ Value *A2 = Builder.CreateAnd(A1, Builder.getInt32(1));
Value *Conv16 = Builder.CreateZExt(A2, IntTy);
Value *Or17 = Builder.CreateOr(AAddr0, Conv16);
Value *Inc = Builder.CreateAdd(Or17, Builder.getIntN(BitWidth, 1));
@@ -457,9 +457,9 @@ static void expandIToFP(Instruction *IToFP) {
Value *Extract = Builder.CreateLShr(Shr21, Builder.getIntN(BitWidth, 32));
Value *ExtractT62 = nullptr;
if (FloatWidth > 80)
- ExtractT62 = Builder.CreateTrunc(Sub1, Builder.getIntNTy(64));
+ ExtractT62 = Builder.CreateTrunc(Sub1, Builder.getInt64Ty());
else
- ExtractT62 = Builder.CreateTrunc(Extract, Builder.getIntNTy(32));
+ ExtractT62 = Builder.CreateTrunc(Extract, Builder.getInt32Ty());
Builder.CreateBr(IfEnd26);
// if.else:
@@ -475,7 +475,7 @@ static void expandIToFP(Instruction *IToFP) {
Value *Extract65 = Builder.CreateLShr(Shl26, Builder.getIntN(BitWidth, 32));
Value *ExtractT66 = nullptr;
if (FloatWidth > 80)
- ExtractT66 = Builder.CreateTrunc(Sub2, Builder.getIntNTy(64));
+ ExtractT66 = Builder.CreateTrunc(Sub2, Builder.getInt64Ty());
else
ExtractT66 = Builder.CreateTrunc(Extract65, Builder.getInt32Ty());
Builder.CreateBr(IfEnd26);
@@ -507,30 +507,29 @@ static void expandIToFP(Instruction *IToFP) {
Builder.getIntN(BitWidth, 63));
And29 = Builder.CreateAnd(Shr, Temp2, "and29");
} else {
- Value *Conv28 = Builder.CreateTrunc(Shr, Builder.getIntNTy(32));
+ Value *Conv28 = Builder.CreateTrunc(Shr, Builder.getInt32Ty());
And29 = Builder.CreateAnd(
- Conv28, ConstantInt::getSigned(Builder.getIntNTy(32), 0x80000000));
+ Conv28, ConstantInt::getSigned(Builder.getInt32Ty(), 0x80000000));
}
unsigned TempMod = FPMantissaWidth % 32;
Value *And34 = nullptr;
Value *Shl30 = nullptr;
if (FloatWidth > 80) {
TempMod += 32;
- Value *Add = Builder.CreateShl(AAddr1Off32, Builder.getIntN(64, TempMod));
+ Value *Add = Builder.CreateShl(AAddr1Off32, Builder.getInt64(TempMod));
Shl30 = Builder.CreateAdd(
- Add,
- Builder.getIntN(64, ((1ull << (62ull - TempMod)) - 1ull) << TempMod));
- And34 = Builder.CreateZExt(Shl30, Builder.getIntNTy(128));
+ Add, Builder.getInt64(((1ull << (62ull - TempMod)) - 1ull) << TempMod));
+ And34 = Builder.CreateZExt(Shl30, Builder.getInt128Ty());
} else {
- Value *Add = Builder.CreateShl(E0, Builder.getIntN(32, TempMod));
+ Value *Add = Builder.CreateShl(E0, Builder.getInt32(TempMod));
Shl30 = Builder.CreateAdd(
- Add, Builder.getIntN(32, ((1 << (30 - TempMod)) - 1) << TempMod));
+ Add, Builder.getInt32(((1 << (30 - TempMod)) - 1) << TempMod));
And34 = Builder.CreateAnd(FloatWidth > 32 ? AAddr1Off32 : AAddr1Off0,
- Builder.getIntN(32, (1 << TempMod) - 1));
+ Builder.getInt32((1 << TempMod) - 1));
}
Value *Or35 = nullptr;
if (FloatWidth > 80) {
- Value *And29Trunc = Builder.CreateTrunc(And29, Builder.getIntNTy(128));
+ Value *And29Trunc = Builder.CreateTrunc(And29, Builder.getInt128Ty());
Value *Or31 = Builder.CreateOr(And29Trunc, And34);
Value *Or34 = Builder.CreateShl(Or31, Builder.getIntN(128, 64));
Value *Temp3 = Builder.CreateShl(Builder.getIntN(128, 1),
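The ExpandFp changes are mechanical: IRBuilder's fixed-width helpers produce exactly the same uniqued types and constants as the getIntNTy/getIntN spellings when the width is a literal. A small sketch of the equivalence (assumes LLVM headers are available):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    #include <cassert>

    int main() {
      llvm::LLVMContext Ctx;
      llvm::IRBuilder<> B(Ctx);
      assert(B.getIntNTy(32) == B.getInt32Ty());    // same uniqued Type*
      assert(B.getIntN(32, 2) == B.getInt32(2));    // same uniqued ConstantInt*
      assert(B.getIntNTy(128) == B.getInt128Ty());
      return 0;
    }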
diff --git a/llvm/lib/CodeGen/InterleavedAccessPass.cpp b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
index 1b69188..5e50898 100644
--- a/llvm/lib/CodeGen/InterleavedAccessPass.cpp
+++ b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
@@ -253,6 +253,21 @@ static bool isReInterleaveMask(ShuffleVectorInst *SVI, unsigned &Factor,
return false;
}
+static Value *getMaskOperand(IntrinsicInst *II) {
+ switch (II->getIntrinsicID()) {
+ default:
+ llvm_unreachable("Unexpected intrinsic");
+ case Intrinsic::vp_load:
+ return II->getOperand(1);
+ case Intrinsic::masked_load:
+ return II->getOperand(2);
+ case Intrinsic::vp_store:
+ return II->getOperand(2);
+ case Intrinsic::masked_store:
+ return II->getOperand(3);
+ }
+}
+
// Return the corresponded deinterleaved mask, or nullptr if there is no valid
// mask.
static Value *getMask(Value *WideMask, unsigned Factor,
@@ -268,8 +283,12 @@ bool InterleavedAccessImpl::lowerInterleavedLoad(
if (isa<ScalableVectorType>(Load->getType()))
return false;
- if (auto *LI = dyn_cast<LoadInst>(Load);
- LI && !LI->isSimple())
+ auto *LI = dyn_cast<LoadInst>(Load);
+ auto *II = dyn_cast<IntrinsicInst>(Load);
+ if (!LI && !II)
+ return false;
+
+ if (LI && !LI->isSimple())
return false;
// Check if all users of this load are shufflevectors. If we encounter any
@@ -322,7 +341,7 @@ bool InterleavedAccessImpl::lowerInterleavedLoad(
// Holds the corresponding index for each DE-interleave shuffle.
SmallVector<unsigned, 4> Indices;
- Type *VecTy = FirstSVI->getType();
+ VectorType *VecTy = cast<VectorType>(FirstSVI->getType());
// Check if other shufflevectors are also DE-interleaved of the same type
// and factor as the first shufflevector.
@@ -360,13 +379,16 @@ bool InterleavedAccessImpl::lowerInterleavedLoad(
replaceBinOpShuffles(BinOpShuffles.getArrayRef(), Shuffles, Load);
Value *Mask = nullptr;
- if (auto *VPLoad = dyn_cast<VPIntrinsic>(Load)) {
- Mask = getMask(VPLoad->getMaskParam(), Factor, cast<VectorType>(VecTy));
+ if (LI) {
+ LLVM_DEBUG(dbgs() << "IA: Found an interleaved load: " << *Load << "\n");
+ } else {
+ // Check mask operand. Handle both all-true/false and interleaved mask.
+ Mask = getMask(getMaskOperand(II), Factor, VecTy);
if (!Mask)
return false;
- LLVM_DEBUG(dbgs() << "IA: Found an interleaved vp.load: " << *Load << "\n");
- } else {
- LLVM_DEBUG(dbgs() << "IA: Found an interleaved load: " << *Load << "\n");
+
+ LLVM_DEBUG(dbgs() << "IA: Found an interleaved vp.load or masked.load: "
+ << *Load << "\n");
}
// Try to create target specific intrinsics to replace the load and
@@ -483,15 +505,16 @@ bool InterleavedAccessImpl::tryReplaceExtracts(
bool InterleavedAccessImpl::lowerInterleavedStore(
Instruction *Store, SmallSetVector<Instruction *, 32> &DeadInsts) {
Value *StoredValue;
- if (auto *SI = dyn_cast<StoreInst>(Store)) {
+ auto *SI = dyn_cast<StoreInst>(Store);
+ auto *II = dyn_cast<IntrinsicInst>(Store);
+ if (SI) {
if (!SI->isSimple())
return false;
StoredValue = SI->getValueOperand();
- } else if (auto *VPStore = dyn_cast<VPIntrinsic>(Store)) {
- assert(VPStore->getIntrinsicID() == Intrinsic::vp_store);
- StoredValue = VPStore->getArgOperand(0);
} else {
- llvm_unreachable("unsupported store operation");
+ assert(II->getIntrinsicID() == Intrinsic::vp_store ||
+ II->getIntrinsicID() == Intrinsic::masked_store);
+ StoredValue = II->getArgOperand(0);
}
auto *SVI = dyn_cast<ShuffleVectorInst>(StoredValue);
@@ -508,18 +531,18 @@ bool InterleavedAccessImpl::lowerInterleavedStore(
"number of stored element should be a multiple of Factor");
Value *Mask = nullptr;
- if (auto *VPStore = dyn_cast<VPIntrinsic>(Store)) {
+ if (SI) {
+ LLVM_DEBUG(dbgs() << "IA: Found an interleaved store: " << *Store << "\n");
+ } else {
+ // Check mask operand. Handle both all-true/false and interleaved mask.
unsigned LaneMaskLen = NumStoredElements / Factor;
- Mask = getMask(VPStore->getMaskParam(), Factor,
+ Mask = getMask(getMaskOperand(II), Factor,
ElementCount::getFixed(LaneMaskLen));
if (!Mask)
return false;
- LLVM_DEBUG(dbgs() << "IA: Found an interleaved vp.store: " << *Store
- << "\n");
-
- } else {
- LLVM_DEBUG(dbgs() << "IA: Found an interleaved store: " << *Store << "\n");
+ LLVM_DEBUG(dbgs() << "IA: Found an interleaved vp.store or masked.store: "
+ << *Store << "\n");
}
// Try to create target specific intrinsics to replace the store and
@@ -564,6 +587,27 @@ static Value *getMask(Value *WideMask, unsigned Factor,
}
}
+ if (auto *SVI = dyn_cast<ShuffleVectorInst>(WideMask)) {
+ // Check that the shuffle mask is: a) an interleave, b) all of the same
+ // set of the elements, and c) contained by the first source. (c) could
+ // be relaxed if desired.
+ unsigned NumSrcElts =
+ cast<FixedVectorType>(SVI->getOperand(1)->getType())->getNumElements();
+ SmallVector<unsigned> StartIndexes;
+ if (ShuffleVectorInst::isInterleaveMask(SVI->getShuffleMask(), Factor,
+ NumSrcElts * 2, StartIndexes) &&
+ llvm::all_of(StartIndexes, [](unsigned Start) { return Start == 0; }) &&
+ llvm::all_of(SVI->getShuffleMask(), [&NumSrcElts](int Idx) {
+ return Idx < (int)NumSrcElts;
+ })) {
+ auto *LeafMaskTy =
+ VectorType::get(Type::getInt1Ty(SVI->getContext()), LeafValueEC);
+ IRBuilder<> Builder(SVI);
+ return Builder.CreateExtractVector(LeafMaskTy, SVI->getOperand(0),
+ uint64_t(0));
+ }
+ }
+
return nullptr;
}
@@ -590,21 +634,12 @@ bool InterleavedAccessImpl::lowerDeinterleaveIntrinsic(
<< " and factor = " << Factor << "\n");
} else {
assert(II);
-
- // Check mask operand. Handle both all-true/false and interleaved mask.
- Value *WideMask;
- switch (II->getIntrinsicID()) {
- default:
+ if (II->getIntrinsicID() != Intrinsic::masked_load &&
+ II->getIntrinsicID() != Intrinsic::vp_load)
return false;
- case Intrinsic::vp_load:
- WideMask = II->getOperand(1);
- break;
- case Intrinsic::masked_load:
- WideMask = II->getOperand(2);
- break;
- }
- Mask = getMask(WideMask, Factor, getDeinterleavedVectorType(DI));
+ // Check mask operand. Handle both all-true/false and interleaved mask.
+ Mask = getMask(getMaskOperand(II), Factor, getDeinterleavedVectorType(DI));
if (!Mask)
return false;
@@ -641,19 +676,11 @@ bool InterleavedAccessImpl::lowerInterleaveIntrinsic(
Value *Mask = nullptr;
if (II) {
- // Check mask operand. Handle both all-true/false and interleaved mask.
- Value *WideMask;
- switch (II->getIntrinsicID()) {
- default:
+ if (II->getIntrinsicID() != Intrinsic::masked_store &&
+ II->getIntrinsicID() != Intrinsic::vp_store)
return false;
- case Intrinsic::vp_store:
- WideMask = II->getOperand(2);
- break;
- case Intrinsic::masked_store:
- WideMask = II->getOperand(3);
- break;
- }
- Mask = getMask(WideMask, Factor,
+ // Check mask operand. Handle both all-true/false and interleaved mask.
+ Mask = getMask(getMaskOperand(II), Factor,
cast<VectorType>(InterleaveValues[0]->getType()));
if (!Mask)
return false;
@@ -687,11 +714,13 @@ bool InterleavedAccessImpl::runOnFunction(Function &F) {
using namespace PatternMatch;
for (auto &I : instructions(F)) {
if (match(&I, m_CombineOr(m_Load(m_Value()),
- m_Intrinsic<Intrinsic::vp_load>())))
+ m_Intrinsic<Intrinsic::vp_load>())) ||
+ match(&I, m_Intrinsic<Intrinsic::masked_load>()))
Changed |= lowerInterleavedLoad(&I, DeadInsts);
if (match(&I, m_CombineOr(m_Store(m_Value(), m_Value()),
- m_Intrinsic<Intrinsic::vp_store>())))
+ m_Intrinsic<Intrinsic::vp_store>())) ||
+ match(&I, m_Intrinsic<Intrinsic::masked_store>()))
Changed |= lowerInterleavedStore(&I, DeadInsts);
if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
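The operand indices hard-coded in getMaskOperand() above follow the intrinsic signatures: llvm.vp.load(ptr, mask, evl), llvm.masked.load(ptr, align, mask, passthru), llvm.vp.store(val, ptr, mask, evl), and llvm.masked.store(val, ptr, align, mask). A small sketch checking the masked.load case (assumes LLVM is available and that masked.load still carries its alignment operand, as it does in this patch):

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/Alignment.h"
    #include <cassert>

    int main() {
      llvm::LLVMContext Ctx;
      llvm::Module M("m", Ctx);
      auto *VecTy = llvm::FixedVectorType::get(llvm::Type::getInt32Ty(Ctx), 8);
      auto *FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
                                           {llvm::PointerType::getUnqual(Ctx)}, false);
      auto *F = llvm::Function::Create(FnTy, llvm::Function::ExternalLinkage, "f", M);
      llvm::IRBuilder<> B(llvm::BasicBlock::Create(Ctx, "entry", F));
      llvm::Value *Mask = llvm::Constant::getAllOnesValue(
          llvm::FixedVectorType::get(llvm::Type::getInt1Ty(Ctx), 8));
      llvm::CallInst *Load =
          B.CreateMaskedLoad(VecTy, F->getArg(0), llvm::Align(4), Mask);
      assert(Load->getArgOperand(2) == Mask); // mask is operand 2 of masked.load
      return 0;
    }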
diff --git a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
index 1e9fcf3..3e99e57 100644
--- a/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIRParser.cpp
@@ -504,13 +504,21 @@ bool MIRParserImpl::initializeCallSiteInfo(
return error(Error, ArgRegPair.Reg.SourceRange);
CSInfo.ArgRegPairs.emplace_back(Reg, ArgRegPair.ArgNo);
}
+ if (!YamlCSInfo.CalleeTypeIds.empty()) {
+ for (auto CalleeTypeId : YamlCSInfo.CalleeTypeIds) {
+ IntegerType *Int64Ty = Type::getInt64Ty(Context);
+ CSInfo.CalleeTypeIds.push_back(ConstantInt::get(Int64Ty, CalleeTypeId,
+ /*isSigned=*/false));
+ }
+ }
- if (TM.Options.EmitCallSiteInfo)
+ if (TM.Options.EmitCallSiteInfo || TM.Options.EmitCallGraphSection)
MF.addCallSiteInfo(&*CallI, std::move(CSInfo));
}
- if (YamlMF.CallSitesInfo.size() && !TM.Options.EmitCallSiteInfo)
- return error(Twine("Call site info provided but not used"));
+ if (!YamlMF.CallSitesInfo.empty() &&
+ !(TM.Options.EmitCallSiteInfo || TM.Options.EmitCallGraphSection))
+ return error("call site info provided but not used");
return false;
}
diff --git a/llvm/lib/CodeGen/MIRPrinter.cpp b/llvm/lib/CodeGen/MIRPrinter.cpp
index 7710b50..ad7835a 100644
--- a/llvm/lib/CodeGen/MIRPrinter.cpp
+++ b/llvm/lib/CodeGen/MIRPrinter.cpp
@@ -525,24 +525,30 @@ static void convertCallSiteObjects(yaml::MachineFunction &YMF,
const MachineFunction &MF,
ModuleSlotTracker &MST) {
const auto *TRI = MF.getSubtarget().getRegisterInfo();
- for (auto CSInfo : MF.getCallSitesInfo()) {
+ for (auto [MI, CallSiteInfo] : MF.getCallSitesInfo()) {
yaml::CallSiteInfo YmlCS;
yaml::MachineInstrLoc CallLocation;
// Prepare instruction position.
- MachineBasicBlock::const_instr_iterator CallI = CSInfo.first->getIterator();
+ MachineBasicBlock::const_instr_iterator CallI = MI->getIterator();
CallLocation.BlockNum = CallI->getParent()->getNumber();
// Get call instruction offset from the beginning of block.
CallLocation.Offset =
std::distance(CallI->getParent()->instr_begin(), CallI);
YmlCS.CallLocation = CallLocation;
+
+ auto [ArgRegPairs, CalleeTypeIds] = CallSiteInfo;
// Construct call arguments and their forwarding register info.
- for (auto ArgReg : CSInfo.second.ArgRegPairs) {
+ for (auto ArgReg : ArgRegPairs) {
yaml::CallSiteInfo::ArgRegPair YmlArgReg;
YmlArgReg.ArgNo = ArgReg.ArgNo;
printRegMIR(ArgReg.Reg, YmlArgReg.Reg, TRI);
YmlCS.ArgForwardingRegs.emplace_back(YmlArgReg);
}
+ // Get type ids.
+ for (auto *CalleeTypeId : CalleeTypeIds) {
+ YmlCS.CalleeTypeIds.push_back(CalleeTypeId->getZExtValue());
+ }
YMF.CallSitesInfo.push_back(std::move(YmlCS));
}
@@ -815,6 +821,9 @@ static void printMI(raw_ostream &OS, MFPrintState &State,
if (MI.getFlag(MachineInstr::SameSign))
OS << "samesign ";
+ // NOTE: Please add new MIFlags also to the MI_FLAGS_STR in
+ // llvm/utils/update_mir_test_checks.py.
+
OS << TII->getName(MI.getOpcode());
LS = ListSeparator();
diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp
index 429a17a..60d42e0 100644
--- a/llvm/lib/CodeGen/MachineFunction.cpp
+++ b/llvm/lib/CodeGen/MachineFunction.cpp
@@ -211,8 +211,7 @@ void MachineFunction::init() {
ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
Alignment = STI->getTargetLowering()->getMinFunctionAlignment();
- // FIXME: Use Function::hasOptSize().
- if (!F.getAlign() && !F.hasFnAttribute(Attribute::OptimizeForSize))
+ if (!F.getAlign() && !F.hasOptSize())
Alignment = std::max(Alignment,
STI->getTargetLowering()->getPrefFunctionAlignment());
@@ -920,7 +919,7 @@ MachineFunction::getCallSiteInfo(const MachineInstr *MI) {
assert(MI->isCandidateForAdditionalCallInfo() &&
"Call site info refers only to call (MI) candidates");
- if (!Target.Options.EmitCallSiteInfo)
+ if (!Target.Options.EmitCallSiteInfo && !Target.Options.EmitCallGraphSection)
return CallSitesInfo.end();
return CallSitesInfo.find(MI);
}
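The FIXME in the first hunk is resolved by Function::hasOptSize(), which is true for minsize as well as optsize, so the preferred-alignment bump is now also skipped for minsize functions. A small sketch of the difference (assumes LLVM headers are available):

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include <cassert>

    int main() {
      llvm::LLVMContext Ctx;
      llvm::Module M("m", Ctx);
      auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), false);
      auto *F = llvm::Function::Create(FT, llvm::Function::ExternalLinkage, "f", M);
      F->addFnAttr(llvm::Attribute::MinSize);
      assert(F->hasOptSize());                                      // new check fires
      assert(!F->hasFnAttribute(llvm::Attribute::OptimizeForSize)); // old check missed it
      return 0;
    }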
diff --git a/llvm/lib/CodeGen/MachineInstrBundle.cpp b/llvm/lib/CodeGen/MachineInstrBundle.cpp
index 34896c6..4da0184 100644
--- a/llvm/lib/CodeGen/MachineInstrBundle.cpp
+++ b/llvm/lib/CodeGen/MachineInstrBundle.cpp
@@ -83,27 +83,6 @@ llvm::createUnpackMachineBundles(
return new UnpackMachineBundles(std::move(Ftor));
}
-namespace {
- class FinalizeMachineBundles : public MachineFunctionPass {
- public:
- static char ID; // Pass identification
- FinalizeMachineBundles() : MachineFunctionPass(ID) {
- initializeFinalizeMachineBundlesPass(*PassRegistry::getPassRegistry());
- }
-
- bool runOnMachineFunction(MachineFunction &MF) override;
- };
-} // end anonymous namespace
-
-char FinalizeMachineBundles::ID = 0;
-char &llvm::FinalizeMachineBundlesID = FinalizeMachineBundles::ID;
-INITIALIZE_PASS(FinalizeMachineBundles, "finalize-mi-bundles",
- "Finalize machine instruction bundles", false, false)
-
-bool FinalizeMachineBundles::runOnMachineFunction(MachineFunction &MF) {
- return llvm::finalizeBundles(MF);
-}
-
/// Return the first found DebugLoc that has a DILocation, given a range of
/// instructions. The search range is from FirstMI to LastMI (exclusive). If no
/// DILocation is found, then an empty location is returned.
@@ -359,3 +338,13 @@ PhysRegInfo llvm::AnalyzePhysRegInBundle(const MachineInstr &MI, Register Reg,
return PRI;
}
+
+PreservedAnalyses
+llvm::FinalizeBundleTestPass::run(MachineFunction &MF,
+ MachineFunctionAnalysisManager &) {
+ // For testing purposes, bundle the entire contents of each basic block
+ // except for terminators.
+ for (MachineBasicBlock &MBB : MF)
+ finalizeBundle(MBB, MBB.instr_begin(), MBB.getFirstInstrTerminator());
+ return PreservedAnalyses::none();
+}
diff --git a/llvm/lib/CodeGen/MachineLICM.cpp b/llvm/lib/CodeGen/MachineLICM.cpp
index e144111..286fbfd 100644
--- a/llvm/lib/CodeGen/MachineLICM.cpp
+++ b/llvm/lib/CodeGen/MachineLICM.cpp
@@ -49,7 +49,6 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
#include <cassert>
#include <limits>
#include <vector>
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index e5704c0..583a85a 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -22,6 +22,7 @@
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
@@ -357,6 +358,9 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::PATCHPOINT:
Res = PromoteIntRes_PATCHPOINT(N);
break;
+ case ISD::READ_REGISTER:
+ Res = PromoteIntRes_READ_REGISTER(N);
+ break;
}
// If the result is null then the sub-method took care of registering it.
@@ -2076,6 +2080,9 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
case ISD::PATCHPOINT:
Res = PromoteIntOp_PATCHPOINT(N, OpNo);
break;
+ case ISD::WRITE_REGISTER:
+ Res = PromoteIntOp_WRITE_REGISTER(N, OpNo);
+ break;
case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
Res = PromoteIntOp_VP_STRIDED(N, OpNo);
@@ -2853,6 +2860,15 @@ SDValue DAGTypeLegalizer::PromoteIntOp_PATCHPOINT(SDNode *N, unsigned OpNo) {
return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
}
+SDValue DAGTypeLegalizer::PromoteIntOp_WRITE_REGISTER(SDNode *N,
+ unsigned OpNo) {
+ const Function &Fn = DAG.getMachineFunction().getFunction();
+ Fn.getContext().diagnose(DiagnosticInfoLegalizationFailure(
+ "cannot use llvm.write_register with illegal type", Fn,
+ N->getDebugLoc()));
+ return N->getOperand(0);
+}
+
SDValue DAGTypeLegalizer::PromoteIntOp_VP_STRIDED(SDNode *N, unsigned OpNo) {
assert((N->getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_LOAD && OpNo == 3) ||
(N->getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_STORE && OpNo == 4));
@@ -3127,6 +3143,10 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::VSCALE:
ExpandIntRes_VSCALE(N, Lo, Hi);
break;
+
+ case ISD::READ_REGISTER:
+ ExpandIntRes_READ_REGISTER(N, Lo, Hi);
+ break;
}
// If Lo/Hi is null, the sub-method took care of registering results etc.
@@ -5471,6 +5491,18 @@ void DAGTypeLegalizer::ExpandIntRes_VSCALE(SDNode *N, SDValue &Lo,
SplitInteger(Res, Lo, Hi);
}
+void DAGTypeLegalizer::ExpandIntRes_READ_REGISTER(SDNode *N, SDValue &Lo,
+ SDValue &Hi) {
+ const Function &Fn = DAG.getMachineFunction().getFunction();
+ Fn.getContext().diagnose(DiagnosticInfoLegalizationFailure(
+ "cannot use llvm.read_register with illegal type", Fn, N->getDebugLoc()));
+ ReplaceValueWith(SDValue(N, 1), N->getOperand(0));
+ EVT LoVT, HiVT;
+ std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
+ Lo = DAG.getPOISON(LoVT);
+ Hi = DAG.getPOISON(HiVT);
+}
+
//===----------------------------------------------------------------------===//
// Integer Operand Expansion
//===----------------------------------------------------------------------===//
@@ -5537,6 +5569,9 @@ bool DAGTypeLegalizer::ExpandIntegerOperand(SDNode *N, unsigned OpNo) {
case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
Res = ExpandIntOp_VP_STRIDED(N, OpNo);
break;
+ case ISD::WRITE_REGISTER:
+ Res = ExpandIntOp_WRITE_REGISTER(N, OpNo);
+ break;
}
// If the result is null, the sub-method took care of registering results etc.
@@ -5935,6 +5970,15 @@ SDValue DAGTypeLegalizer::ExpandIntOp_VP_STRIDED(SDNode *N, unsigned OpNo) {
return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
}
+SDValue DAGTypeLegalizer::ExpandIntOp_WRITE_REGISTER(SDNode *N, unsigned OpNo) {
+ const Function &Fn = DAG.getMachineFunction().getFunction();
+ Fn.getContext().diagnose(DiagnosticInfoLegalizationFailure(
+ "cannot use llvm.write_register with illegal type", Fn,
+ N->getDebugLoc()));
+
+ return N->getOperand(0);
+}
+
SDValue DAGTypeLegalizer::PromoteIntRes_VECTOR_SPLICE(SDNode *N) {
SDLoc dl(N);
@@ -6332,6 +6376,16 @@ SDValue DAGTypeLegalizer::PromoteIntRes_PATCHPOINT(SDNode *N) {
return Res.getValue(0);
}
+SDValue DAGTypeLegalizer::PromoteIntRes_READ_REGISTER(SDNode *N) {
+ const Function &Fn = DAG.getMachineFunction().getFunction();
+ Fn.getContext().diagnose(DiagnosticInfoLegalizationFailure(
+ "cannot use llvm.read_register with illegal type", Fn, N->getDebugLoc()));
+
+ EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
+ ReplaceValueWith(SDValue(N, 1), N->getOperand(0));
+ return DAG.getPOISON(NVT);
+}
+
SDValue DAGTypeLegalizer::PromoteIntOp_EXTRACT_VECTOR_ELT(SDNode *N) {
SDLoc dl(N);
SDValue V0 = GetPromotedInteger(N->getOperand(0));
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 9b53724..2e13b18 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -378,6 +378,7 @@ private:
SDValue PromoteIntRes_VPFunnelShift(SDNode *N);
SDValue PromoteIntRes_IS_FPCLASS(SDNode *N);
SDValue PromoteIntRes_PATCHPOINT(SDNode *N);
+ SDValue PromoteIntRes_READ_REGISTER(SDNode *N);
SDValue PromoteIntRes_VECTOR_FIND_LAST_ACTIVE(SDNode *N);
SDValue PromoteIntRes_GET_ACTIVE_LANE_MASK(SDNode *N);
SDValue PromoteIntRes_PARTIAL_REDUCE_MLA(SDNode *N);
@@ -428,6 +429,7 @@ private:
SDValue PromoteIntOp_SET_ROUNDING(SDNode *N);
SDValue PromoteIntOp_STACKMAP(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_PATCHPOINT(SDNode *N, unsigned OpNo);
+ SDValue PromoteIntOp_WRITE_REGISTER(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_VP_STRIDED(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_VP_SPLICE(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_VECTOR_HISTOGRAM(SDNode *N, unsigned OpNo);
@@ -511,6 +513,7 @@ private:
void ExpandIntRes_FunnelShift (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandIntRes_VSCALE (SDNode *N, SDValue &Lo, SDValue &Hi);
+ void ExpandIntRes_READ_REGISTER(SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandShiftByConstant(SDNode *N, const APInt &Amt,
SDValue &Lo, SDValue &Hi);
@@ -534,6 +537,7 @@ private:
SDValue ExpandIntOp_STACKMAP(SDNode *N, unsigned OpNo);
SDValue ExpandIntOp_PATCHPOINT(SDNode *N, unsigned OpNo);
SDValue ExpandIntOp_VP_STRIDED(SDNode *N, unsigned OpNo);
+ SDValue ExpandIntOp_WRITE_REGISTER(SDNode *N, unsigned OpNo);
void IntegerExpandSetCCOperands(SDValue &NewLHS, SDValue &NewRHS,
ISD::CondCode &CCCode, const SDLoc &dl);
diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
index b79911b..2a8234a 100644
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -588,7 +588,14 @@ bool InsertStackProtectors(const TargetMachine *TM, Function *F,
continue;
Instruction *CheckLoc = dyn_cast<ReturnInst>(BB.getTerminator());
if (!CheckLoc && !DisableCheckNoReturn)
- for (auto &Inst : BB)
+ for (auto &Inst : BB) {
+ if (IntrinsicInst *IB = dyn_cast<IntrinsicInst>(&Inst);
+ IB && (IB->getIntrinsicID() == Intrinsic::eh_sjlj_callsite)) {
+ // eh_sjlj_callsite has to be in same BB as the
+ // bb terminator. Don't insert within this range.
+ CheckLoc = IB;
+ break;
+ }
if (auto *CB = dyn_cast<CallBase>(&Inst))
// Do stack check before noreturn calls that aren't nounwind (e.g:
// __cxa_throw).
@@ -596,6 +603,7 @@ bool InsertStackProtectors(const TargetMachine *TM, Function *F,
CheckLoc = CB;
break;
}
+ }
if (!CheckLoc)
continue;
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index d4a3455..68b8a00 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -806,7 +806,17 @@ void TargetLoweringBase::initActions() {
ISD::SDIVFIX, ISD::SDIVFIXSAT,
ISD::UDIVFIX, ISD::UDIVFIXSAT,
ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT,
- ISD::IS_FPCLASS},
+ ISD::IS_FPCLASS, ISD::FCBRT,
+ ISD::FLOG, ISD::FLOG2,
+ ISD::FLOG10, ISD::FEXP,
+ ISD::FEXP2, ISD::FEXP10,
+ ISD::FFLOOR, ISD::FNEARBYINT,
+ ISD::FCEIL, ISD::FRINT,
+ ISD::FTRUNC, ISD::FROUNDEVEN,
+ ISD::FTAN, ISD::FACOS,
+ ISD::FASIN, ISD::FATAN,
+ ISD::FCOSH, ISD::FSINH,
+ ISD::FTANH, ISD::FATAN2},
VT, Expand);
// Overflow operations default to expand
@@ -852,13 +862,12 @@ void TargetLoweringBase::initActions() {
// These operations default to expand for vector types.
if (VT.isVector())
- setOperationAction(
- {ISD::FCOPYSIGN, ISD::SIGN_EXTEND_INREG, ISD::ANY_EXTEND_VECTOR_INREG,
- ISD::SIGN_EXTEND_VECTOR_INREG, ISD::ZERO_EXTEND_VECTOR_INREG,
- ISD::SPLAT_VECTOR, ISD::LRINT, ISD::LLRINT, ISD::LROUND,
- ISD::LLROUND, ISD::FTAN, ISD::FACOS, ISD::FASIN, ISD::FATAN,
- ISD::FCOSH, ISD::FSINH, ISD::FTANH, ISD::FATAN2},
- VT, Expand);
+ setOperationAction({ISD::FCOPYSIGN, ISD::SIGN_EXTEND_INREG,
+ ISD::ANY_EXTEND_VECTOR_INREG,
+ ISD::SIGN_EXTEND_VECTOR_INREG,
+ ISD::ZERO_EXTEND_VECTOR_INREG, ISD::SPLAT_VECTOR,
+ ISD::LRINT, ISD::LLRINT, ISD::LROUND, ISD::LLROUND},
+ VT, Expand);
// Constrained floating-point operations default to expand.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
@@ -914,15 +923,6 @@ void TargetLoweringBase::initActions() {
{MVT::bf16, MVT::f16, MVT::f32, MVT::f64, MVT::f80, MVT::f128},
Expand);
- // These library functions default to expand.
- setOperationAction({ISD::FCBRT, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
- ISD::FEXP, ISD::FEXP2, ISD::FEXP10, ISD::FFLOOR,
- ISD::FNEARBYINT, ISD::FCEIL, ISD::FRINT, ISD::FTRUNC,
- ISD::FROUNDEVEN, ISD::FTAN, ISD::FACOS, ISD::FASIN,
- ISD::FATAN, ISD::FCOSH, ISD::FSINH, ISD::FTANH,
- ISD::FATAN2},
- {MVT::f32, MVT::f64, MVT::f128}, Expand);
-
// Insert custom handling default for llvm.canonicalize.*.
setOperationAction(ISD::FCANONICALIZE,
{MVT::f16, MVT::f32, MVT::f64, MVT::f128}, Expand);
diff --git a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
index 7e501a9..725e951 100644
--- a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -42,7 +42,6 @@
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
-#include "llvm/IR/PseudoProbe.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAsmInfoDarwin.h"
@@ -996,7 +995,7 @@ MCSection *TargetLoweringObjectFileELF::getSectionForLSDA(
if (!LSDASection || (!F.hasComdat() && !TM.getFunctionSections()))
return LSDASection;
- const auto *LSDA = cast<MCSectionELF>(LSDASection);
+ const auto *LSDA = static_cast<const MCSectionELF *>(LSDASection);
unsigned Flags = LSDA->getFlags();
const MCSymbolELF *LinkedToSym = nullptr;
StringRef Group;
@@ -1735,7 +1734,8 @@ MCSection *TargetLoweringObjectFileCOFF::getExplicitSectionGlobal(
Name == getInstrProfSectionName(IPSK_covdata, Triple::COFF,
/*AddSegmentInfo=*/false) ||
Name == getInstrProfSectionName(IPSK_covname, Triple::COFF,
- /*AddSegmentInfo=*/false))
+ /*AddSegmentInfo=*/false) ||
+ Name == ".llvmbc" || Name == ".llvmcmd")
Kind = SectionKind::getMetadata();
int Selection = 0;
unsigned Characteristics = getCOFFSectionFlags(Kind, TM);
@@ -2055,14 +2055,14 @@ MCSection *TargetLoweringObjectFileCOFF::getStaticCtorSection(
unsigned Priority, const MCSymbol *KeySym) const {
return getCOFFStaticStructorSection(
getContext(), getContext().getTargetTriple(), true, Priority, KeySym,
- cast<MCSectionCOFF>(StaticCtorSection));
+ static_cast<MCSectionCOFF *>(StaticCtorSection));
}
MCSection *TargetLoweringObjectFileCOFF::getStaticDtorSection(
unsigned Priority, const MCSymbol *KeySym) const {
return getCOFFStaticStructorSection(
getContext(), getContext().getTargetTriple(), false, Priority, KeySym,
- cast<MCSectionCOFF>(StaticDtorSection));
+ static_cast<MCSectionCOFF *>(StaticDtorSection));
}
const MCExpr *TargetLoweringObjectFileCOFF::lowerRelativeReference(
@@ -2389,23 +2389,25 @@ TargetLoweringObjectFileXCOFF::getTargetSymbol(const GlobalValue *GV,
// here.
if (const GlobalObject *GO = dyn_cast<GlobalObject>(GV)) {
if (GO->isDeclarationForLinker())
- return cast<MCSectionXCOFF>(getSectionForExternalReference(GO, TM))
+ return static_cast<const MCSectionXCOFF *>(
+ getSectionForExternalReference(GO, TM))
->getQualNameSymbol();
if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
if (GVar->hasAttribute("toc-data"))
- return cast<MCSectionXCOFF>(
+ return static_cast<const MCSectionXCOFF *>(
SectionForGlobal(GVar, SectionKind::getData(), TM))
->getQualNameSymbol();
SectionKind GOKind = getKindForGlobal(GO, TM);
if (GOKind.isText())
- return cast<MCSectionXCOFF>(
+ return static_cast<const MCSectionXCOFF *>(
getSectionForFunctionDescriptor(cast<Function>(GO), TM))
->getQualNameSymbol();
if ((TM.getDataSections() && !GO->hasSection()) || GO->hasCommonLinkage() ||
GOKind.isBSSLocal() || GOKind.isThreadBSSLocal())
- return cast<MCSectionXCOFF>(SectionForGlobal(GO, GOKind, TM))
+ return static_cast<const MCSectionXCOFF *>(
+ SectionForGlobal(GO, GOKind, TM))
->getQualNameSymbol();
}
@@ -2741,7 +2743,7 @@ MCSection *TargetLoweringObjectFileXCOFF::getSectionForTOCEntry(
MCSection *TargetLoweringObjectFileXCOFF::getSectionForLSDA(
const Function &F, const MCSymbol &FnSym, const TargetMachine &TM) const {
- auto *LSDA = cast<MCSectionXCOFF>(LSDASection);
+ auto *LSDA = static_cast<MCSectionXCOFF *>(LSDASection);
if (TM.getFunctionSections()) {
// If option -ffunction-sections is on, append the function name to the
// name of the LSDA csect so that each function has its own LSDA csect.