author     Stephen Tozer <stephen.tozer@sony.com>  2024-06-24 17:59:34 +0100
committer  Stephen Tozer <stephen.tozer@sony.com>  2024-06-24 18:00:22 +0100
commit     d75f9dd1d29b332bdc51346de63cbc04646354d7 (patch)
tree       dc4cbd48bb980d4a2aba3c329b472060f74a1421
parent     3b5b814647ef83ab763cf7871b6d74edfca67438 (diff)
Revert "[IR][NFC] Update IRBuilder to use InsertPosition (#96497)"
Reverts the above commit, as it updated a common header function without updating all call sites (see buildbot failure: https://lab.llvm.org/buildbot/#/builders/29/builds/382).

This reverts commit 6481dc57612671ebe77fe9c34214fba94e1b3b27.
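For readers skimming the patch, a minimal sketch (not part of the commit; the helper functions below are hypothetical) of the insertion-point idiom this revert restores across the call sites: instead of passing a single InsertPosition, callers once again pass the basic block together with an iterator, or a block alone to append at its end.

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Hypothetical helper: position the builder at the first valid insertion
// point of BB using the restored (block, iterator) overload, as in the
// updated call sites below.
static void positionAtFirstInsertionPoint(IRBuilder<> &Builder, BasicBlock *BB) {
  Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
}

// Hypothetical helper: the block-only overload appends new instructions at
// the end of BB.
static void positionAtEnd(IRBuilder<> &Builder, BasicBlock *BB) {
  Builder.SetInsertPoint(BB);
}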
-rw-r--r--  clang/lib/CodeGen/CGBlocks.cpp | 2
-rw-r--r--  clang/lib/CodeGen/CGGPUBuiltin.cpp | 4
-rw-r--r--  clang/lib/CodeGen/CGHLSLRuntime.cpp | 2
-rw-r--r--  clang/lib/CodeGen/CGObjC.cpp | 7
-rw-r--r--  clang/lib/CodeGen/CGObjCMac.cpp | 2
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntime.cpp | 2
-rw-r--r--  clang/lib/CodeGen/CGStmt.cpp | 2
-rw-r--r--  clang/lib/CodeGen/CodeGenABITypes.cpp | 2
-rw-r--r--  clang/lib/CodeGen/CodeGenFunction.cpp | 4
-rw-r--r--  llvm/include/llvm/IR/IRBuilder.h | 78
-rw-r--r--  llvm/include/llvm/IR/Instruction.h | 9
-rw-r--r--  llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h | 4
-rw-r--r--  llvm/lib/Analysis/MemoryBuiltins.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/AtomicExpandPass.cpp | 8
-rw-r--r--  llvm/lib/CodeGen/CodeGenPrepare.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/ExpandLargeFpConvert.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/ExpandMemCmp.cpp | 6
-rw-r--r--  llvm/lib/CodeGen/ExpandVectorPredication.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/HardwareLoops.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/IntrinsicLowering.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/SafeStack.cpp | 2
-rw-r--r--  llvm/lib/CodeGen/ShadowStackGCLowering.cpp | 4
-rw-r--r--  llvm/lib/CodeGen/SjLjEHPrepare.cpp | 5
-rw-r--r--  llvm/lib/CodeGen/WasmEHPrepare.cpp | 2
-rw-r--r--  llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp | 29
-rw-r--r--  llvm/lib/IR/AutoUpgrade.cpp | 4
-rw-r--r--  llvm/lib/IR/Core.cpp | 4
-rw-r--r--  llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp | 4
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp | 2
-rw-r--r--  llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp | 2
-rw-r--r--  llvm/lib/Target/ARM/ARMParallelDSP.cpp | 6
-rw-r--r--  llvm/lib/Target/ARM/MVELaneInterleavingPass.cpp | 2
-rw-r--r--  llvm/lib/Target/ARM/MVETailPredication.cpp | 2
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp | 4
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp | 6
-rw-r--r--  llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp | 2
-rw-r--r--  llvm/lib/Target/PowerPC/PPCBoolRetToInt.cpp | 2
-rw-r--r--  llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp | 2
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp | 2
-rw-r--r--  llvm/lib/Target/X86/X86LowerAMXIntrinsics.cpp | 4
-rw-r--r--  llvm/lib/Target/X86/X86LowerAMXType.cpp | 3
-rw-r--r--  llvm/lib/Target/X86/X86WinEHState.cpp | 2
-rw-r--r--  llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Coroutines/CoroFrame.cpp | 12
-rw-r--r--  llvm/lib/Transforms/IPO/AttributorAttributes.cpp | 2
-rw-r--r--  llvm/lib/Transforms/IPO/OpenMPOpt.cpp | 3
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp | 2
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp | 4
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp | 4
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp | 2
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp | 5
-rw-r--r--  llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp | 44
-rw-r--r--  llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Instrumentation/MemProfiler.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp | 6
-rw-r--r--  llvm/lib/Transforms/ObjCARC/ObjCARC.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Scalar/ConstraintElimination.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Scalar/GuardWidening.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopBoundSplit.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopPredication.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp | 6
-rw-r--r--  llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Scalar/SROA.cpp | 5
-rw-r--r--  llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp | 14
-rw-r--r--  llvm/lib/Transforms/Scalar/Scalarizer.cpp | 28
-rw-r--r--  llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Utils/BypassSlowDivision.cpp | 10
-rw-r--r--  llvm/lib/Transforms/Utils/CallPromotionUtils.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Utils/FlattenCFG.cpp | 3
-rw-r--r--  llvm/lib/Transforms/Utils/InlineFunction.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Utils/IntegerDivision.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Utils/SSAUpdaterBulk.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp | 9
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyCFG.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyIndVar.cpp | 5
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp | 4
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopIdiomVectorize.cpp | 2
-rw-r--r--  llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 8
-rw-r--r--  llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp | 30
-rw-r--r--  llvm/unittests/Analysis/MemorySSATest.cpp | 24
-rw-r--r--  llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp | 16
-rw-r--r--  llvm/unittests/IR/BasicBlockTest.cpp | 2
-rw-r--r--  llvm/unittests/IR/DebugInfoTest.cpp | 2
-rw-r--r--  llvm/unittests/IR/IRBuilderTest.cpp | 6
-rw-r--r--  llvm/unittests/Transforms/Utils/SSAUpdaterBulkTest.cpp | 2
-rw-r--r--  mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp | 5
-rw-r--r--  polly/lib/CodeGen/BlockGenerators.cpp | 4
97 files changed, 326 insertions, 252 deletions
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index 8b1de12..5dac1cd 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -1545,7 +1545,7 @@ llvm::Function *CodeGenFunction::GenerateBlockFunction(
entry_ptr = entry_ptr->getNextNonDebugInstruction()->getIterator();
else
entry_ptr = entry->end();
- Builder.SetInsertPoint(entry_ptr);
+ Builder.SetInsertPoint(entry, entry_ptr);
// Emit debug information for all the DeclRefExprs.
// FIXME: also for 'this'
diff --git a/clang/lib/CodeGen/CGGPUBuiltin.cpp b/clang/lib/CodeGen/CGGPUBuiltin.cpp
index a0d5768..bd95541 100644
--- a/clang/lib/CodeGen/CGGPUBuiltin.cpp
+++ b/clang/lib/CodeGen/CGGPUBuiltin.cpp
@@ -202,13 +202,13 @@ RValue CodeGenFunction::EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E) {
Args.push_back(Arg);
}
- llvm::IRBuilder<> IRB(Builder.GetInsertPoint());
+ llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
bool isBuffered = (CGM.getTarget().getTargetOpts().AMDGPUPrintfKindVal ==
clang::TargetOptions::AMDGPUPrintfKind::Buffered);
auto Printf = llvm::emitAMDGPUPrintfCall(IRB, Args, isBuffered);
- Builder.SetInsertPoint(IRB.GetInsertPoint());
+ Builder.SetInsertPoint(IRB.GetInsertBlock(), IRB.GetInsertPoint());
return RValue::get(Printf);
}
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.cpp b/clang/lib/CodeGen/CGHLSLRuntime.cpp
index c9f7006..55ba21a 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.cpp
+++ b/clang/lib/CodeGen/CGHLSLRuntime.cpp
@@ -436,7 +436,7 @@ void CGHLSLRuntime::generateGlobalCtorDtorCalls() {
for (auto &F : M.functions()) {
if (!F.hasFnAttribute("hlsl.shader"))
continue;
- IRBuilder<> B(F.getEntryBlock().begin());
+ IRBuilder<> B(&F.getEntryBlock(), F.getEntryBlock().begin());
for (auto *Fn : CtorFns)
B.CreateCall(FunctionCallee(Fn));
diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp
index 12b5412..281b2d9 100644
--- a/clang/lib/CodeGen/CGObjC.cpp
+++ b/clang/lib/CodeGen/CGObjC.cpp
@@ -2970,12 +2970,13 @@ static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF,
value = doFallback(CGF, value);
} else if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
// Place the retain immediately following the call.
- CGF.Builder.SetInsertPoint(++llvm::BasicBlock::iterator(call));
+ CGF.Builder.SetInsertPoint(call->getParent(),
+ ++llvm::BasicBlock::iterator(call));
value = doAfterCall(CGF, value);
} else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
// Place the retain at the beginning of the normal destination block.
llvm::BasicBlock *BB = invoke->getNormalDest();
- CGF.Builder.SetInsertPoint(BB->begin());
+ CGF.Builder.SetInsertPoint(BB, BB->begin());
value = doAfterCall(CGF, value);
// Bitcasts can arise because of related-result returns. Rewrite
@@ -2983,7 +2984,7 @@ static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF,
} else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
// Change the insert point to avoid emitting the fall-back call after the
// bitcast.
- CGF.Builder.SetInsertPoint(bitcast->getIterator());
+ CGF.Builder.SetInsertPoint(bitcast->getParent(), bitcast->getIterator());
llvm::Value *operand = bitcast->getOperand(0);
operand = emitARCOperationAfterCall(CGF, operand, doAfterCall, doFallback);
bitcast->setOperand(0, operand);
diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp
index b7debc8..30f3911 100644
--- a/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/clang/lib/CodeGen/CGObjCMac.cpp
@@ -4417,7 +4417,7 @@ void FragileHazards::emitHazardsInNewBlocks() {
// call. If the call throws, then this is sufficient to
// guarantee correctness as long as it doesn't also write to any
// locals.
- Builder.SetInsertPoint(BI);
+ Builder.SetInsertPoint(&BB, BI);
emitReadHazard(Builder);
}
}
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 532c408..f6d12d4 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -1447,7 +1447,7 @@ llvm::Value *CGOpenMPRuntime::getThreadID(CodeGenFunction &CGF,
if (!Elem.second.ServiceInsertPt)
setLocThreadIdInsertPt(CGF);
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
- CGF.Builder.SetInsertPoint(&*Elem.second.ServiceInsertPt);
+ CGF.Builder.SetInsertPoint(Elem.second.ServiceInsertPt);
auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, Loc);
llvm::CallInst *Call = CGF.Builder.CreateCall(
OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index 1a2a76e..39222c0 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -3076,7 +3076,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
if (IsGCCAsmGoto && !CBRRegResults.empty()) {
for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
- Builder.SetInsertPoint(--(Succ->end()));
+ Builder.SetInsertPoint(Succ, --(Succ->end()));
EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
ResultTypeRequiresCast, ResultRegIsFlagReg);
diff --git a/clang/lib/CodeGen/CodeGenABITypes.cpp b/clang/lib/CodeGen/CodeGenABITypes.cpp
index 4c2e9b8..a6073e1 100644
--- a/clang/lib/CodeGen/CodeGenABITypes.cpp
+++ b/clang/lib/CodeGen/CodeGenABITypes.cpp
@@ -123,7 +123,7 @@ llvm::Value *CodeGen::getCXXDestructorImplicitParam(
CGF.CurCodeDecl = D;
CGF.CurFuncDecl = D;
CGF.CurFn = InsertBlock->getParent();
- CGF.Builder.SetInsertPoint(InsertPoint);
+ CGF.Builder.SetInsertPoint(InsertBlock, InsertPoint);
return CGM.getCXXABI().getCXXDestructorImplicitParam(
CGF, D, Type, ForVirtualBase, Delegating);
}
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
index ffcc4b9..650c566 100644
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -2764,7 +2764,7 @@ void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
if (!CGM.getCodeGenOpts().SanitizeStats)
return;
- llvm::IRBuilder<> IRB(Builder.GetInsertPoint());
+ llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
CGM.getSanStats().create(IRB, SSK);
}
@@ -2883,7 +2883,7 @@ void CodeGenFunction::EmitAArch64MultiVersionResolver(
}
if (!AArch64CpuInitialized) {
- Builder.SetInsertPoint(CurBlock->begin());
+ Builder.SetInsertPoint(CurBlock, CurBlock->begin());
EmitAArch64CpuInit();
AArch64CpuInitialized = true;
Builder.SetInsertPoint(CurBlock);
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index fc660fd..c10ea33 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -173,13 +173,37 @@ public:
BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
LLVMContext &getContext() const { return Context; }
+ /// This specifies that created instructions should be appended to the
+ /// end of the specified block.
+ void SetInsertPoint(BasicBlock *TheBB) {
+ BB = TheBB;
+ InsertPt = BB->end();
+ }
+
+ /// This specifies that created instructions should be inserted before
+ /// the specified instruction.
+ void SetInsertPoint(Instruction *I) {
+ BB = I->getParent();
+ InsertPt = I->getIterator();
+ assert(InsertPt != BB->end() && "Can't read debug loc from end()");
+ SetCurrentDebugLocation(I->getStableDebugLoc());
+ }
+
/// This specifies that created instructions should be inserted at the
- /// specified insert position.
- void SetInsertPoint(InsertPosition IP) {
- BB = IP.getBasicBlock();
+ /// specified point.
+ void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
+ BB = TheBB;
+ InsertPt = IP;
+ if (IP != TheBB->end())
+ SetCurrentDebugLocation(IP->getStableDebugLoc());
+ }
+
+ /// This specifies that created instructions should be inserted at
+ /// the specified point, but also requires that \p IP is dereferencable.
+ void SetInsertPoint(BasicBlock::iterator IP) {
+ BB = IP->getParent();
InsertPt = IP;
- if (InsertPt != BB->end())
- SetCurrentDebugLocation(InsertPt->getStableDebugLoc());
+ SetCurrentDebugLocation(IP->getStableDebugLoc());
}
/// This specifies that created instructions should inserted at the beginning
@@ -262,7 +286,7 @@ public:
/// Sets the current insert point to a previously-saved location.
void restoreIP(InsertPoint IP) {
if (IP.isSet())
- SetInsertPoint(IP.getPoint());
+ SetInsertPoint(IP.getBlock(), IP.getPoint());
else
ClearInsertionPoint();
}
@@ -2653,22 +2677,46 @@ public:
ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
: IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles) {}
- explicit IRBuilder(InsertPosition IP, MDNode *FPMathTag = nullptr,
+ explicit IRBuilder(BasicBlock *TheBB, FolderTy Folder,
+ MDNode *FPMathTag = nullptr,
ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
- : IRBuilderBase(IP.getBasicBlock()->getContext(), this->Folder,
- this->Inserter, FPMathTag, OpBundles) {
- SetInsertPoint(IP);
+ : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
+ FPMathTag, OpBundles),
+ Folder(Folder) {
+ SetInsertPoint(TheBB);
}
- explicit IRBuilder(InsertPosition IP, FolderTy Folder,
- MDNode *FPMathTag = nullptr,
+ explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
- : IRBuilderBase(IP.getBasicBlock()->getContext(), this->Folder,
- this->Inserter, FPMathTag, OpBundles),
- Folder(Folder) {
+ : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
+ FPMathTag, OpBundles) {
+ SetInsertPoint(TheBB);
+ }
+
+ explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
+ ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
+ : IRBuilderBase(IP->getContext(), this->Folder, this->Inserter, FPMathTag,
+ OpBundles) {
SetInsertPoint(IP);
}
+ IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, FolderTy Folder,
+ MDNode *FPMathTag = nullptr,
+ ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
+ : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
+ FPMathTag, OpBundles),
+ Folder(Folder) {
+ SetInsertPoint(TheBB, IP);
+ }
+
+ IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
+ MDNode *FPMathTag = nullptr,
+ ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
+ : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
+ FPMathTag, OpBundles) {
+ SetInsertPoint(TheBB, IP);
+ }
+
/// Avoid copying the full IRBuilder. Prefer using InsertPointGuard
/// or FastMathFlagGuard instead.
IRBuilder(const IRBuilder &) = delete;
diff --git a/llvm/include/llvm/IR/Instruction.h b/llvm/include/llvm/IR/Instruction.h
index 914a00b..2e72f67 100644
--- a/llvm/include/llvm/IR/Instruction.h
+++ b/llvm/include/llvm/IR/Instruction.h
@@ -44,13 +44,6 @@ template <> struct ilist_alloc_traits<Instruction> {
iterator_range<simple_ilist<DbgRecord>::iterator>
getDbgRecordRange(DbgMarker *);
-/// Class used to generate an insert position (ultimately always a
-/// BasicBlock::iterator, which it will implicitly convert to) from either:
-/// - An Instruction, inserting immediately prior. This will soon be marked as
-/// deprecated.
-/// - A BasicBlock, inserting at the end.
-/// - An iterator, inserting at its position.
-/// - Any nullptr value, giving a blank iterator (not valid for insertion).
class InsertPosition {
using InstListType = SymbolTableList<Instruction, ilist_iterator_bits<true>,
ilist_parent<BasicBlock>>;
@@ -58,6 +51,8 @@ class InsertPosition {
public:
InsertPosition(std::nullptr_t) : InsertAt() {}
+ // LLVM_DEPRECATED("Use BasicBlock::iterators for insertion instead",
+ // "BasicBlock::iterator")
InsertPosition(Instruction *InsertBefore);
InsertPosition(BasicBlock *InsertAtEnd);
InsertPosition(InstListType::iterator InsertAt) : InsertAt(InsertAt) {}
diff --git a/llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h b/llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h
index e7a1ab0..62c1e15 100644
--- a/llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h
+++ b/llvm/include/llvm/Transforms/Utils/ScalarEvolutionExpander.h
@@ -376,7 +376,9 @@ public:
Builder.SetInsertPoint(IP);
}
- void setInsertPoint(BasicBlock::iterator IP) { Builder.SetInsertPoint(IP); }
+ void setInsertPoint(BasicBlock::iterator IP) {
+ Builder.SetInsertPoint(IP->getParent(), IP);
+ }
/// Clear the current insertion point. This is useful if the instruction
/// that had been serving as the insertion point may have been deleted.
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index 5b12024..8ca15434 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -1217,7 +1217,7 @@ SizeOffsetValue ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
// Compute offset/size for each PHI incoming pointer.
for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) {
BasicBlock *IncomingBlock = PHI.getIncomingBlock(i);
- Builder.SetInsertPoint(IncomingBlock->getFirstInsertionPt());
+ Builder.SetInsertPoint(IncomingBlock, IncomingBlock->getFirstInsertionPt());
SizeOffsetValue EdgeData = compute_(PHI.getIncomingValue(i));
if (!EdgeData.bothKnown()) {
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index 9f6552c..7728cc5 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -1242,7 +1242,7 @@ Value *AtomicExpandImpl::insertRMWLLSCLoop(
StoreSuccess, ConstantInt::get(IntegerType::get(Ctx, 32), 0), "tryagain");
Builder.CreateCondBr(TryAgain, LoopBB, ExitBB);
- Builder.SetInsertPoint(ExitBB->begin());
+ Builder.SetInsertPoint(ExitBB, ExitBB->begin());
return Loaded;
}
@@ -1478,7 +1478,7 @@ bool AtomicExpandImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
// succeeded or not. We expose this to later passes by converting any
// subsequent "icmp eq/ne %loaded, %oldval" into a use of an appropriate
// PHI.
- Builder.SetInsertPoint(ExitBB->begin());
+ Builder.SetInsertPoint(ExitBB, ExitBB->begin());
PHINode *LoadedExit =
Builder.CreatePHI(UnreleasedLoad->getType(), 2, "loaded.exit");
LoadedExit->addIncoming(LoadedTryStore, SuccessBB);
@@ -1491,7 +1491,7 @@ bool AtomicExpandImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
// a type wider than the one in the cmpxchg instruction.
Value *LoadedFull = LoadedExit;
- Builder.SetInsertPoint(std::next(Success->getIterator()));
+ Builder.SetInsertPoint(ExitBB, std::next(Success->getIterator()));
Value *Loaded = extractMaskedValue(Builder, LoadedFull, PMV);
// Look for any users of the cmpxchg that are just comparing the loaded value
@@ -1616,7 +1616,7 @@ Value *AtomicExpandImpl::insertRMWCmpXchgLoop(
Builder.CreateCondBr(Success, ExitBB, LoopBB);
- Builder.SetInsertPoint(ExitBB->begin());
+ Builder.SetInsertPoint(ExitBB, ExitBB->begin());
return NewLoaded;
}
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index a686045..900c33b5 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -2355,7 +2355,7 @@ static bool despeculateCountZeros(IntrinsicInst *CountZeros,
// Create a PHI in the end block to select either the output of the intrinsic
// or the bit width of the operand.
- Builder.SetInsertPoint(EndBlock->begin());
+ Builder.SetInsertPoint(EndBlock, EndBlock->begin());
PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
replaceAllUsesWith(CountZeros, PN, FreshBBs, IsHugeFunc);
Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
@@ -6306,7 +6306,7 @@ bool CodeGenPrepare::splitLargeGEPOffsets() {
NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock();
NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
}
- IRBuilder<> NewBaseBuilder(NewBaseInsertPt);
+ IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt);
// Create a new base.
Value *BaseIndex = ConstantInt::get(PtrIdxTy, BaseOffset);
NewBaseGEP = OldBase;
diff --git a/llvm/lib/CodeGen/ExpandLargeFpConvert.cpp b/llvm/lib/CodeGen/ExpandLargeFpConvert.cpp
index 2c05f01..11f123a 100644
--- a/llvm/lib/CodeGen/ExpandLargeFpConvert.cpp
+++ b/llvm/lib/CodeGen/ExpandLargeFpConvert.cpp
@@ -212,7 +212,7 @@ static void expandFPToI(Instruction *FPToI) {
Builder.CreateBr(End);
// cleanup:
- Builder.SetInsertPoint(End->begin());
+ Builder.SetInsertPoint(End, End->begin());
PHINode *Retval0 = Builder.CreatePHI(FPToI->getType(), 4);
Retval0->addIncoming(Cond8, IfThen5);
@@ -560,7 +560,7 @@ static void expandIToFP(Instruction *IToFP) {
Builder.CreateBr(End);
// return:
- Builder.SetInsertPoint(End->begin());
+ Builder.SetInsertPoint(End, End->begin());
PHINode *Retval0 = Builder.CreatePHI(IToFP->getType(), 2);
Retval0->addIncoming(A4, IfEnd26);
Retval0->addIncoming(ConstantFP::getZero(IToFP->getType(), false), Entry);
diff --git a/llvm/lib/CodeGen/ExpandMemCmp.cpp b/llvm/lib/CodeGen/ExpandMemCmp.cpp
index 3e59834..bb84813 100644
--- a/llvm/lib/CodeGen/ExpandMemCmp.cpp
+++ b/llvm/lib/CodeGen/ExpandMemCmp.cpp
@@ -574,7 +574,7 @@ void MemCmpExpansion::emitMemCmpResultBlock() {
// need to be calculated and can simply return 1.
if (IsUsedForZeroCmp) {
BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
- Builder.SetInsertPoint(InsertPt);
+ Builder.SetInsertPoint(ResBlock.BB, InsertPt);
Value *Res = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 1);
PhiRes->addIncoming(Res, ResBlock.BB);
BranchInst *NewBr = BranchInst::Create(EndBlock);
@@ -584,7 +584,7 @@ void MemCmpExpansion::emitMemCmpResultBlock() {
return;
}
BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
- Builder.SetInsertPoint(InsertPt);
+ Builder.SetInsertPoint(ResBlock.BB, InsertPt);
Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_ULT, ResBlock.PhiSrc1,
ResBlock.PhiSrc2);
@@ -611,7 +611,7 @@ void MemCmpExpansion::setupResultBlockPHINodes() {
}
void MemCmpExpansion::setupEndBlockPHINodes() {
- Builder.SetInsertPoint(EndBlock->begin());
+ Builder.SetInsertPoint(EndBlock, EndBlock->begin());
PhiRes = Builder.CreatePHI(Type::getInt32Ty(CI->getContext()), 2, "phi.res");
}
diff --git a/llvm/lib/CodeGen/ExpandVectorPredication.cpp b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
index a63a868..dc35f33 100644
--- a/llvm/lib/CodeGen/ExpandVectorPredication.cpp
+++ b/llvm/lib/CodeGen/ExpandVectorPredication.cpp
@@ -667,7 +667,7 @@ void CachingVPExpander::discardEVLParameter(VPIntrinsic &VPI) {
auto *M = VPI.getModule();
Function *VScaleFunc =
Intrinsic::getDeclaration(M, Intrinsic::vscale, Int32Ty);
- IRBuilder<> Builder(VPI.getIterator());
+ IRBuilder<> Builder(VPI.getParent(), VPI.getIterator());
Value *FactorConst = Builder.getInt32(StaticElemCount.getKnownMinValue());
Value *VScale = Builder.CreateCall(VScaleFunc, {}, "vscale");
MaxEVL = Builder.CreateMul(VScale, FactorConst, "scalable_size",
diff --git a/llvm/lib/CodeGen/HardwareLoops.cpp b/llvm/lib/CodeGen/HardwareLoops.cpp
index 200b772..cc5aad1 100644
--- a/llvm/lib/CodeGen/HardwareLoops.cpp
+++ b/llvm/lib/CodeGen/HardwareLoops.cpp
@@ -580,7 +580,7 @@ PHINode* HardwareLoop::InsertPHICounter(Value *NumElts, Value *EltsRem) {
BasicBlock *Preheader = L->getLoopPreheader();
BasicBlock *Header = L->getHeader();
BasicBlock *Latch = ExitBranch->getParent();
- IRBuilder<> Builder(Header->getFirstNonPHIIt());
+ IRBuilder<> Builder(Header, Header->getFirstNonPHIIt());
PHINode *Index = Builder.CreatePHI(NumElts->getType(), 2);
Index->addIncoming(NumElts, Preheader);
Index->addIncoming(EltsRem, Latch);
diff --git a/llvm/lib/CodeGen/IntrinsicLowering.cpp b/llvm/lib/CodeGen/IntrinsicLowering.cpp
index 65262cf..45fba43 100644
--- a/llvm/lib/CodeGen/IntrinsicLowering.cpp
+++ b/llvm/lib/CodeGen/IntrinsicLowering.cpp
@@ -40,7 +40,7 @@ static CallInst *ReplaceCallWith(const char *NewFn, CallInst *CI,
FunctionCallee FCache =
M->getOrInsertFunction(NewFn, FunctionType::get(RetTy, ParamTys, false));
- IRBuilder<> Builder(CI->getIterator());
+ IRBuilder<> Builder(CI->getParent(), CI->getIterator());
SmallVector<Value *, 8> Args(ArgBegin, ArgEnd);
CallInst *NewCI = Builder.CreateCall(FCache, Args);
NewCI->setName(CI->getName());
diff --git a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
index 34711e7..0777acf 100644
--- a/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
+++ b/llvm/lib/CodeGen/PreISelIntrinsicLowering.cpp
@@ -144,7 +144,7 @@ static bool lowerObjCCall(Function &F, const char *NewFn,
auto *CI = cast<CallInst>(CB);
assert(CI->getCalledFunction() && "Cannot lower an indirect call!");
- IRBuilder<> Builder(CI->getIterator());
+ IRBuilder<> Builder(CI->getParent(), CI->getIterator());
SmallVector<Value *, 8> Args(CI->args());
SmallVector<llvm::OperandBundleDef, 1> BundleList;
CI->getOperandBundlesAsDefs(BundleList);
diff --git a/llvm/lib/CodeGen/SafeStack.cpp b/llvm/lib/CodeGen/SafeStack.cpp
index f695420..0a26247 100644
--- a/llvm/lib/CodeGen/SafeStack.cpp
+++ b/llvm/lib/CodeGen/SafeStack.cpp
@@ -784,7 +784,7 @@ bool SafeStack::run() {
if (!StackRestorePoints.empty())
++NumUnsafeStackRestorePointsFunctions;
- IRBuilder<> IRB(F.begin()->getFirstInsertionPt());
+ IRBuilder<> IRB(&F.front(), F.begin()->getFirstInsertionPt());
// Calls must always have a debug location, or else inlining breaks. So
// we explicitly set a artificial debug location here.
if (DISubprogram *SP = F.getSubprogram())
diff --git a/llvm/lib/CodeGen/ShadowStackGCLowering.cpp b/llvm/lib/CodeGen/ShadowStackGCLowering.cpp
index 7497bf3..232e5e2 100644
--- a/llvm/lib/CodeGen/ShadowStackGCLowering.cpp
+++ b/llvm/lib/CodeGen/ShadowStackGCLowering.cpp
@@ -351,7 +351,7 @@ bool ShadowStackGCLoweringImpl::runOnFunction(Function &F,
// Build the shadow stack entry at the very start of the function.
BasicBlock::iterator IP = F.getEntryBlock().begin();
- IRBuilder<> AtEntry(IP);
+ IRBuilder<> AtEntry(IP->getParent(), IP);
Instruction *StackEntry =
AtEntry.CreateAlloca(ConcreteStackEntryTy, nullptr, "gc_frame");
@@ -384,7 +384,7 @@ bool ShadowStackGCLoweringImpl::runOnFunction(Function &F,
// shadow stack.
while (isa<StoreInst>(IP))
++IP;
- AtEntry.SetInsertPoint(IP);
+ AtEntry.SetInsertPoint(IP->getParent(), IP);
// Push the entry onto the shadow stack.
Instruction *EntryNextPtr = CreateGEP(Context, AtEntry, ConcreteStackEntryTy,
diff --git a/llvm/lib/CodeGen/SjLjEHPrepare.cpp b/llvm/lib/CodeGen/SjLjEHPrepare.cpp
index 330d94b..20c827c 100644
--- a/llvm/lib/CodeGen/SjLjEHPrepare.cpp
+++ b/llvm/lib/CodeGen/SjLjEHPrepare.cpp
@@ -182,7 +182,7 @@ void SjLjEHPrepareImpl::substituteLPadValues(LandingPadInst *LPI, Value *ExnVal,
Type *LPadType = LPI->getType();
Value *LPadVal = PoisonValue::get(LPadType);
auto *SelI = cast<Instruction>(SelVal);
- IRBuilder<> Builder(std::next(SelI->getIterator()));
+ IRBuilder<> Builder(SelI->getParent(), std::next(SelI->getIterator()));
LPadVal = Builder.CreateInsertValue(LPadVal, ExnVal, 0, "lpad.val");
LPadVal = Builder.CreateInsertValue(LPadVal, SelVal, 1, "lpad.val");
@@ -206,7 +206,8 @@ SjLjEHPrepareImpl::setupFunctionContext(Function &F,
// Fill in the function context structure.
for (LandingPadInst *LPI : LPads) {
- IRBuilder<> Builder(LPI->getParent()->getFirstInsertionPt());
+ IRBuilder<> Builder(LPI->getParent(),
+ LPI->getParent()->getFirstInsertionPt());
// Reference the __data field.
Value *FCData =
diff --git a/llvm/lib/CodeGen/WasmEHPrepare.cpp b/llvm/lib/CodeGen/WasmEHPrepare.cpp
index a9322dd..16c1dcb 100644
--- a/llvm/lib/CodeGen/WasmEHPrepare.cpp
+++ b/llvm/lib/CodeGen/WasmEHPrepare.cpp
@@ -303,7 +303,7 @@ void WasmEHPrepareImpl::prepareEHPad(BasicBlock *BB, bool NeedPersonality,
unsigned Index) {
assert(BB->isEHPad() && "BB is not an EHPad!");
IRBuilder<> IRB(BB->getContext());
- IRB.SetInsertPoint(BB->getFirstInsertionPt());
+ IRB.SetInsertPoint(BB, BB->getFirstInsertionPt());
auto *FPI = cast<FuncletPadInst>(BB->getFirstNonPHI());
Instruction *GetExnCI = nullptr, *GetSelectorCI = nullptr;
diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index 5bf5a6c..dbf7154 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -1162,7 +1162,7 @@ void OpenMPIRBuilder::emitCancelationCheckImpl(Value *CancelFlag,
FI.FiniCB(Builder.saveIP());
// The continuation block is where code generation continues.
- Builder.SetInsertPoint(NonCancellationBlock->begin());
+ Builder.SetInsertPoint(NonCancellationBlock, NonCancellationBlock->begin());
}
// Callback used to create OpenMP runtime calls to support
@@ -1196,7 +1196,7 @@ static void targetParallelCallback(
// Add alloca for kernel args
OpenMPIRBuilder ::InsertPointTy CurrentIP = Builder.saveIP();
- Builder.SetInsertPoint(OuterAllocaBB->getFirstInsertionPt());
+ Builder.SetInsertPoint(OuterAllocaBB, OuterAllocaBB->getFirstInsertionPt());
AllocaInst *ArgsAlloca =
Builder.CreateAlloca(ArrayType::get(PtrTy, NumCapturedVars));
Value *Args = ArgsAlloca;
@@ -1571,7 +1571,8 @@ IRBuilder<>::InsertPoint OpenMPIRBuilder::createParallel(
// Store to stack at end of the block that currently branches to the entry
// block of the to-be-outlined region.
- Builder.SetInsertPoint(InsertBB->getTerminator()->getIterator());
+ Builder.SetInsertPoint(InsertBB,
+ InsertBB->getTerminator()->getIterator());
Builder.CreateStore(&V, Ptr);
// Load back next to allocations in the to-be-outlined region.
@@ -1939,7 +1940,7 @@ OpenMPIRBuilder::createTask(const LocationDescription &Loc,
StaleCI->eraseFromParent();
- Builder.SetInsertPoint(TaskAllocaBB->begin());
+ Builder.SetInsertPoint(TaskAllocaBB, TaskAllocaBB->begin());
if (HasShareds) {
LoadInst *Shareds = Builder.CreateLoad(VoidPtr, OutlinedFn.getArg(1));
OutlinedFn.getArg(1)->replaceUsesWithIf(
@@ -1953,7 +1954,7 @@ OpenMPIRBuilder::createTask(const LocationDescription &Loc,
};
addOutlineInfo(std::move(OI));
- Builder.SetInsertPoint(TaskExitBB->begin());
+ Builder.SetInsertPoint(TaskExitBB, TaskExitBB->begin());
return Builder.saveIP();
}
@@ -2161,7 +2162,7 @@ OpenMPIRBuilder::createReductions(const LocationDescription &Loc,
Builder.SetInsertPoint(AllocaIP.getBlock()->getTerminator());
Value *RedArray = Builder.CreateAlloca(RedArrayTy, nullptr, "red.array");
- Builder.SetInsertPoint(InsertBlock->end());
+ Builder.SetInsertPoint(InsertBlock, InsertBlock->end());
for (auto En : enumerate(ReductionInfos)) {
unsigned Index = En.index();
@@ -2600,13 +2601,15 @@ OpenMPIRBuilder::applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
// the latch block.
CLI->mapIndVar([&](Instruction *OldIV) -> Value * {
- Builder.SetInsertPoint(CLI->getBody()->getFirstInsertionPt());
+ Builder.SetInsertPoint(CLI->getBody(),
+ CLI->getBody()->getFirstInsertionPt());
Builder.SetCurrentDebugLocation(DL);
return Builder.CreateAdd(OldIV, LowerBound);
});
// In the "exit" block, call the "fini" function.
- Builder.SetInsertPoint(CLI->getExit()->getTerminator()->getIterator());
+ Builder.SetInsertPoint(CLI->getExit(),
+ CLI->getExit()->getTerminator()->getIterator());
Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum});
// Add the barrier if requested.
@@ -2747,7 +2750,7 @@ OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyStaticChunkedWorkshareLoop(
});
// In the "exit" block, call the "fini" function.
- Builder.SetInsertPoint(DispatchExit->getFirstInsertionPt());
+ Builder.SetInsertPoint(DispatchExit, DispatchExit->getFirstInsertionPt());
Builder.CreateCall(StaticFini, {SrcLoc, ThreadNum});
// Add the barrier if requested.
@@ -3166,7 +3169,7 @@ OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyDynamicWorkshareLoop(
PreHeader->getContext(), Twine(PreHeader->getName()) + ".outer.cond",
PreHeader->getParent());
// This needs to be 32-bit always, so can't use the IVTy Zero above.
- Builder.SetInsertPoint(OuterCond->getFirstInsertionPt());
+ Builder.SetInsertPoint(OuterCond, OuterCond->getFirstInsertionPt());
Value *Res =
Builder.CreateCall(DynamicNext, {SrcLoc, ThreadNum, PLastIter,
PLowerBound, PUpperBound, PStride});
@@ -3191,7 +3194,7 @@ OpenMPIRBuilder::InsertPointTy OpenMPIRBuilder::applyDynamicWorkshareLoop(
// Modify the inner condition:
// * Use the UpperBound returned from the DynamicNext call.
// * jump to the loop outer loop when done with one of the inner loops.
- Builder.SetInsertPoint(Cond->getFirstInsertionPt());
+ Builder.SetInsertPoint(Cond, Cond->getFirstInsertionPt());
UpperBound = Builder.CreateLoad(IVTy, PUpperBound, "ub");
Instruction *Comp = &*Builder.GetInsertPoint();
auto *CI = cast<CmpInst>(Comp);
@@ -6339,7 +6342,7 @@ OpenMPIRBuilder::createTeams(const LocationDescription &Loc,
BasicBlock &OuterAllocaBB = CurrentFunction->getEntryBlock();
if (&OuterAllocaBB == Builder.GetInsertBlock()) {
BasicBlock *BodyBB = splitBB(Builder, /*CreateBranch=*/true, "teams.entry");
- Builder.SetInsertPoint(BodyBB->begin());
+ Builder.SetInsertPoint(BodyBB, BodyBB->begin());
}
// The current basic block is split into four basic blocks. After outlining,
@@ -6463,7 +6466,7 @@ OpenMPIRBuilder::createTeams(const LocationDescription &Loc,
addOutlineInfo(std::move(OI));
- Builder.SetInsertPoint(ExitBB->begin());
+ Builder.SetInsertPoint(ExitBB, ExitBB->begin());
return Builder.saveIP();
}
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 5ae257d..d7825d9 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -2472,7 +2472,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
LLVMContext &C = CI->getContext();
IRBuilder<> Builder(C);
- Builder.SetInsertPoint(CI->getIterator());
+ Builder.SetInsertPoint(CI->getParent(), CI->getIterator());
if (!NewFn) {
bool FallthroughToDefaultUpgrade = false;
@@ -5006,7 +5006,7 @@ void llvm::UpgradeARCRuntime(Module &M) {
if (!CI || CI->getCalledFunction() != Fn)
continue;
- IRBuilder<> Builder(CI->getIterator());
+ IRBuilder<> Builder(CI->getParent(), CI->getIterator());
FunctionType *NewFuncTy = NewFn->getFunctionType();
SmallVector<Value *, 2> Args;
diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp
index d8aa9b8..3b6b01f 100644
--- a/llvm/lib/IR/Core.cpp
+++ b/llvm/lib/IR/Core.cpp
@@ -3135,10 +3135,8 @@ LLVMBuilderRef LLVMCreateBuilder(void) {
static void LLVMPositionBuilderImpl(IRBuilder<> *Builder, BasicBlock *Block,
Instruction *Instr, bool BeforeDbgRecords) {
BasicBlock::iterator I = Instr ? Instr->getIterator() : Block->end();
- assert(I.getNodeParent() == Block &&
- "Non-null Instr must be contained in Block!");
I.setHeadBit(BeforeDbgRecords);
- Builder->SetInsertPoint(I);
+ Builder->SetInsertPoint(Block, I);
}
void LLVMPositionBuilder(LLVMBuilderRef Builder, LLVMBasicBlockRef Block,
diff --git a/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp b/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp
index 0088172..fe68203 100644
--- a/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp
+++ b/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp
@@ -158,7 +158,7 @@ bool SVEIntrinsicOpts::coalescePTrueIntrinsicCalls(
LLVMContext &Ctx = BB.getContext();
IRBuilder<> Builder(Ctx);
- Builder.SetInsertPoint(++MostEncompassingPTrue->getIterator());
+ Builder.SetInsertPoint(&BB, ++MostEncompassingPTrue->getIterator());
auto *MostEncompassingPTrueVTy =
cast<VectorType>(MostEncompassingPTrue->getType());
@@ -175,7 +175,7 @@ bool SVEIntrinsicOpts::coalescePTrueIntrinsicCalls(
if (MostEncompassingPTrueVTy != PTrueVTy) {
ConvertFromCreated = true;
- Builder.SetInsertPoint(++ConvertToSVBool->getIterator());
+ Builder.SetInsertPoint(&BB, ++ConvertToSVBool->getIterator());
auto *ConvertFromSVBool =
Builder.CreateIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool,
{PTrueVTy}, {ConvertToSVBool});
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
index 8b5e385..38cc5a9 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAtomicOptimizer.cpp
@@ -994,7 +994,7 @@ void AMDGPUAtomicOptimizerImpl::optimizeAtomic(Instruction &I,
if (IsPixelShader) {
// Need a final PHI to reconverge to above the helper lane branch mask.
- B.SetInsertPoint(PixelExitBB->getFirstNonPHIIt());
+ B.SetInsertPoint(PixelExitBB, PixelExitBB->getFirstNonPHIIt());
PHINode *const PHI = B.CreatePHI(Ty, 2);
PHI->addIncoming(PoisonValue::get(Ty), PixelEntryBB);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index b067b8b..6e7d34f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -1118,7 +1118,7 @@ bool AMDGPUCodeGenPrepareImpl::visitFDiv(BinaryOperator &FDiv) {
if (ReqdAccuracy < 1.0f)
return false;
- IRBuilder<> Builder(std::next(FDiv.getIterator()));
+ IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()));
Builder.setFastMathFlags(DivFMF);
Builder.SetCurrentDebugLocation(FDiv.getDebugLoc());
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
index 9af645d..456f3cb 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
@@ -1328,7 +1328,7 @@ AMDGPULibCalls::insertSinCos(Value *Arg, FastMathFlags FMF, IRBuilder<> &B,
// sincos call there. Otherwise, right after the allocas works well enough
// if it's an argument or constant.
- B.SetInsertPoint(++ArgInst->getIterator());
+ B.SetInsertPoint(ArgInst->getParent(), ++ArgInst->getIterator());
// SetInsertPoint unwelcomely always tries to set the debug loc.
B.SetCurrentDebugLocation(DL);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
index dce0e5f..1873fdb 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerKernelArguments.cpp
@@ -106,7 +106,7 @@ static bool lowerKernelArguments(Function &F, const TargetMachine &TM) {
LLVMContext &Ctx = F.getParent()->getContext();
const DataLayout &DL = F.getParent()->getDataLayout();
BasicBlock &EntryBlock = *F.begin();
- IRBuilder<> Builder(getInsertPt(EntryBlock));
+ IRBuilder<> Builder(&EntryBlock, getInsertPt(EntryBlock));
const Align KernArgBaseAlign(16); // FIXME: Increase if necessary
const uint64_t BaseOffset = ST.getExplicitKernelArgOffset();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
index 58ba577..2bdbf41 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
@@ -283,7 +283,7 @@ class AMDGPULowerModuleLDS {
// codegen would suffice for that, but one would still need to ensure that
// the variables are allocated in the anticpated order.
BasicBlock *Entry = &Func->getEntryBlock();
- IRBuilder<> Builder(Entry->getFirstNonPHIIt());
+ IRBuilder<> Builder(Entry, Entry->getFirstNonPHIIt());
Function *Decl =
Intrinsic::getDeclaration(Func->getParent(), Intrinsic::donothing, {});
diff --git a/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp b/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
index f629b64..08e1d6b 100644
--- a/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
+++ b/llvm/lib/Target/AMDGPU/SIAnnotateControlFlow.cpp
@@ -336,7 +336,7 @@ bool SIAnnotateControlFlow::closeControlFlow(BasicBlock *BB) {
// Split edge to make Def dominate Use
FirstInsertionPt = SplitEdge(DefBB, BB, DT, LI)->getFirstInsertionPt();
}
- IRBuilder<> IRB(FirstInsertionPt);
+ IRBuilder<> IRB(FirstInsertionPt->getParent(), FirstInsertionPt);
// TODO: StructurizeCFG 'Flow' blocks have debug locations from the
// condition, for now just avoid copying these DebugLocs so that stepping
// out of the then/else block in a debugger doesn't step to the condition.
diff --git a/llvm/lib/Target/ARM/ARMParallelDSP.cpp b/llvm/lib/Target/ARM/ARMParallelDSP.cpp
index 7383f01..8c1b332 100644
--- a/llvm/lib/Target/ARM/ARMParallelDSP.cpp
+++ b/llvm/lib/Target/ARM/ARMParallelDSP.cpp
@@ -637,7 +637,8 @@ void ARMParallelDSP::InsertParallelMACs(Reduction &R) {
Intrinsic::getDeclaration(M, Intrinsic::arm_smlad) :
Intrinsic::getDeclaration(M, Intrinsic::arm_smlald);
- IRBuilder<NoFolder> Builder(InsertAfter->getIterator());
+ IRBuilder<NoFolder> Builder(InsertAfter->getParent(),
+ BasicBlock::iterator(InsertAfter));
Instruction *Call = Builder.CreateCall(SMLAD, Args);
NumSMLAD++;
return Call;
@@ -757,7 +758,8 @@ LoadInst* ARMParallelDSP::CreateWideLoad(MemInstList &Loads,
// Insert the load at the point of the original dominating load.
LoadInst *DomLoad = DT->dominates(Base, Offset) ? Base : Offset;
- IRBuilder<NoFolder> IRB(++BasicBlock::iterator(DomLoad));
+ IRBuilder<NoFolder> IRB(DomLoad->getParent(),
+ ++BasicBlock::iterator(DomLoad));
// Create the wide load, while making sure to maintain the original alignment
// as this prevents ldrd from being generated when it could be illegal due to
diff --git a/llvm/lib/Target/ARM/MVELaneInterleavingPass.cpp b/llvm/lib/Target/ARM/MVELaneInterleavingPass.cpp
index 0352b9d..5ac79cb 100644
--- a/llvm/lib/Target/ARM/MVELaneInterleavingPass.cpp
+++ b/llvm/lib/Target/ARM/MVELaneInterleavingPass.cpp
@@ -382,7 +382,7 @@ static bool tryInterleave(Instruction *Start,
for (Instruction *I : Truncs) {
LLVM_DEBUG(dbgs() << "Replacing trunc " << *I << "\n");
- Builder.SetInsertPoint(++I->getIterator());
+ Builder.SetInsertPoint(I->getParent(), ++I->getIterator());
Value *Shuf = Builder.CreateShuffleVector(I, TruncMask);
I->replaceAllUsesWith(Shuf);
cast<Instruction>(Shuf)->setOperand(0, I);
diff --git a/llvm/lib/Target/ARM/MVETailPredication.cpp b/llvm/lib/Target/ARM/MVETailPredication.cpp
index 11c85bd..fe97d4f 100644
--- a/llvm/lib/Target/ARM/MVETailPredication.cpp
+++ b/llvm/lib/Target/ARM/MVETailPredication.cpp
@@ -381,7 +381,7 @@ void MVETailPredication::InsertVCTPIntrinsic(IntrinsicInst *ActiveLaneMask,
cast<FixedVectorType>(ActiveLaneMask->getType())->getNumElements();
// Insert a phi to count the number of elements processed by the loop.
- Builder.SetInsertPoint(L->getHeader()->getFirstNonPHIIt());
+ Builder.SetInsertPoint(L->getHeader(), L->getHeader()->getFirstNonPHIIt());
PHINode *Processed = Builder.CreatePHI(Ty, 2);
Processed->addIncoming(Start, L->getLoopPreheader());
diff --git a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
index 50a9970..7777ae2 100644
--- a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
@@ -1115,7 +1115,7 @@ bool PolynomialMultiplyRecognize::promoteTypes(BasicBlock *LoopB,
assert(Ty0 == DestTy);
// In order to create the trunc, P must have the promoted type.
P->mutateType(Ty0);
- Value *T = IRBuilder<>(End).CreateTrunc(P, PTy);
+ Value *T = IRBuilder<>(ExitB, End).CreateTrunc(P, PTy);
// In order for the RAUW to work, the types of P and T must match.
P->mutateType(PTy);
P->replaceAllUsesWith(T);
@@ -1462,7 +1462,7 @@ bool PolynomialMultiplyRecognize::convertShiftsToLeft(BasicBlock *LoopB,
// them right after the loop exit.
// Take advantage of the loop-closed SSA form, which has all the post-
// loop values in phi nodes.
- IRB.SetInsertPoint(ExitB->getFirstInsertionPt());
+ IRB.SetInsertPoint(ExitB, ExitB->getFirstInsertionPt());
for (auto P = ExitB->begin(), Q = ExitB->end(); P != Q; ++P) {
if (!isa<PHINode>(P))
break;
diff --git a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
index 838b78f..797b798 100644
--- a/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonVectorCombine.cpp
@@ -1460,7 +1460,8 @@ auto AlignVectors::realignGroup(const MoveGroup &Move) const -> bool {
InsertAt = &*std::next(InsertAt->getIterator());
}
- IRBuilder Builder(InsertAt->getIterator(), InstSimplifyFolder(HVC.DL));
+ IRBuilder Builder(InsertAt->getParent(), InsertAt->getIterator(),
+ InstSimplifyFolder(HVC.DL));
Value *AlignAddr = nullptr; // Actual aligned address.
Value *AlignVal = nullptr; // Right-shift amount (for valign).
@@ -1740,7 +1741,8 @@ auto HvxIdioms::processFxpMul(Instruction &In, const FxpOp &Op) const
// TODO: Add multiplication of vectors by scalar registers (up to 4 bytes).
Value *X = Op.X.Val, *Y = Op.Y.Val;
- IRBuilder Builder(In.getIterator(), InstSimplifyFolder(HVC.DL));
+ IRBuilder Builder(In.getParent(), In.getIterator(),
+ InstSimplifyFolder(HVC.DL));
auto roundUpWidth = [](unsigned Width) -> unsigned {
if (Width <= 32 && !isPowerOf2_32(Width)) {
diff --git a/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp b/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp
index 1ff3162..f4f966e 100644
--- a/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonVectorLoopCarriedReuse.cpp
@@ -552,7 +552,7 @@ void HexagonVectorLoopCarriedReuse::reuseValue() {
}
BasicBlock *BB = BEInst->getParent();
IRBuilder<> IRB(BB);
- IRB.SetInsertPoint(BB->getFirstNonPHIIt());
+ IRB.SetInsertPoint(BB, BB->getFirstNonPHIIt());
Value *BEVal = BEInst;
PHINode *NewPhi;
for (int i = Iterations-1; i >=0 ; --i) {
diff --git a/llvm/lib/Target/PowerPC/PPCBoolRetToInt.cpp b/llvm/lib/Target/PowerPC/PPCBoolRetToInt.cpp
index 31bed91..4a3b64f 100644
--- a/llvm/lib/Target/PowerPC/PPCBoolRetToInt.cpp
+++ b/llvm/lib/Target/PowerPC/PPCBoolRetToInt.cpp
@@ -111,7 +111,7 @@ class PPCBoolRetToInt : public FunctionPass {
if (auto *I = dyn_cast<Instruction>(V))
IRB.SetInsertPoint(I->getNextNode());
else
- IRB.SetInsertPoint(Func->getEntryBlock().begin());
+ IRB.SetInsertPoint(&Func->getEntryBlock(), Func->getEntryBlock().begin());
return IRB.CreateZExt(V, IntTy);
}
diff --git a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
index 05eb47f..11e662c 100644
--- a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
@@ -1402,7 +1402,7 @@ bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
AggrStores.insert(&I);
}
- B.SetInsertPoint(Func.getEntryBlock().begin());
+ B.SetInsertPoint(&Func.getEntryBlock(), Func.getEntryBlock().begin());
for (auto &GV : Func.getParent()->globals())
processGlobalValue(GV, B);
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
index edac6cd..027ee10 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyLowerEmscriptenEHSjLj.cpp
@@ -1305,7 +1305,7 @@ bool WebAssemblyLowerEmscriptenEHSjLj::runSjLjOnFunction(Function &F) {
// Add a phi to the tail, which will be the output of setjmp, which
// indicates if this is the first call or a longjmp back. The phi directly
// uses the right value based on where we arrive from
- IRB.SetInsertPoint(Tail->getFirstNonPHIIt());
+ IRB.SetInsertPoint(Tail, Tail->getFirstNonPHIIt());
PHINode *SetjmpRet = IRB.CreatePHI(IRB.getInt32Ty(), 2, "setjmp.ret");
// setjmp initial call returns 0
diff --git a/llvm/lib/Target/X86/X86LowerAMXIntrinsics.cpp b/llvm/lib/Target/X86/X86LowerAMXIntrinsics.cpp
index 55ddd59..e355a4b 100644
--- a/llvm/lib/Target/X86/X86LowerAMXIntrinsics.cpp
+++ b/llvm/lib/Target/X86/X86LowerAMXIntrinsics.cpp
@@ -492,7 +492,7 @@ X86LowerAMXIntrinsics::lowerTileDP(Instruction *TileDP) {
KDWord, C, A, B);
// we cannot assume there always be bitcast after tiledpbssd. So we need to
// insert one bitcast as required
- Builder.SetInsertPoint(End->getFirstNonPHIIt());
+ Builder.SetInsertPoint(End, End->getFirstNonPHIIt());
Value *ResAMX =
Builder.CreateBitCast(ResVec, Type::getX86_AMXTy(Builder.getContext()));
// Delete TileDP intrinsic and do some clean-up.
@@ -536,7 +536,7 @@ bool X86LowerAMXIntrinsics::lowerTileLoadStore(Instruction *TileLoadStore) {
if (IsTileLoad) {
// we cannot assume there always be bitcast after tileload. So we need to
// insert one bitcast as required
- Builder.SetInsertPoint(End->getFirstNonPHIIt());
+ Builder.SetInsertPoint(End, End->getFirstNonPHIIt());
Value *ResAMX =
Builder.CreateBitCast(ResVec, Type::getX86_AMXTy(Builder.getContext()));
// Delete tileloadd6 intrinsic and do some clean-up
diff --git a/llvm/lib/Target/X86/X86LowerAMXType.cpp b/llvm/lib/Target/X86/X86LowerAMXType.cpp
index a827d5e..079ac98 100644
--- a/llvm/lib/Target/X86/X86LowerAMXType.cpp
+++ b/llvm/lib/Target/X86/X86LowerAMXType.cpp
@@ -476,8 +476,9 @@ static Instruction *createTileStore(Instruction *TileDef, Value *Ptr) {
Value *Row = II->getOperand(0);
Value *Col = II->getOperand(1);
+ BasicBlock *BB = TileDef->getParent();
BasicBlock::iterator Iter = TileDef->getIterator();
- IRBuilder<> Builder(++Iter);
+ IRBuilder<> Builder(BB, ++Iter);
Value *Stride = Builder.getInt64(64);
std::array<Value *, 5> Args = {Row, Col, Ptr, Stride, TileDef};
diff --git a/llvm/lib/Target/X86/X86WinEHState.cpp b/llvm/lib/Target/X86/X86WinEHState.cpp
index 98d7fb9..578d653 100644
--- a/llvm/lib/Target/X86/X86WinEHState.cpp
+++ b/llvm/lib/Target/X86/X86WinEHState.cpp
@@ -274,7 +274,7 @@ void WinEHStatePass::emitExceptionRegistrationRecord(Function *F) {
// Struct type of RegNode. Used for GEPing.
Type *RegNodeTy;
- IRBuilder<> Builder(F->getEntryBlock().begin());
+ IRBuilder<> Builder(&F->getEntryBlock(), F->getEntryBlock().begin());
Type *Int8PtrType = Builder.getPtrTy();
Type *Int32Ty = Builder.getInt32Ty();
Type *VoidTy = Builder.getVoidTy();
diff --git a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
index c88907f..c7e84a0 100644
--- a/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
+++ b/llvm/lib/Transforms/AggressiveInstCombine/AggressiveInstCombine.cpp
@@ -139,7 +139,7 @@ static bool foldGuardedFunnelShift(Instruction &I, const DominatorTree &DT) {
if (Pred != CmpInst::ICMP_EQ)
return false;
- IRBuilder<> Builder(PhiBB->getFirstInsertionPt());
+ IRBuilder<> Builder(PhiBB, PhiBB->getFirstInsertionPt());
if (ShVal0 == ShVal1)
++NumGuardedRotates;
diff --git a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
index 7f724f9..30ef768 100644
--- a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
+++ b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
@@ -1859,7 +1859,7 @@ static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
}
auto Index = FrameData.getFieldIndex(Def);
- Builder.SetInsertPoint(InsertPt);
+ Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
auto *G = Builder.CreateConstInBoundsGEP2_32(
FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
if (ByValTy) {
@@ -1879,7 +1879,8 @@ static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
// reference provided with the frame GEP.
if (CurrentBlock != U->getParent()) {
CurrentBlock = U->getParent();
- Builder.SetInsertPoint(CurrentBlock->getFirstInsertionPt());
+ Builder.SetInsertPoint(CurrentBlock,
+ CurrentBlock->getFirstInsertionPt());
auto *GEP = GetFramePointer(E.first);
GEP->setName(E.first->getName() + Twine(".reload.addr"));
@@ -1970,7 +1971,7 @@ static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
Shape.ABI == coro::ABI::Async) {
// If we found any allocas, replace all of their remaining uses with Geps.
- Builder.SetInsertPoint(SpillBlock->begin());
+ Builder.SetInsertPoint(SpillBlock, SpillBlock->begin());
for (const auto &P : FrameData.Allocas) {
AllocaInst *Alloca = P.Alloca;
auto *G = GetFramePointer(Alloca);
@@ -1989,7 +1990,8 @@ static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
// dbg.declares and dbg.values with the reload from the frame.
// Note: We cannot replace the alloca with GEP instructions indiscriminately,
// as some of the uses may not be dominated by CoroBegin.
- Builder.SetInsertPoint(Shape.AllocaSpillBlock->begin());
+ Builder.SetInsertPoint(Shape.AllocaSpillBlock,
+ Shape.AllocaSpillBlock->begin());
SmallVector<Instruction *, 4> UsersToUpdate;
for (const auto &A : FrameData.Allocas) {
AllocaInst *Alloca = A.Alloca;
@@ -2874,7 +2876,7 @@ salvageDebugInfoImpl(SmallDenseMap<Argument *, AllocaInst *, 4> &ArgToAllocaMap,
auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
while (isa<IntrinsicInst>(InsertPt))
++InsertPt;
- Builder.SetInsertPoint(InsertPt);
+ Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
diff --git a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
index a026a4e..c4b9375 100644
--- a/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
+++ b/llvm/lib/Transforms/IPO/AttributorAttributes.cpp
@@ -7469,7 +7469,7 @@ struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
unsigned ArgNo, BasicBlock::iterator IP) {
assert(PrivType && "Expected privatizable type!");
- IRBuilder<NoFolder> IRB(IP);
+ IRBuilder<NoFolder> IRB(IP->getParent(), IP);
const DataLayout &DL = F.getParent()->getDataLayout();
// Traverse the type, build GEPs and stores.
diff --git a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
index 9e92438..e3a4821 100644
--- a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
+++ b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
@@ -1722,7 +1722,8 @@ private:
auto &IRBuilder = OMPInfoCache.OMPBuilder;
Function *F = RuntimeCall.getCaller();
BasicBlock &Entry = F->getEntryBlock();
- IRBuilder.Builder.SetInsertPoint(Entry.getFirstNonPHIOrDbgOrAlloca());
+ IRBuilder.Builder.SetInsertPoint(&Entry,
+ Entry.getFirstNonPHIOrDbgOrAlloca());
Value *Handle = IRBuilder.Builder.CreateAlloca(
IRBuilder.AsyncInfo, /*ArraySize=*/nullptr, "handle");
Handle =
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 08eaf1c..19a1234 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -4153,7 +4153,7 @@ Value *InstCombinerImpl::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS,
// users are freely-invertible, so that 'not' *will* get folded away.
BuilderTy::InsertPointGuard Guard(Builder);
// Set insertion point to right after the Y.
- Builder.SetInsertPoint(++(Y->getIterator()));
+ Builder.SetInsertPoint(Y->getParent(), ++(Y->getIterator()));
Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
// Replace all uses of Y (excluding the one in NotY!) with NotY.
Worklist.pushUsersToWorkList(*Y);
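Inserting directly after an existing instruction now also names the parent block. A small sketch of that insert-after pattern; createNotAfter is an illustrative helper, not InstCombine code:

#include <iterator>

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Create the negation of an instruction's value immediately after it; the
// parent block is spelled out alongside the incremented iterator.
static Value *createNotAfter(Instruction *Y) {
  IRBuilder<> Builder(Y->getContext());
  Builder.SetInsertPoint(Y->getParent(), std::next(Y->getIterator()));
  return Builder.CreateNot(Y, Y->getName() + ".not");
}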
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index a2a6d42..1ad6375 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -514,7 +514,7 @@ static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
bool Before = true) {
if (auto *PHI = dyn_cast<PHINode>(V)) {
BasicBlock *Parent = PHI->getParent();
- Builder.SetInsertPoint(Parent->getFirstInsertionPt());
+ Builder.SetInsertPoint(Parent, Parent->getFirstInsertionPt());
return;
}
if (auto *I = dyn_cast<Instruction>(V)) {
@@ -526,7 +526,7 @@ static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
if (auto *A = dyn_cast<Argument>(V)) {
// Set the insertion point in the entry block.
BasicBlock &Entry = A->getParent()->getEntryBlock();
- Builder.SetInsertPoint(Entry.getFirstInsertionPt());
+ Builder.SetInsertPoint(&Entry, Entry.getFirstInsertionPt());
return;
}
// Otherwise, this is a constant and we don't need to set a new
diff --git a/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp b/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
index 19464d0..8641132 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -1371,7 +1371,7 @@ static Value *simplifyUsingControlFlow(InstCombiner &Self, PHINode &PN,
// sinking.
auto InsertPt = BB->getFirstInsertionPt();
if (InsertPt != BB->end()) {
- Self.Builder.SetInsertPoint(InsertPt);
+ Self.Builder.SetInsertPoint(&*BB, InsertPt);
return Self.Builder.CreateNot(Cond);
}
@@ -1417,7 +1417,7 @@ static Value *foldDependentIVs(PHINode &PN, IRBuilderBase &Builder) {
if (Iv2Start != Identity)
return nullptr;
- Builder.SetInsertPoint(BB->getFirstInsertionPt());
+ Builder.SetInsertPoint(&*BB, BB->getFirstInsertionPt());
if (!BO) {
auto *GEP = cast<GEPOperator>(IvNext);
return Builder.CreateGEP(GEP->getSourceElementType(), Start, Iv2, "",
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index e78d6eb..27563d4 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -2678,7 +2678,7 @@ static Instruction *foldSelectToPhiImpl(SelectInst &Sel, BasicBlock *BB,
return nullptr;
}
- Builder.SetInsertPoint(BB->begin());
+ Builder.SetInsertPoint(BB, BB->begin());
auto *PN = Builder.CreatePHI(Sel.getType(), Inputs.size());
for (auto *Pred : predecessors(BB))
PN->addIncoming(Inputs[Pred], Pred);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 9639e30..ebc2930 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -1123,7 +1123,7 @@ Instruction *InstCombinerImpl::foldAggregateConstructionIntoAggregateReuse(
// Note that the same block can be a predecessor more than once,
// and we need to preserve that invariant for the PHI node.
BuilderTy::InsertPointGuard Guard(Builder);
- Builder.SetInsertPoint(UseBB->getFirstNonPHIIt());
+ Builder.SetInsertPoint(UseBB, UseBB->getFirstNonPHIIt());
auto *PHI =
Builder.CreatePHI(AggTy, Preds.size(), OrigIVI.getName() + ".merged");
for (BasicBlock *Pred : Preds)
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index a45545c..18b98e9 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1956,7 +1956,8 @@ void AddressSanitizer::instrumentUnusualSizeOrAlignment(
void ModuleAddressSanitizer::poisonOneInitializer(Function &GlobalInit,
GlobalValue *ModuleName) {
// Set up the arguments to our poison/unpoison functions.
- IRBuilder<> IRB(GlobalInit.front().getFirstInsertionPt());
+ IRBuilder<> IRB(&GlobalInit.front(),
+ GlobalInit.front().getFirstInsertionPt());
// Add a call to poison all external globals before the given function starts.
Value *ModuleNameAddr = ConstantExpr::getPointerCast(ModuleName, IntptrTy);
@@ -2868,7 +2869,7 @@ bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) {
if (F.getName().contains(" load]")) {
FunctionCallee AsanInitFunction =
declareSanitizerInitFunction(*F.getParent(), kAsanInitName, {});
- IRBuilder<> IRB(F.front().begin());
+ IRBuilder<> IRB(&F.front(), F.front().begin());
IRB.CreateCall(AsanInitFunction, {});
return true;
}
diff --git a/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp b/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
index 219474a..cfa8ae2 100644
--- a/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
+++ b/llvm/lib/Transforms/Instrumentation/BoundsChecking.cpp
@@ -155,7 +155,7 @@ static bool addBoundsChecking(Function &F, TargetLibraryInfo &TLI,
SmallVector<std::pair<Instruction *, Value *>, 4> TrapInfo;
for (Instruction &I : instructions(F)) {
Value *Or = nullptr;
- BuilderTy IRB(I.getIterator(), TargetFolder(DL));
+ BuilderTy IRB(I.getParent(), BasicBlock::iterator(&I), TargetFolder(DL));
if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
if (!LI->isVolatile())
Or = getBoundsCheckCond(LI->getPointerOperand(), LI, DL, TLI,
@@ -215,7 +215,7 @@ static bool addBoundsChecking(Function &F, TargetLibraryInfo &TLI,
// Add the checks.
for (const auto &Entry : TrapInfo) {
Instruction *Inst = Entry.first;
- BuilderTy IRB(Inst->getIterator(), TargetFolder(DL));
+ BuilderTy IRB(Inst->getParent(), BasicBlock::iterator(Inst), TargetFolder(DL));
insertBoundsCheck(Entry.second, IRB, GetTrapBB);
}
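BuilderTy in this pass is IRBuilder<TargetFolder>, so the restored constructor order is (block, iterator, folder). A sketch under that assumption; the placeholder helper below is ours, not the pass's:

#include "llvm/Analysis/TargetFolder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Construct a constant-folding builder positioned at instruction I, passing
// the parent block, the iterator and the folder in that order, then emit a
// trivially-true stand-in for the real bounds-check condition.
static Value *emitCheckPlaceholder(Instruction &I, const DataLayout &DL) {
  IRBuilder<TargetFolder> IRB(I.getParent(), BasicBlock::iterator(&I),
                              TargetFolder(DL));
  return IRB.getTrue();
}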
diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index b7fe498..f0b0917 100644
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -988,7 +988,7 @@ Value *DFSanFunction::expandFromPrimitiveShadow(Type *T, Value *PrimitiveShadow,
if (DFS.isZeroShadow(PrimitiveShadow))
return DFS.getZeroShadow(ShadowTy);
- IRBuilder<> IRB(Pos);
+ IRBuilder<> IRB(Pos->getParent(), Pos);
SmallVector<unsigned, 4> Indices;
Value *Shadow = UndefValue::get(ShadowTy);
Shadow = expandFromPrimitiveShadowRecursive(Shadow, Indices, ShadowTy,
@@ -1039,7 +1039,7 @@ Value *DFSanFunction::collapseToPrimitiveShadow(Value *Shadow,
if (CS && DT.dominates(CS, Pos))
return CS;
- IRBuilder<> IRB(Pos);
+ IRBuilder<> IRB(Pos->getParent(), Pos);
Value *PrimitiveShadow = collapseToPrimitiveShadow(Shadow, IRB);
// Caches the converted primitive shadow value.
CS = PrimitiveShadow;
@@ -1772,7 +1772,7 @@ bool DataFlowSanitizer::runImpl(
Pos = DFSF.F->getEntryBlock().begin();
while (isa<PHINode>(Pos) || isa<AllocaInst>(Pos))
Pos = std::next(Pos->getIterator());
- IRBuilder<> IRB(Pos);
+ IRBuilder<> IRB(Pos->getParent(), Pos);
Value *PrimitiveShadow = DFSF.collapseToPrimitiveShadow(V, Pos);
Value *Ne =
IRB.CreateICmpNE(PrimitiveShadow, DFSF.DFS.ZeroPrimitiveShadow);
@@ -1919,7 +1919,7 @@ std::pair<Value *, Value *>
DataFlowSanitizer::getShadowOriginAddress(Value *Addr, Align InstAlignment,
BasicBlock::iterator Pos) {
// Returns ((Addr & shadow_mask) + origin_base - shadow_base) & ~4UL
- IRBuilder<> IRB(Pos);
+ IRBuilder<> IRB(Pos->getParent(), Pos);
Value *ShadowOffset = getShadowOffset(Addr, IRB);
Value *ShadowLong = ShadowOffset;
uint64_t ShadowBase = MapParams->ShadowBase;
@@ -1952,13 +1952,13 @@ DataFlowSanitizer::getShadowOriginAddress(Value *Addr, Align InstAlignment,
Value *DataFlowSanitizer::getShadowAddress(Value *Addr,
BasicBlock::iterator Pos,
Value *ShadowOffset) {
- IRBuilder<> IRB(Pos);
+ IRBuilder<> IRB(Pos->getParent(), Pos);
return IRB.CreateIntToPtr(ShadowOffset, PrimitiveShadowPtrTy);
}
Value *DataFlowSanitizer::getShadowAddress(Value *Addr,
BasicBlock::iterator Pos) {
- IRBuilder<> IRB(Pos);
+ IRBuilder<> IRB(Pos->getParent(), Pos);
Value *ShadowOffset = getShadowOffset(Addr, IRB);
return getShadowAddress(Addr, Pos, ShadowOffset);
}
@@ -2010,7 +2010,7 @@ Value *DFSanFunction::combineShadows(Value *V1, Value *V2,
Value *PV1 = collapseToPrimitiveShadow(V1, Pos);
Value *PV2 = collapseToPrimitiveShadow(V2, Pos);
- IRBuilder<> IRB(Pos);
+ IRBuilder<> IRB(Pos->getParent(), Pos);
CCS.Block = Pos->getParent();
CCS.Shadow = IRB.CreateOr(PV1, PV2);
@@ -2074,7 +2074,7 @@ Value *DFSanFunction::combineOrigins(const std::vector<Value *> &Shadows,
}
Value *OpShadow = Shadows[I];
Value *PrimitiveShadow = collapseToPrimitiveShadow(OpShadow, Pos);
- IRBuilder<> IRB(Pos);
+ IRBuilder<> IRB(Pos->getParent(), Pos);
Value *Cond = IRB.CreateICmpNE(PrimitiveShadow, Zero);
Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
}
@@ -2143,7 +2143,7 @@ bool DFSanFunction::useCallbackLoadLabelAndOrigin(uint64_t Size,
Value *DataFlowSanitizer::loadNextOrigin(BasicBlock::iterator Pos,
Align OriginAlign,
Value **OriginAddr) {
- IRBuilder<> IRB(Pos);
+ IRBuilder<> IRB(Pos->getParent(), Pos);
*OriginAddr =
IRB.CreateGEP(OriginTy, *OriginAddr, ConstantInt::get(IntptrTy, 1));
return IRB.CreateAlignedLoad(OriginTy, *OriginAddr, OriginAlign);
@@ -2175,7 +2175,7 @@ std::pair<Value *, Value *> DFSanFunction::loadShadowFast(
Type *WideShadowTy =
ShadowSize == 4 ? Type::getInt32Ty(*DFS.Ctx) : Type::getInt64Ty(*DFS.Ctx);
- IRBuilder<> IRB(Pos);
+ IRBuilder<> IRB(Pos->getParent(), Pos);
Value *CombinedWideShadow =
IRB.CreateAlignedLoad(WideShadowTy, ShadowAddr, ShadowAlign);
@@ -2244,7 +2244,7 @@ std::pair<Value *, Value *> DFSanFunction::loadShadowOriginSansLoadTracking(
if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
const auto SI = AllocaShadowMap.find(AI);
if (SI != AllocaShadowMap.end()) {
- IRBuilder<> IRB(Pos);
+ IRBuilder<> IRB(Pos->getParent(), Pos);
Value *ShadowLI = IRB.CreateLoad(DFS.PrimitiveShadowTy, SI->second);
const auto OI = AllocaOriginMap.find(AI);
assert(!ShouldTrackOrigins || OI != AllocaOriginMap.end());
@@ -2279,7 +2279,7 @@ std::pair<Value *, Value *> DFSanFunction::loadShadowOriginSansLoadTracking(
// tracking.
if (ShouldTrackOrigins &&
useCallbackLoadLabelAndOrigin(Size, InstAlignment)) {
- IRBuilder<> IRB(Pos);
+ IRBuilder<> IRB(Pos->getParent(), Pos);
CallInst *Call =
IRB.CreateCall(DFS.DFSanLoadLabelAndOriginFn,
{Addr, ConstantInt::get(DFS.IntptrTy, Size)});
@@ -2298,7 +2298,7 @@ std::pair<Value *, Value *> DFSanFunction::loadShadowOriginSansLoadTracking(
const Align OriginAlign = getOriginAlign(InstAlignment);
Value *Origin = nullptr;
if (ShouldTrackOrigins) {
- IRBuilder<> IRB(Pos);
+ IRBuilder<> IRB(Pos->getParent(), Pos);
Origin = IRB.CreateAlignedLoad(DFS.OriginTy, OriginAddr, OriginAlign);
}
@@ -2311,7 +2311,7 @@ std::pair<Value *, Value *> DFSanFunction::loadShadowOriginSansLoadTracking(
return {LI, Origin};
}
case 2: {
- IRBuilder<> IRB(Pos);
+ IRBuilder<> IRB(Pos->getParent(), Pos);
Value *ShadowAddr1 = IRB.CreateGEP(DFS.PrimitiveShadowTy, ShadowAddr,
ConstantInt::get(DFS.IntptrTy, 1));
Value *Load =
@@ -2327,7 +2327,7 @@ std::pair<Value *, Value *> DFSanFunction::loadShadowOriginSansLoadTracking(
return loadShadowFast(ShadowAddr, OriginAddr, Size, ShadowAlign,
OriginAlign, Origin, Pos);
- IRBuilder<> IRB(Pos);
+ IRBuilder<> IRB(Pos->getParent(), Pos);
CallInst *FallbackCall = IRB.CreateCall(
DFS.DFSanUnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
FallbackCall->addRetAttr(Attribute::ZExt);
@@ -2342,7 +2342,7 @@ DFSanFunction::loadShadowOrigin(Value *Addr, uint64_t Size, Align InstAlignment,
loadShadowOriginSansLoadTracking(Addr, Size, InstAlignment, Pos);
if (DFS.shouldTrackOrigins()) {
if (ClTrackOrigins == 2) {
- IRBuilder<> IRB(Pos);
+ IRBuilder<> IRB(Pos->getParent(), Pos);
auto *ConstantShadow = dyn_cast<Constant>(PrimitiveShadow);
if (!ConstantShadow || !ConstantShadow->isZeroValue())
Origin = updateOriginIfTainted(PrimitiveShadow, Origin, IRB);
@@ -2445,14 +2445,14 @@ void DFSanVisitor::visitLoadInst(LoadInst &LI) {
}
if (ClEventCallbacks) {
- IRBuilder<> IRB(Pos);
+ IRBuilder<> IRB(Pos->getParent(), Pos);
Value *Addr = LI.getPointerOperand();
CallInst *CI =
IRB.CreateCall(DFSF.DFS.DFSanLoadCallbackFn, {PrimitiveShadow, Addr});
CI->addParamAttr(0, Attribute::ZExt);
}
- IRBuilder<> IRB(AfterLi);
+ IRBuilder<> IRB(AfterLi->getParent(), AfterLi);
DFSF.addReachesFunctionCallbacksIfEnabled(IRB, LI, &LI);
}
@@ -2531,7 +2531,7 @@ void DFSanFunction::storeOrigin(BasicBlock::iterator Pos, Value *Addr,
// untainted sinks.
const Align OriginAlignment = getOriginAlign(InstAlignment);
Value *CollapsedShadow = collapseToPrimitiveShadow(Shadow, Pos);
- IRBuilder<> IRB(Pos);
+ IRBuilder<> IRB(Pos->getParent(), Pos);
if (auto *ConstantShadow = dyn_cast<Constant>(CollapsedShadow)) {
if (!ConstantShadow->isZeroValue())
paintOrigin(IRB, updateOrigin(Origin, IRB), StoreOriginAddr, Size,
@@ -2558,7 +2558,7 @@ void DFSanFunction::storeOrigin(BasicBlock::iterator Pos, Value *Addr,
void DFSanFunction::storeZeroPrimitiveShadow(Value *Addr, uint64_t Size,
Align ShadowAlign,
BasicBlock::iterator Pos) {
- IRBuilder<> IRB(Pos);
+ IRBuilder<> IRB(Pos->getParent(), Pos);
IntegerType *ShadowTy =
IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidthBits);
Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
@@ -2578,7 +2578,7 @@ void DFSanFunction::storePrimitiveShadowOrigin(Value *Addr, uint64_t Size,
if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
const auto SI = AllocaShadowMap.find(AI);
if (SI != AllocaShadowMap.end()) {
- IRBuilder<> IRB(Pos);
+ IRBuilder<> IRB(Pos->getParent(), Pos);
IRB.CreateStore(PrimitiveShadow, SI->second);
// Do not write origins for 0 shadows because we do not trace origins for
@@ -2598,7 +2598,7 @@ void DFSanFunction::storePrimitiveShadowOrigin(Value *Addr, uint64_t Size,
return;
}
- IRBuilder<> IRB(Pos);
+ IRBuilder<> IRB(Pos->getParent(), Pos);
Value *ShadowAddr, *OriginAddr;
std::tie(ShadowAddr, OriginAddr) =
DFS.getShadowOriginAddress(Addr, InstAlignment, Pos);
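Each DFSan helper above receives a BasicBlock::iterator Pos and recovers the block with Pos->getParent() when constructing the builder. A standalone sketch of that idiom; loadShadowAt is an illustrative name, not the sanitizer's real helper:

#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Build at a BasicBlock::iterator by pairing it with its parent block, then
// load one byte of shadow from the given address.
static Value *loadShadowAt(BasicBlock::iterator Pos, Value *ShadowAddr) {
  IRBuilder<> IRB(Pos->getParent(), Pos);
  return IRB.CreateLoad(IRB.getInt8Ty(), ShadowAddr, "shadow");
}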
diff --git a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
index 088c058..c7f6f2a 100644
--- a/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
+++ b/llvm/lib/Transforms/Instrumentation/GCOVProfiling.cpp
@@ -918,7 +918,7 @@ bool GCOVProfiler::emitProfileNotes(
for (size_t I : llvm::seq<size_t>(0, Measured)) {
const Edge &E = *MST.allEdges()[I];
- IRBuilder<> Builder(E.Place->getFirstInsertionPt());
+ IRBuilder<> Builder(E.Place, E.Place->getFirstInsertionPt());
Value *V = Builder.CreateConstInBoundsGEP2_64(
Counters->getValueType(), Counters, 0, I);
// Disable sanitizers to decrease size bloat. We don't expect
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index f5b3158..a0e63bf1 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -1622,7 +1622,7 @@ void HWAddressSanitizer::sanitizeFunction(Function &F,
A.removeAttr(llvm::Attribute::WriteOnly);
BasicBlock::iterator InsertPt = F.getEntryBlock().begin();
- IRBuilder<> EntryIRB(InsertPt);
+ IRBuilder<> EntryIRB(&F.getEntryBlock(), InsertPt);
emitPrologue(EntryIRB,
/*WithFrameRecord*/ ClRecordStackHistory != none &&
Mapping.WithFrameRecord &&
diff --git a/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp b/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp
index e5f9273..8a12fa1 100644
--- a/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp
@@ -558,7 +558,7 @@ bool MemProfiler::maybeInsertMemProfInitAtFunctionEntry(Function &F) {
if (F.getName().contains(" load]")) {
FunctionCallee MemProfInitFunction =
declareSanitizerInitFunction(*F.getParent(), MemProfInitName, {});
- IRBuilder<> IRB(F.front().begin());
+ IRBuilder<> IRB(&F.front(), F.front().begin());
IRB.CreateCall(MemProfInitFunction, {});
return true;
}
diff --git a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
index dda869c..572d37a 100644
--- a/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
+++ b/llvm/lib/Transforms/Instrumentation/PGOInstrumentation.cpp
@@ -885,7 +885,7 @@ static void instrumentOneFunc(
FuncInfo.FunctionHash);
if (PGOFunctionEntryCoverage) {
auto &EntryBB = F.getEntryBlock();
- IRBuilder<> Builder(EntryBB.getFirstInsertionPt());
+ IRBuilder<> Builder(&EntryBB, EntryBB.getFirstInsertionPt());
// llvm.instrprof.cover(i8* <name>, i64 <hash>, i32 <num-counters>,
// i32 <index>)
Builder.CreateCall(
@@ -940,7 +940,7 @@ static void instrumentOneFunc(
if (PGOTemporalInstrumentation) {
NumCounters += PGOBlockCoverage ? 8 : 1;
auto &EntryBB = F.getEntryBlock();
- IRBuilder<> Builder(EntryBB.getFirstInsertionPt());
+ IRBuilder<> Builder(&EntryBB, EntryBB.getFirstInsertionPt());
// llvm.instrprof.timestamp(i8* <name>, i64 <hash>, i32 <num-counters>,
// i32 <index>)
Builder.CreateCall(
@@ -950,7 +950,7 @@ static void instrumentOneFunc(
}
for (auto *InstrBB : InstrumentBBs) {
- IRBuilder<> Builder(InstrBB->getFirstInsertionPt());
+ IRBuilder<> Builder(InstrBB, InstrBB->getFirstInsertionPt());
assert(Builder.GetInsertPoint() != InstrBB->end() &&
"Cannot get the Instrumentation point");
// llvm.instrprof.increment(i8* <name>, i64 <hash>, i32 <num-counters>,
diff --git a/llvm/lib/Transforms/ObjCARC/ObjCARC.cpp b/llvm/lib/Transforms/ObjCARC/ObjCARC.cpp
index b4f5e82..33870d7 100644
--- a/llvm/lib/Transforms/ObjCARC/ObjCARC.cpp
+++ b/llvm/lib/Transforms/ObjCARC/ObjCARC.cpp
@@ -80,7 +80,7 @@ CallInst *BundledRetainClaimRVs::insertRVCall(BasicBlock::iterator InsertPt,
CallInst *BundledRetainClaimRVs::insertRVCallWithColors(
BasicBlock::iterator InsertPt, CallBase *AnnotatedCall,
const DenseMap<BasicBlock *, ColorVector> &BlockColors) {
- IRBuilder<> Builder(InsertPt);
+ IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
Function *Func = *objcarc::getAttachedARCFunction(AnnotatedCall);
assert(Func && "operand isn't a Function");
Type *ParamTy = Func->getArg(0)->getType();
diff --git a/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp b/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
index 4ec4750..70bfa46 100644
--- a/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
@@ -1575,7 +1575,7 @@ void ConstraintInfo::addFact(CmpInst::Predicate Pred, Value *A, Value *B,
static bool replaceSubOverflowUses(IntrinsicInst *II, Value *A, Value *B,
SmallVectorImpl<Instruction *> &ToRemove) {
bool Changed = false;
- IRBuilder<> Builder(II->getIterator());
+ IRBuilder<> Builder(II->getParent(), II->getIterator());
Value *Sub = nullptr;
for (User *U : make_early_inc_range(II->users())) {
if (match(U, m_ExtractValue<0>(m_Value()))) {
diff --git a/llvm/lib/Transforms/Scalar/GuardWidening.cpp b/llvm/lib/Transforms/Scalar/GuardWidening.cpp
index 0c2ef23..b6498c4 100644
--- a/llvm/lib/Transforms/Scalar/GuardWidening.cpp
+++ b/llvm/lib/Transforms/Scalar/GuardWidening.cpp
@@ -792,7 +792,7 @@ Value *GuardWideningImpl::hoistChecks(SmallVectorImpl<Value *> &ChecksToHoist,
Value *OldCondition,
BasicBlock::iterator InsertPt) {
assert(!ChecksToHoist.empty());
- IRBuilder<> Builder(InsertPt);
+ IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
makeAvailableAt(ChecksToHoist, InsertPt);
makeAvailableAt(OldCondition, InsertPt);
Value *Result = Builder.CreateAnd(ChecksToHoist);
diff --git a/llvm/lib/Transforms/Scalar/LoopBoundSplit.cpp b/llvm/lib/Transforms/Scalar/LoopBoundSplit.cpp
index cff473b..9a27a08 100644
--- a/llvm/lib/Transforms/Scalar/LoopBoundSplit.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopBoundSplit.cpp
@@ -430,7 +430,7 @@ static bool splitLoopBound(Loop &L, DominatorTree &DT, LoopInfo &LI,
ExitingCond.BI->setSuccessor(1, PostLoopPreHeader);
// Update phi node in exit block of post-loop.
- Builder.SetInsertPoint(PostLoopPreHeader->begin());
+ Builder.SetInsertPoint(PostLoopPreHeader, PostLoopPreHeader->begin());
for (PHINode &PN : PostLoop->getExitBlock()->phis()) {
for (auto i : seq<int>(0, PN.getNumOperands())) {
// Check incoming block is pre-loop's exiting block.
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 12a11cf..3fe5478 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -2491,7 +2491,7 @@ bool LoopIdiomRecognize::recognizeShiftUntilBitTest() {
// Step 4: Rewrite the loop into a countable form, with canonical IV.
// The new canonical induction variable.
- Builder.SetInsertPoint(LoopHeaderBB->begin());
+ Builder.SetInsertPoint(LoopHeaderBB, LoopHeaderBB->begin());
auto *IV = Builder.CreatePHI(Ty, 2, CurLoop->getName() + ".iv");
// The induction itself.
@@ -2815,11 +2815,11 @@ bool LoopIdiomRecognize::recognizeShiftUntilZero() {
// Step 3: Rewrite the loop into a countable form, with canonical IV.
// The new canonical induction variable.
- Builder.SetInsertPoint(LoopHeaderBB->begin());
+ Builder.SetInsertPoint(LoopHeaderBB, LoopHeaderBB->begin());
auto *CIV = Builder.CreatePHI(Ty, 2, CurLoop->getName() + ".iv");
// The induction itself.
- Builder.SetInsertPoint(LoopHeaderBB->getFirstNonPHIIt());
+ Builder.SetInsertPoint(LoopHeaderBB, LoopHeaderBB->getFirstNonPHIIt());
auto *CIVNext =
Builder.CreateAdd(CIV, ConstantInt::get(Ty, 1), CIV->getName() + ".next",
/*HasNUW=*/true, /*HasNSW=*/Bitwidth != 2);
diff --git a/llvm/lib/Transforms/Scalar/LoopPredication.cpp b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
index e25b34b..027dbb9 100644
--- a/llvm/lib/Transforms/Scalar/LoopPredication.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopPredication.cpp
@@ -782,7 +782,7 @@ bool LoopPredication::widenWidenableBranchGuardConditions(
BI->setCondition(AllChecks);
if (InsertAssumesOfPredicatedGuardsConditions) {
BasicBlock *IfTrueBB = BI->getSuccessor(0);
- Builder.SetInsertPoint(IfTrueBB->getFirstInsertionPt());
+ Builder.SetInsertPoint(IfTrueBB, IfTrueBB->getFirstInsertionPt());
// If this block has other predecessors, we might not be able to use Cond.
// In this case, create a Phi where every other input is `true` and input
// from guard block is Cond.
diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
index fb8f10f..e991296 100644
--- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
@@ -1656,7 +1656,7 @@ public:
// condition holds, they alias, otherwise they are guaranteed to not
// overlap.
Check1->getTerminator()->eraseFromParent();
- Builder.SetInsertPoint(Check1->begin());
+ Builder.SetInsertPoint(Check1, Check1->begin());
Value *LoadEnd = Builder.CreateAdd(
LoadBegin, ConstantInt::get(IntPtrTy, LoadLoc.Size.getValue()),
"load.end", true, true);
@@ -1664,7 +1664,7 @@ public:
Fusion);
// Copy load operand to new alloca.
- Builder.SetInsertPoint(Copy->begin());
+ Builder.SetInsertPoint(Copy, Copy->begin());
auto *VT = cast<FixedVectorType>(Load->getType());
// Use an array type for the alloca, to avoid potentially huge alignment
// requirements for large vector types.
@@ -1674,7 +1674,7 @@ public:
Builder.CreateMemCpy(Alloca, Alloca->getAlign(), Load->getPointerOperand(),
Load->getAlign(), LoadLoc.Size.getValue());
- Builder.SetInsertPoint(Fusion->begin());
+ Builder.SetInsertPoint(Fusion, Fusion->begin());
PHINode *PHI = Builder.CreatePHI(Load->getPointerOperandType(), 3);
PHI->addIncoming(Load->getPointerOperand(), Check0);
PHI->addIncoming(Load->getPointerOperand(), Check1);
diff --git a/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp b/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
index 6c4c5f6..3a699df 100644
--- a/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
+++ b/llvm/lib/Transforms/Scalar/PartiallyInlineLibCalls.cpp
@@ -70,7 +70,7 @@ static bool optimizeSQRT(CallInst *Call, Function *CalledFunc,
// Create phi that will merge results of either sqrt and replace all uses.
BasicBlock *JoinBB = LibCallTerm->getSuccessor(0);
JoinBB->setName(CurrBB.getName() + ".split");
- Builder.SetInsertPoint(JoinBB->begin());
+ Builder.SetInsertPoint(JoinBB, JoinBB->begin());
PHINode *Phi = Builder.CreatePHI(Ty, 2);
Call->replaceAllUsesWith(Phi);
diff --git a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index 5677ecc..e0a9cff 100644
--- a/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/llvm/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -1870,7 +1870,7 @@ makeStatepointExplicitImpl(CallBase *Call, /* to replace */
UnwindBlock->getUniquePredecessor() &&
"can't safely insert in this block!");
- Builder.SetInsertPoint(UnwindBlock->getFirstInsertionPt());
+ Builder.SetInsertPoint(UnwindBlock, UnwindBlock->getFirstInsertionPt());
Builder.SetCurrentDebugLocation(II->getDebugLoc());
// Attach exceptional gc relocates to the landingpad.
@@ -1885,7 +1885,7 @@ makeStatepointExplicitImpl(CallBase *Call, /* to replace */
NormalDest->getUniquePredecessor() &&
"can't safely insert in this block!");
- Builder.SetInsertPoint(NormalDest->getFirstInsertionPt());
+ Builder.SetInsertPoint(NormalDest, NormalDest->getFirstInsertionPt());
// gc relocates will be generated later as if it were regular call
// statepoint
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index b90059d..2adbdca4 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -2942,7 +2942,7 @@ private:
// after the load, so that variable values referring to the load are
// dominated by it.
LIIt.setHeadBit(true);
- IRB.SetInsertPoint(LIIt);
+ IRB.SetInsertPoint(LI.getParent(), LIIt);
// Create a placeholder value with the same type as LI to use as the
// basis for the new value. This allows us to replace the uses of LI with
// the computed value, and then replace the placeholder with LI, leaving
@@ -3604,7 +3604,8 @@ private:
// dominate the PHI.
IRBuilderBase::InsertPointGuard Guard(IRB);
if (isa<PHINode>(OldPtr))
- IRB.SetInsertPoint(OldPtr->getParent()->getFirstInsertionPt());
+ IRB.SetInsertPoint(OldPtr->getParent(),
+ OldPtr->getParent()->getFirstInsertionPt());
else
IRB.SetInsertPoint(OldPtr);
IRB.SetCurrentDebugLocation(OldPtr->getDebugLoc());
diff --git a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
index f419117..8f820a3 100644
--- a/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
+++ b/llvm/lib/Transforms/Scalar/ScalarizeMaskedMemIntrin.cpp
@@ -239,7 +239,7 @@ static void scalarizeMaskedLoad(const DataLayout &DL, CallInst *CI,
IfBlock = NewIfBlock;
// Create the phi to join the new and previous value.
- Builder.SetInsertPoint(NewIfBlock->begin());
+ Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
PHINode *Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
Phi->addIncoming(NewVResult, CondBlock);
Phi->addIncoming(VResult, PrevIfBlock);
@@ -366,7 +366,7 @@ static void scalarizeMaskedStore(const DataLayout &DL, CallInst *CI,
BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
NewIfBlock->setName("else");
- Builder.SetInsertPoint(NewIfBlock->begin());
+ Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
}
CI->eraseFromParent();
@@ -493,7 +493,7 @@ static void scalarizeMaskedGather(const DataLayout &DL, CallInst *CI,
IfBlock = NewIfBlock;
// Create the phi to join the new and previous value.
- Builder.SetInsertPoint(NewIfBlock->begin());
+ Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
PHINode *Phi = Builder.CreatePHI(VecType, 2, "res.phi.else");
Phi->addIncoming(NewVResult, CondBlock);
Phi->addIncoming(VResult, PrevIfBlock);
@@ -615,7 +615,7 @@ static void scalarizeMaskedScatter(const DataLayout &DL, CallInst *CI,
BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
NewIfBlock->setName("else");
- Builder.SetInsertPoint(NewIfBlock->begin());
+ Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
}
CI->eraseFromParent();
@@ -733,7 +733,7 @@ static void scalarizeMaskedExpandLoad(const DataLayout &DL, CallInst *CI,
IfBlock = NewIfBlock;
// Create the phi to join the new and previous value.
- Builder.SetInsertPoint(NewIfBlock->begin());
+ Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
PHINode *ResultPhi = Builder.CreatePHI(VecType, 2, "res.phi.else");
ResultPhi->addIncoming(NewVResult, CondBlock);
ResultPhi->addIncoming(VResult, PrevIfBlock);
@@ -847,7 +847,7 @@ static void scalarizeMaskedCompressStore(const DataLayout &DL, CallInst *CI,
BasicBlock *PrevIfBlock = IfBlock;
IfBlock = NewIfBlock;
- Builder.SetInsertPoint(NewIfBlock->begin());
+ Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
// Add a PHI for the pointer if this isn't the last iteration.
if ((Idx + 1) != VectorWidth) {
@@ -918,7 +918,7 @@ static void scalarizeMaskedVectorHistogram(const DataLayout &DL, CallInst *CI,
// Create "else" block, fill it in the next iteration
BasicBlock *NewIfBlock = ThenTerm->getSuccessor(0);
NewIfBlock->setName("else");
- Builder.SetInsertPoint(NewIfBlock->begin());
+ Builder.SetInsertPoint(NewIfBlock, NewIfBlock->begin());
}
CI->eraseFromParent();
diff --git a/llvm/lib/Transforms/Scalar/Scalarizer.cpp b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
index fd7d8c2..3eca9ac 100644
--- a/llvm/lib/Transforms/Scalar/Scalarizer.cpp
+++ b/llvm/lib/Transforms/Scalar/Scalarizer.cpp
@@ -127,10 +127,10 @@ public:
Scatterer() = default;
// Scatter V into Size components. If new instructions are needed,
- // insert them before BBI. If Cache is nonnull, use it to cache
+ // insert them before BBI in BB. If Cache is nonnull, use it to cache
// the results.
- Scatterer(BasicBlock::iterator bbi, Value *v, const VectorSplit &VS,
- ValueVector *cachePtr = nullptr);
+ Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
+ const VectorSplit &VS, ValueVector *cachePtr = nullptr);
// Return component I, creating a new Value for it if necessary.
Value *operator[](unsigned I);
@@ -139,6 +139,7 @@ public:
unsigned size() const { return VS.NumFragments; }
private:
+ BasicBlock *BB;
BasicBlock::iterator BBI;
Value *V;
VectorSplit VS;
@@ -341,9 +342,9 @@ private:
} // end anonymous namespace
-Scatterer::Scatterer(BasicBlock::iterator bbi, Value *v, const VectorSplit &VS,
- ValueVector *cachePtr)
- : BBI(bbi), V(v), VS(VS), CachePtr(cachePtr) {
+Scatterer::Scatterer(BasicBlock *bb, BasicBlock::iterator bbi, Value *v,
+ const VectorSplit &VS, ValueVector *cachePtr)
+ : BB(bb), BBI(bbi), V(v), VS(VS), CachePtr(cachePtr) {
IsPointer = V->getType()->isPointerTy();
if (!CachePtr) {
Tmp.resize(VS.NumFragments, nullptr);
@@ -362,7 +363,7 @@ Value *Scatterer::operator[](unsigned Frag) {
// Try to reuse a previous value.
if (CV[Frag])
return CV[Frag];
- IRBuilder<> Builder(BBI);
+ IRBuilder<> Builder(BB, BBI);
if (IsPointer) {
if (Frag == 0)
CV[Frag] = V;
@@ -442,7 +443,7 @@ Scatterer ScalarizerVisitor::scatter(Instruction *Point, Value *V,
// so that it can be used everywhere.
Function *F = VArg->getParent();
BasicBlock *BB = &F->getEntryBlock();
- return Scatterer(BB->begin(), V, VS, &Scattered[{V, VS.SplitTy}]);
+ return Scatterer(BB, BB->begin(), V, VS, &Scattered[{V, VS.SplitTy}]);
}
if (Instruction *VOp = dyn_cast<Instruction>(V)) {
// When scalarizing PHI nodes we might try to examine/rewrite InsertElement
@@ -452,17 +453,18 @@ Scatterer ScalarizerVisitor::scatter(Instruction *Point, Value *V,
// originating from instructions in unreachable blocks as undef we do not
// need to analyse them further.
if (!DT->isReachableFromEntry(VOp->getParent()))
- return Scatterer(Point->getIterator(), PoisonValue::get(V->getType()),
- VS);
+ return Scatterer(Point->getParent(), Point->getIterator(),
+ PoisonValue::get(V->getType()), VS);
// Put the scattered form of an instruction directly after the
// instruction, skipping over PHI nodes and debug intrinsics.
+ BasicBlock *BB = VOp->getParent();
return Scatterer(
- skipPastPhiNodesAndDbg(std::next(BasicBlock::iterator(VOp))), V, VS,
+ BB, skipPastPhiNodesAndDbg(std::next(BasicBlock::iterator(VOp))), V, VS,
&Scattered[{V, VS.SplitTy}]);
}
// In the fallback case, just put the scattered before Point and
// keep the result local to Point.
- return Scatterer(Point->getIterator(), V, VS);
+ return Scatterer(Point->getParent(), Point->getIterator(), V, VS);
}
// Replace Op with the gathered form of the components in CV. Defer the
@@ -1179,7 +1181,7 @@ bool ScalarizerVisitor::finish() {
BasicBlock *BB = Op->getParent();
IRBuilder<> Builder(Op);
if (isa<PHINode>(Op))
- Builder.SetInsertPoint(BB->getFirstInsertionPt());
+ Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
VectorSplit VS = *getVectorSplit(Ty);
assert(VS.NumFragments == CV.size());
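The Scatterer change is representative of helpers that used to cache only an iterator and now also cache the owning block. A minimal sketch of such a helper; the struct and its emitZExt method are ours, not the pass's:

#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Remember an insertion position as a (block, iterator) pair so a builder can
// be re-anchored there later without recovering the block from the iterator.
struct SavedInsertPos {
  BasicBlock *BB;
  BasicBlock::iterator It;

  SavedInsertPos(BasicBlock *B, BasicBlock::iterator I) : BB(B), It(I) {}

  // Materialise a zero-extension of V at the remembered position.
  Value *emitZExt(Value *V, Type *DstTy) const {
    IRBuilder<> Builder(BB, It);
    return Builder.CreateZExt(V, DstTy, V->getName() + ".zext");
  }
};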
diff --git a/llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp b/llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp
index 4cc16be..6ca737d 100644
--- a/llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp
+++ b/llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp
@@ -134,7 +134,7 @@ static Value *getStrlenWithNull(IRBuilder<> &Builder, Value *Str) {
Builder.CreateCondBr(Cmp, WhileDone, While);
// Add one to the computed length.
- Builder.SetInsertPoint(WhileDone->begin());
+ Builder.SetInsertPoint(WhileDone, WhileDone->begin());
auto Begin = Builder.CreatePtrToInt(Str, Int64Ty);
auto End = Builder.CreatePtrToInt(PtrPhi, Int64Ty);
auto Len = Builder.CreateSub(End, Begin);
@@ -142,7 +142,7 @@ static Value *getStrlenWithNull(IRBuilder<> &Builder, Value *Str) {
// Final join.
BranchInst::Create(Join, WhileDone);
- Builder.SetInsertPoint(Join->begin());
+ Builder.SetInsertPoint(Join, Join->begin());
auto LenPhi = Builder.CreatePHI(Len->getType(), 2);
LenPhi->addIncoming(Len, WhileDone);
LenPhi->addIncoming(Zero, Prev);
diff --git a/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp b/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
index 58056b9..73a50b7 100644
--- a/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
+++ b/llvm/lib/Transforms/Utils/BypassSlowDivision.cpp
@@ -260,7 +260,7 @@ QuotRemWithBB FastDivInsertionTask::createSlowBB(BasicBlock *SuccessorBB) {
QuotRemWithBB DivRemPair;
DivRemPair.BB = BasicBlock::Create(MainBB->getParent()->getContext(), "",
MainBB->getParent(), SuccessorBB);
- IRBuilder<> Builder(DivRemPair.BB->begin());
+ IRBuilder<> Builder(DivRemPair.BB, DivRemPair.BB->begin());
Builder.SetCurrentDebugLocation(SlowDivOrRem->getDebugLoc());
Value *Dividend = SlowDivOrRem->getOperand(0);
@@ -284,7 +284,7 @@ QuotRemWithBB FastDivInsertionTask::createFastBB(BasicBlock *SuccessorBB) {
QuotRemWithBB DivRemPair;
DivRemPair.BB = BasicBlock::Create(MainBB->getParent()->getContext(), "",
MainBB->getParent(), SuccessorBB);
- IRBuilder<> Builder(DivRemPair.BB->begin());
+ IRBuilder<> Builder(DivRemPair.BB, DivRemPair.BB->begin());
Builder.SetCurrentDebugLocation(SlowDivOrRem->getDebugLoc());
Value *Dividend = SlowDivOrRem->getOperand(0);
@@ -310,7 +310,7 @@ QuotRemWithBB FastDivInsertionTask::createFastBB(BasicBlock *SuccessorBB) {
QuotRemPair FastDivInsertionTask::createDivRemPhiNodes(QuotRemWithBB &LHS,
QuotRemWithBB &RHS,
BasicBlock *PhiBB) {
- IRBuilder<> Builder(PhiBB->begin());
+ IRBuilder<> Builder(PhiBB, PhiBB->begin());
Builder.SetCurrentDebugLocation(SlowDivOrRem->getDebugLoc());
PHINode *QuoPhi = Builder.CreatePHI(getSlowType(), 2);
QuoPhi->addIncoming(LHS.Quotient, LHS.BB);
@@ -327,7 +327,7 @@ QuotRemPair FastDivInsertionTask::createDivRemPhiNodes(QuotRemWithBB &LHS,
/// doesn't need a runtime check.
Value *FastDivInsertionTask::insertOperandRuntimeCheck(Value *Op1, Value *Op2) {
assert((Op1 || Op2) && "Nothing to check");
- IRBuilder<> Builder(MainBB->end());
+ IRBuilder<> Builder(MainBB, MainBB->end());
Builder.SetCurrentDebugLocation(SlowDivOrRem->getDebugLoc());
Value *OrV;
@@ -397,7 +397,7 @@ std::optional<QuotRemPair> FastDivInsertionTask::insertFastDivAndRem() {
isa<ConstantInt>(BCI->getOperand(0)))
return std::nullopt;
- IRBuilder<> Builder(MainBB->end());
+ IRBuilder<> Builder(MainBB, MainBB->end());
Builder.SetCurrentDebugLocation(SlowDivOrRem->getDebugLoc());
if (DividendShort && !isSignedOp()) {
diff --git a/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp b/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
index d708501..dda80d4 100644
--- a/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
+++ b/llvm/lib/Transforms/Utils/CallPromotionUtils.cpp
@@ -113,7 +113,7 @@ static void createRetPHINode(Instruction *OrigInst, Instruction *NewInst,
if (OrigInst->getType()->isVoidTy() || OrigInst->use_empty())
return;
- Builder.SetInsertPoint(MergeBlock->begin());
+ Builder.SetInsertPoint(MergeBlock, MergeBlock->begin());
PHINode *Phi = Builder.CreatePHI(OrigInst->getType(), 0);
SmallVector<User *, 16> UsersToUpdate(OrigInst->users());
for (User *U : UsersToUpdate)
diff --git a/llvm/lib/Transforms/Utils/FlattenCFG.cpp b/llvm/lib/Transforms/Utils/FlattenCFG.cpp
index 27201af..16b4bb1 100644
--- a/llvm/lib/Transforms/Utils/FlattenCFG.cpp
+++ b/llvm/lib/Transforms/Utils/FlattenCFG.cpp
@@ -487,6 +487,7 @@ bool FlattenCFGOpt::MergeIfRegion(BasicBlock *BB, IRBuilder<> &Builder) {
FirstEntryBlock->splice(FirstEntryBlock->end(), SecondEntryBlock);
BranchInst *PBI = cast<BranchInst>(FirstEntryBlock->getTerminator());
assert(PBI->getCondition() == CInst2);
+ BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
Builder.SetInsertPoint(PBI);
if (InvertCond2) {
@@ -494,7 +495,7 @@ bool FlattenCFGOpt::MergeIfRegion(BasicBlock *BB, IRBuilder<> &Builder) {
}
Value *NC = Builder.CreateBinOp(CombineOp, CInst1, PBI->getCondition());
PBI->replaceUsesOfWith(PBI->getCondition(), NC);
- Builder.SetInsertPoint(SaveInsertPt);
+ Builder.SetInsertPoint(SaveInsertBB, SaveInsertPt);
// Remove IfTrue1
if (IfTrue1 != FirstEntryBlock) {
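With the two-argument form, saving and restoring a builder's position means capturing both the block and the iterator, as this FlattenCFG hunk does; InsertPointGuard, used elsewhere in the diff, wraps the same idea in RAII. A sketch of the manual save/restore with an illustrative helper:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Emit a 'not' of Cond just before I, then restore the caller's insertion
// point from the saved (block, iterator) pair.
static Value *createNotAtSavedPoint(IRBuilder<> &Builder, Instruction *I,
                                    Value *Cond) {
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  Builder.SetInsertPoint(I);
  Value *NotCond = Builder.CreateNot(Cond, "not.cond");
  Builder.SetInsertPoint(SaveInsertBB, SaveInsertPt);
  return NotCond;
}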
diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 7ed8af1..39e3a2c 100644
--- a/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -1611,7 +1611,7 @@ static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src,
Module *M, BasicBlock *InsertBlock,
InlineFunctionInfo &IFI,
Function *CalledFunc) {
- IRBuilder<> Builder(InsertBlock->begin());
+ IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
Value *Size =
Builder.getInt64(M->getDataLayout().getTypeStoreSize(ByValType));
@@ -2611,7 +2611,7 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
// `Caller->isPresplitCoroutine()` would affect AlwaysInliner at O0 only.
if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
!IFI.StaticAllocas.empty()) {
- IRBuilder<> builder(FirstNewBlock->begin());
+ IRBuilder<> builder(&*FirstNewBlock, FirstNewBlock->begin());
for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
AllocaInst *AI = IFI.StaticAllocas[ai];
// Don't mark swifterror allocas. They can't have bitcast uses.
@@ -2666,8 +2666,8 @@ llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
// code with llvm.stacksave/llvm.stackrestore intrinsics.
if (InlinedFunctionInfo.ContainsDynamicAllocas) {
// Insert the llvm.stacksave.
- CallInst *SavedPtr =
- IRBuilder<>(FirstNewBlock->begin()).CreateStackSave("savedstack");
+ CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
+ .CreateStackSave("savedstack");
// Insert a call to llvm.stackrestore before any return instructions in the
// inlined function.
diff --git a/llvm/lib/Transforms/Utils/IntegerDivision.cpp b/llvm/lib/Transforms/Utils/IntegerDivision.cpp
index efb9307..cea0954 100644
--- a/llvm/lib/Transforms/Utils/IntegerDivision.cpp
+++ b/llvm/lib/Transforms/Utils/IntegerDivision.cpp
@@ -316,7 +316,7 @@ static Value *generateUnsignedDivisionCode(Value *Dividend, Value *Divisor,
// ; end: ; preds = %loop-exit, %special-cases
// ; %q_5 = phi i32 [ %q_4, %loop-exit ], [ %retVal, %special-cases ]
// ; ret i32 %q_5
- Builder.SetInsertPoint(End->begin());
+ Builder.SetInsertPoint(End, End->begin());
PHINode *Q_5 = Builder.CreatePHI(DivTy, 2);
// Populate the Phis, since all values have now been created. Our Phis were:
diff --git a/llvm/lib/Transforms/Utils/SSAUpdaterBulk.cpp b/llvm/lib/Transforms/Utils/SSAUpdaterBulk.cpp
index 92713bc..cad7ff6 100644
--- a/llvm/lib/Transforms/Utils/SSAUpdaterBulk.cpp
+++ b/llvm/lib/Transforms/Utils/SSAUpdaterBulk.cpp
@@ -150,7 +150,7 @@ void SSAUpdaterBulk::RewriteAllUses(DominatorTree *DT,
// We've computed IDF, now insert new phi-nodes there.
SmallVector<PHINode *, 4> InsertedPHIsForVar;
for (auto *FrontierBB : IDFBlocks) {
- IRBuilder<> B(FrontierBB->begin());
+ IRBuilder<> B(FrontierBB, FrontierBB->begin());
PHINode *PN = B.CreatePHI(R.Ty, 0, R.Name);
R.Defines[FrontierBB] = PN;
InsertedPHIsForVar.push_back(PN);
diff --git a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
index be5ff67..c7d758a 100644
--- a/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
+++ b/llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp
@@ -1069,7 +1069,7 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
// Create the PHI.
BasicBlock *Header = L->getHeader();
- Builder.SetInsertPoint(Header->begin());
+ Builder.SetInsertPoint(Header, Header->begin());
PHINode *PN =
Builder.CreatePHI(ExpandTy, pred_size(Header), Twine(IVName) + ".iv");
@@ -1521,7 +1521,7 @@ Value *SCEVExpander::expand(const SCEV *S) {
return I->second;
SCEVInsertPointGuard Guard(Builder, this);
- Builder.SetInsertPoint(InsertPt);
+ Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
// Expand the expression into instructions.
SmallVector<Instruction *> DropPoisonGeneratingInsts;
@@ -1656,7 +1656,7 @@ void SCEVExpander::replaceCongruentIVInc(
else
IP = OrigInc->getNextNonDebugInstruction()->getIterator();
- IRBuilder<> Builder(IP);
+ IRBuilder<> Builder(IP->getParent(), IP);
Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
NewInc =
Builder.CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
@@ -1759,7 +1759,8 @@ SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
++NumElim;
Value *NewIV = OrigPhiRef;
if (OrigPhiRef->getType() != Phi->getType()) {
- IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
+ IRBuilder<> Builder(L->getHeader(),
+ L->getHeader()->getFirstInsertionPt());
Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
}
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index a185efe..4e2dc7f 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -4180,7 +4180,7 @@ static bool mergeConditionalStoreToAddress(
QStore->getParent(), PPHI);
BasicBlock::iterator PostBBFirst = PostBB->getFirstInsertionPt();
- IRBuilder<> QB(PostBBFirst);
+ IRBuilder<> QB(PostBB, PostBBFirst);
QB.SetCurrentDebugLocation(PostBBFirst->getStableDebugLoc());
Value *PPred = PStore->getParent() == PTB ? PCond : QB.CreateNot(PCond);
diff --git a/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp b/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
index 9a175e8..74af0ef 100644
--- a/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyIndVar.cpp
@@ -1796,7 +1796,8 @@ bool WidenIV::widenWithVariantUse(WidenIV::NarrowIVDefUse DU) {
assert(LoopExitingBlock && L->contains(LoopExitingBlock) &&
"Not a LCSSA Phi?");
WidePN->addIncoming(WideBO, LoopExitingBlock);
- Builder.SetInsertPoint(User->getParent()->getFirstInsertionPt());
+ Builder.SetInsertPoint(User->getParent(),
+ User->getParent()->getFirstInsertionPt());
auto *TruncPN = Builder.CreateTrunc(WidePN, User->getType());
User->replaceAllUsesWith(TruncPN);
DeadInsts.emplace_back(User);
@@ -1859,7 +1860,7 @@ Instruction *WidenIV::widenIVUse(WidenIV::NarrowIVDefUse DU,
UsePhi->getIterator());
WidePhi->addIncoming(DU.WideDef, UsePhi->getIncomingBlock(0));
BasicBlock *WidePhiBB = WidePhi->getParent();
- IRBuilder<> Builder(WidePhiBB->getFirstInsertionPt());
+ IRBuilder<> Builder(WidePhiBB, WidePhiBB->getFirstInsertionPt());
Value *Trunc = Builder.CreateTrunc(WidePhi, DU.NarrowDef->getType(), "",
CanWidenByZExt, CanWidenBySExt);
UsePhi->replaceAllUsesWith(Trunc);
diff --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index 22b82f3..60ea200 100644
--- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -2829,12 +2829,12 @@ static bool insertSinCosCall(IRBuilderBase &B, Function *OrigCallee, Value *Arg,
if (Instruction *ArgInst = dyn_cast<Instruction>(Arg)) {
// If the argument is an instruction, it must dominate all uses so put our
// sincos call there.
- B.SetInsertPoint(++ArgInst->getIterator());
+ B.SetInsertPoint(ArgInst->getParent(), ++ArgInst->getIterator());
} else {
// Otherwise (e.g. for a constant) the beginning of the function is as
// good a place as any.
BasicBlock &EntryBB = B.GetInsertBlock()->getParent()->getEntryBlock();
- B.SetInsertPoint(EntryBB.begin());
+ B.SetInsertPoint(&EntryBB, EntryBB.begin());
}
SinCos = B.CreateCall(Callee, Arg, "sincospi");
diff --git a/llvm/lib/Transforms/Vectorize/LoopIdiomVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopIdiomVectorize.cpp
index b6afb74..38095b1 100644
--- a/llvm/lib/Transforms/Vectorize/LoopIdiomVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopIdiomVectorize.cpp
@@ -646,7 +646,7 @@ Value *LoopIdiomVectorize::expandFindMismatch(
// 3. We didn't find a mismatch in the vector loop, so we return MaxLen.
// 4. We exited the vector loop early due to a mismatch and need to return
// the index that we found.
- Builder.SetInsertPoint(EndBlock->getFirstInsertionPt());
+ Builder.SetInsertPoint(EndBlock, EndBlock->getFirstInsertionPt());
PHINode *ResPhi = Builder.CreatePHI(ResType, 4, "mismatch_result");
ResPhi->addIncoming(MaxLen, LoopIncBlock);
ResPhi->addIncoming(IndexPhi, LoopStartBlock);
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 2261eb8..771fb24 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -3026,7 +3026,8 @@ PHINode *InnerLoopVectorizer::createInductionResumeValue(
// Compute the end value for the additional bypass (if applicable).
if (AdditionalBypass.first) {
- B.SetInsertPoint(AdditionalBypass.first->getFirstInsertionPt());
+ B.SetInsertPoint(AdditionalBypass.first,
+ AdditionalBypass.first->getFirstInsertionPt());
EndValueFromAdditionalBypass =
emitTransformedIndex(B, AdditionalBypass.second, II.getStartValue(),
Step, II.getKind(), II.getInductionBinOp());
@@ -3440,7 +3441,8 @@ void InnerLoopVectorizer::fixVectorizedLoop(VPTransformState &State,
// Fix LCSSA phis not already fixed earlier. Extracts may need to be generated
// in the exit block, so update the builder.
- State.Builder.SetInsertPoint(State.CFG.ExitBB->getFirstNonPHIIt());
+ State.Builder.SetInsertPoint(State.CFG.ExitBB,
+ State.CFG.ExitBB->getFirstNonPHIIt());
for (const auto &KV : Plan.getLiveOuts())
KV.second->fixPhi(Plan, State);
@@ -3483,7 +3485,7 @@ void InnerLoopVectorizer::fixFixedOrderRecurrence(VPLiveOut *LO,
PHINode *ScalarHeaderPhi = LO->getPhi();
auto *InitScalarFOR =
ScalarHeaderPhi->getIncomingValueForBlock(LoopScalarPreHeader);
- Builder.SetInsertPoint(LoopScalarPreHeader->begin());
+ Builder.SetInsertPoint(LoopScalarPreHeader, LoopScalarPreHeader->begin());
auto *ScalarPreheaderPhi =
Builder.CreatePHI(ScalarHeaderPhi->getType(), 2, "scalar.recur.init");
for (auto *BB : predecessors(LoopScalarPreHeader)) {
diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
index e95de7b..501f6af 100644
--- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
+++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -11374,11 +11374,12 @@ void BoUpSLP::setInsertPointAfterBundle(const TreeEntry *E) {
LastInstIt = LastInst->getParent()->getFirstNonPHIIt();
if (IsPHI || (E->State != TreeEntry::NeedToGather &&
doesNotNeedToSchedule(E->Scalars))) {
- Builder.SetInsertPoint(LastInstIt);
+ Builder.SetInsertPoint(LastInst->getParent(), LastInstIt);
} else {
// Set the insertion point after the last instruction in the bundle. Set the
// debug location to Front.
Builder.SetInsertPoint(
+ LastInst->getParent(),
LastInst->getNextNonDebugInstruction()->getIterator());
}
Builder.SetCurrentDebugLocation(Front->getDebugLoc());
@@ -12614,7 +12615,8 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
if (PostponedPHIs && E->VectorizedValue)
return E->VectorizedValue;
auto *PH = cast<PHINode>(VL0);
- Builder.SetInsertPoint(PH->getParent()->getFirstNonPHIIt());
+ Builder.SetInsertPoint(PH->getParent(),
+ PH->getParent()->getFirstNonPHIIt());
Builder.SetCurrentDebugLocation(PH->getDebugLoc());
if (PostponedPHIs || !E->VectorizedValue) {
PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
@@ -12622,7 +12624,8 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E, bool PostponedPHIs) {
Value *V = NewPhi;
// Adjust insertion point once all PHI's have been generated.
- Builder.SetInsertPoint(PH->getParent()->getFirstInsertionPt());
+ Builder.SetInsertPoint(PH->getParent(),
+ PH->getParent()->getFirstInsertionPt());
Builder.SetCurrentDebugLocation(PH->getDebugLoc());
V = FinalShuffle(V, E, VecTy);
@@ -13494,9 +13497,10 @@ Value *BoUpSLP::vectorizeTree(
EntryToLastInstruction.clear();
if (ReductionRoot)
- Builder.SetInsertPoint(ReductionRoot->getIterator());
+ Builder.SetInsertPoint(ReductionRoot->getParent(),
+ ReductionRoot->getIterator());
else
- Builder.SetInsertPoint(F->getEntryBlock().begin());
+ Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin());
// Postpone emission of PHIs operands to avoid cyclic dependencies issues.
(void)vectorizeTree(VectorizableTree[0].get(), /*PostponedPHIs=*/true);
@@ -13750,11 +13754,13 @@ Value *BoUpSLP::vectorizeTree(
"instructions");
if (auto *VecI = dyn_cast<Instruction>(Vec)) {
if (auto *PHI = dyn_cast<PHINode>(VecI))
- Builder.SetInsertPoint(PHI->getParent()->getFirstNonPHIIt());
+ Builder.SetInsertPoint(PHI->getParent(),
+ PHI->getParent()->getFirstNonPHIIt());
else
- Builder.SetInsertPoint(std::next(VecI->getIterator()));
+ Builder.SetInsertPoint(VecI->getParent(),
+ std::next(VecI->getIterator()));
} else {
- Builder.SetInsertPoint(F->getEntryBlock().begin());
+ Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin());
}
Value *NewInst = ExtractAndExtendIfNeeded(Vec);
// Required to update internally referenced instructions.
@@ -13860,7 +13866,8 @@ Value *BoUpSLP::vectorizeTree(
Instruction *IncomingTerminator =
PH->getIncomingBlock(I)->getTerminator();
if (isa<CatchSwitchInst>(IncomingTerminator)) {
- Builder.SetInsertPoint(std::next(VecI->getIterator()));
+ Builder.SetInsertPoint(VecI->getParent(),
+ std::next(VecI->getIterator()));
} else {
Builder.SetInsertPoint(PH->getIncomingBlock(I)->getTerminator());
}
@@ -13874,7 +13881,7 @@ Value *BoUpSLP::vectorizeTree(
User->replaceUsesOfWith(Scalar, NewInst);
}
} else {
- Builder.SetInsertPoint(F->getEntryBlock().begin());
+ Builder.SetInsertPoint(&F->getEntryBlock(), F->getEntryBlock().begin());
Value *NewInst = ExtractAndExtendIfNeeded(Vec);
User->replaceUsesOfWith(Scalar, NewInst);
}
@@ -14044,7 +14051,8 @@ Value *BoUpSLP::vectorizeTree(
It != MinBWs.end() &&
ReductionBitWidth != It->second.first) {
IRBuilder<>::InsertPointGuard Guard(Builder);
- Builder.SetInsertPoint(ReductionRoot->getIterator());
+ Builder.SetInsertPoint(ReductionRoot->getParent(),
+ ReductionRoot->getIterator());
Vec = Builder.CreateIntCast(
Vec,
VectorType::get(Builder.getIntNTy(ReductionBitWidth),
diff --git a/llvm/unittests/Analysis/MemorySSATest.cpp b/llvm/unittests/Analysis/MemorySSATest.cpp
index f4132c6..e730c5b 100644
--- a/llvm/unittests/Analysis/MemorySSATest.cpp
+++ b/llvm/unittests/Analysis/MemorySSATest.cpp
@@ -121,7 +121,7 @@ TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
BasicBlock *Merge(BasicBlock::Create(C, "", F));
B.SetInsertPoint(Entry);
B.CreateCondBr(B.getTrue(), Left, Right);
- B.SetInsertPoint(Left->begin());
+ B.SetInsertPoint(Left, Left->begin());
Argument *PointerArg = &*F->arg_begin();
B.SetInsertPoint(Left);
B.CreateBr(Merge);
@@ -132,14 +132,14 @@ TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
MemorySSA &MSSA = *Analyses->MSSA;
MemorySSAUpdater Updater(&MSSA);
// Add the store
- B.SetInsertPoint(Entry->begin());
+ B.SetInsertPoint(Entry, Entry->begin());
StoreInst *EntryStore = B.CreateStore(B.getInt8(16), PointerArg);
MemoryAccess *EntryStoreAccess = Updater.createMemoryAccessInBB(
EntryStore, nullptr, Entry, MemorySSA::Beginning);
Updater.insertDef(cast<MemoryDef>(EntryStoreAccess));
// Add the load
- B.SetInsertPoint(Merge->begin());
+ B.SetInsertPoint(Merge, Merge->begin());
LoadInst *FirstLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
// MemoryPHI should not already exist.
@@ -156,7 +156,7 @@ TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
// Create a store on the left
// Add the store
- B.SetInsertPoint(Left->begin());
+ B.SetInsertPoint(Left, Left->begin());
StoreInst *LeftStore = B.CreateStore(B.getInt8(16), PointerArg);
MemoryAccess *LeftStoreAccess = Updater.createMemoryAccessInBB(
LeftStore, nullptr, Left, MemorySSA::Beginning);
@@ -167,7 +167,7 @@ TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
EXPECT_NE(MP, nullptr);
// Add the second load
- B.SetInsertPoint(Merge->begin());
+ B.SetInsertPoint(Merge, Merge->begin());
LoadInst *SecondLoad = B.CreateLoad(B.getInt8Ty(), PointerArg);
// Create the load memory access
@@ -181,7 +181,7 @@ TEST_F(MemorySSATest, CreateLoadsAndStoreUpdater) {
EXPECT_EQ(MergePhi->getIncomingValue(0), EntryStoreAccess);
EXPECT_EQ(MergePhi->getIncomingValue(1), LeftStoreAccess);
// Now create a store below the existing one in the entry
- B.SetInsertPoint(--Entry->end());
+ B.SetInsertPoint(Entry, --Entry->end());
StoreInst *SecondEntryStore = B.CreateStore(B.getInt8(16), PointerArg);
MemoryAccess *SecondEntryStoreAccess = Updater.createMemoryAccessInBB(
SecondEntryStore, nullptr, Entry, MemorySSA::End);
@@ -210,7 +210,7 @@ TEST_F(MemorySSATest, CreateALoadUpdater) {
BasicBlock *Merge(BasicBlock::Create(C, "", F));
B.SetInsertPoint(Entry);
B.CreateCondBr(B.getTrue(), Left, Right);
- B.SetInsertPoint(Left->begin());
+ B.SetInsertPoint(Left, Left->begin());
Argument *PointerArg = &*F->arg_begin();
B.SetInsertPoint(Left);
B.CreateBr(Merge);
@@ -220,7 +220,7 @@ TEST_F(MemorySSATest, CreateALoadUpdater) {
setupAnalyses();
MemorySSA &MSSA = *Analyses->MSSA;
MemorySSAUpdater Updater(&MSSA);
- B.SetInsertPoint(Left->begin());
+ B.SetInsertPoint(Left, Left->begin());
// Add the store
StoreInst *SI = B.CreateStore(B.getInt8(16), PointerArg);
MemoryAccess *StoreAccess =
@@ -232,7 +232,7 @@ TEST_F(MemorySSATest, CreateALoadUpdater) {
EXPECT_NE(MP, nullptr);
// Add the load
- B.SetInsertPoint(Merge->begin());
+ B.SetInsertPoint(Merge, Merge->begin());
LoadInst *LoadInst = B.CreateLoad(B.getInt8Ty(), PointerArg);
// Create the load memory access
@@ -253,7 +253,7 @@ TEST_F(MemorySSATest, SinkLoad) {
BasicBlock *Merge(BasicBlock::Create(C, "", F));
B.SetInsertPoint(Entry);
B.CreateCondBr(B.getTrue(), Left, Right);
- B.SetInsertPoint(Left->begin());
+ B.SetInsertPoint(Left, Left->begin());
Argument *PointerArg = &*F->arg_begin();
B.SetInsertPoint(Left);
B.CreateBr(Merge);
@@ -261,10 +261,10 @@ TEST_F(MemorySSATest, SinkLoad) {
B.CreateBr(Merge);
// Load in left block
- B.SetInsertPoint(Left->begin());
+ B.SetInsertPoint(Left, Left->begin());
LoadInst *LoadInst1 = B.CreateLoad(B.getInt8Ty(), PointerArg);
// Store in merge block
- B.SetInsertPoint(Merge->begin());
+ B.SetInsertPoint(Merge, Merge->begin());
B.CreateStore(B.getInt8(16), PointerArg);
setupAnalyses();
diff --git a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
index 0adb615..3ed3034 100644
--- a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
+++ b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp
@@ -2253,7 +2253,7 @@ TEST_F(OpenMPIRBuilderTest, StaticWorkshareLoopTarget) {
BasicBlock *Preheader = CLI->getPreheader();
Value *TripCount = CLI->getTripCount();
- Builder.SetInsertPoint(BB->getFirstInsertionPt());
+ Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
IRBuilder<>::InsertPoint AfterIP = OMPBuilder.applyWorkshareLoop(
DL, CLI, AllocaIP, true, OMP_SCHEDULE_Static, nullptr, false, false,
@@ -2317,7 +2317,7 @@ TEST_F(OpenMPIRBuilderTest, StaticWorkShareLoop) {
Value *IV = CLI->getIndVar();
BasicBlock *ExitBlock = CLI->getExit();
- Builder.SetInsertPoint(BB->getFirstInsertionPt());
+ Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
InsertPointTy AllocaIP = Builder.saveIP();
OMPBuilder.applyWorkshareLoop(DL, CLI, AllocaIP, /*NeedsBarrier=*/true,
@@ -2507,7 +2507,7 @@ TEST_P(OpenMPIRBuilderTestWithParams, DynamicWorkShareLoop) {
Loc, LoopBodyGen, StartVal, StopVal, StepVal,
/*IsSigned=*/false, /*InclusiveStop=*/false);
- Builder.SetInsertPoint(BB->getFirstInsertionPt());
+ Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
InsertPointTy AllocaIP = Builder.saveIP();
// Collect all the info from CLI, as it isn't usable after the call to
@@ -2649,7 +2649,7 @@ TEST_F(OpenMPIRBuilderTest, DynamicWorkShareLoopOrdered) {
Loc, LoopBodyGen, StartVal, StopVal, StepVal,
/*IsSigned=*/false, /*InclusiveStop=*/false);
- Builder.SetInsertPoint(BB->getFirstInsertionPt());
+ Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
InsertPointTy AllocaIP = Builder.saveIP();
// Collect all the info from CLI, as it isn't usable after the call to
@@ -4850,7 +4850,7 @@ static bool findGEPZeroOne(Value *Ptr, Value *&Zero, Value *&One) {
static OpenMPIRBuilder::InsertPointTy
sumReduction(OpenMPIRBuilder::InsertPointTy IP, Value *LHS, Value *RHS,
Value *&Result) {
- IRBuilder<> Builder(IP.getPoint());
+ IRBuilder<> Builder(IP.getBlock(), IP.getPoint());
Result = Builder.CreateFAdd(LHS, RHS, "red.add");
return Builder.saveIP();
}
@@ -4858,7 +4858,7 @@ sumReduction(OpenMPIRBuilder::InsertPointTy IP, Value *LHS, Value *RHS,
static OpenMPIRBuilder::InsertPointTy
sumAtomicReduction(OpenMPIRBuilder::InsertPointTy IP, Type *Ty, Value *LHS,
Value *RHS) {
- IRBuilder<> Builder(IP.getPoint());
+ IRBuilder<> Builder(IP.getBlock(), IP.getPoint());
Value *Partial = Builder.CreateLoad(Ty, RHS, "red.partial");
Builder.CreateAtomicRMW(AtomicRMWInst::FAdd, LHS, Partial, std::nullopt,
AtomicOrdering::Monotonic);
@@ -4868,7 +4868,7 @@ sumAtomicReduction(OpenMPIRBuilder::InsertPointTy IP, Type *Ty, Value *LHS,
static OpenMPIRBuilder::InsertPointTy
xorReduction(OpenMPIRBuilder::InsertPointTy IP, Value *LHS, Value *RHS,
Value *&Result) {
- IRBuilder<> Builder(IP.getPoint());
+ IRBuilder<> Builder(IP.getBlock(), IP.getPoint());
Result = Builder.CreateXor(LHS, RHS, "red.xor");
return Builder.saveIP();
}
@@ -4876,7 +4876,7 @@ xorReduction(OpenMPIRBuilder::InsertPointTy IP, Value *LHS, Value *RHS,
static OpenMPIRBuilder::InsertPointTy
xorAtomicReduction(OpenMPIRBuilder::InsertPointTy IP, Type *Ty, Value *LHS,
Value *RHS) {
- IRBuilder<> Builder(IP.getPoint());
+ IRBuilder<> Builder(IP.getBlock(), IP.getPoint());
Value *Partial = Builder.CreateLoad(Ty, RHS, "red.partial");
Builder.CreateAtomicRMW(AtomicRMWInst::Xor, LHS, Partial, std::nullopt,
AtomicOrdering::Monotonic);
diff --git a/llvm/unittests/IR/BasicBlockTest.cpp b/llvm/unittests/IR/BasicBlockTest.cpp
index 2e99e53..3756f22 100644
--- a/llvm/unittests/IR/BasicBlockTest.cpp
+++ b/llvm/unittests/IR/BasicBlockTest.cpp
@@ -202,7 +202,7 @@ TEST_F(InstrOrderInvalidationTest, InsertInvalidation) {
EXPECT_TRUE(BB->isInstrOrderValid());
// Invalidate orders.
- IRBuilder<> Builder(I2->getIterator());
+ IRBuilder<> Builder(BB, I2->getIterator());
Instruction *I1a = Builder.CreateCall(Nop);
EXPECT_FALSE(BB->isInstrOrderValid());
EXPECT_TRUE(I1->comesBefore(I1a));
diff --git a/llvm/unittests/IR/DebugInfoTest.cpp b/llvm/unittests/IR/DebugInfoTest.cpp
index 50f2b0d..cac8acb 100644
--- a/llvm/unittests/IR/DebugInfoTest.cpp
+++ b/llvm/unittests/IR/DebugInfoTest.cpp
@@ -698,7 +698,7 @@ TEST(IRBuilder, GetSetInsertionPointWithEmptyBasicBlock) {
SmallVector<Value *, 3> Args = {DIV, DIV, DIV};
Builder.CreateCall(DbgDeclare, Args);
auto IP = BB->getFirstInsertionPt();
- Builder.SetInsertPoint(IP);
+ Builder.SetInsertPoint(BB.get(), IP);
}
TEST(AssignmentTrackingTest, InstrMethods) {
diff --git a/llvm/unittests/IR/IRBuilderTest.cpp b/llvm/unittests/IR/IRBuilderTest.cpp
index 8e61cd2..ff96df8 100644
--- a/llvm/unittests/IR/IRBuilderTest.cpp
+++ b/llvm/unittests/IR/IRBuilderTest.cpp
@@ -1188,7 +1188,7 @@ TEST_F(IRBuilderTest, DebugLoc) {
EXPECT_EQ(DL1, Call1->getDebugLoc());
Call1->setDebugLoc(DL2);
- Builder.SetInsertPoint(Call1->getIterator());
+ Builder.SetInsertPoint(Call1->getParent(), Call1->getIterator());
EXPECT_EQ(DL2, Builder.getCurrentDebugLocation());
auto Call2 = Builder.CreateCall(Callee, std::nullopt);
EXPECT_EQ(DL2, Call2->getDebugLoc());
@@ -1311,10 +1311,10 @@ TEST_F(IRBuilderTest, CTAD) {
// The block BB is empty, so don't test this one.
// IRBuilder Builder5(BB->getTerminator());
// static_assert(std::is_same_v<decltype(Builder5), IRBuilder<>>);
- IRBuilder Builder6(BB->end(), Folder);
+ IRBuilder Builder6(BB, BB->end(), Folder);
static_assert(
std::is_same_v<decltype(Builder6), IRBuilder<InstSimplifyFolder>>);
- IRBuilder Builder7(BB->end());
+ IRBuilder Builder7(BB, BB->end());
static_assert(std::is_same_v<decltype(Builder7), IRBuilder<>>);
}
}
diff --git a/llvm/unittests/Transforms/Utils/SSAUpdaterBulkTest.cpp b/llvm/unittests/Transforms/Utils/SSAUpdaterBulkTest.cpp
index 349b2d8..b75a492 100644
--- a/llvm/unittests/Transforms/Utils/SSAUpdaterBulkTest.cpp
+++ b/llvm/unittests/Transforms/Utils/SSAUpdaterBulkTest.cpp
@@ -62,7 +62,7 @@ TEST(SSAUpdaterBulk, SimpleMerge) {
Value *SubOp2 = B.CreateSub(FirstArg, ConstantInt::get(I32Ty, 4));
B.CreateBr(MergeBB);
- B.SetInsertPoint(MergeBB->begin());
+ B.SetInsertPoint(MergeBB, MergeBB->begin());
auto *I1 = cast<Instruction>(B.CreateAdd(AddOp1, ConstantInt::get(I32Ty, 5)));
auto *I2 = cast<Instruction>(B.CreateAdd(AddOp2, ConstantInt::get(I32Ty, 6)));
auto *I3 = cast<Instruction>(B.CreateAdd(SubOp1, SubOp2));
diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
index 2fee945..35d992e 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -193,7 +193,7 @@ static llvm::BasicBlock *convertOmpOpRegions(
if (continuationBlockPHIs) {
llvm::IRBuilderBase::InsertPointGuard guard(builder);
continuationBlockPHIs->reserve(continuationBlockPHITypes.size());
- builder.SetInsertPoint(continuationBlock->begin());
+ builder.SetInsertPoint(continuationBlock, continuationBlock->begin());
for (llvm::Type *ty : continuationBlockPHITypes)
continuationBlockPHIs->push_back(builder.CreatePHI(ty, numYields));
}
@@ -413,7 +413,8 @@ static LogicalResult inlineConvertOmpRegions(
return failure();
if (continuationBlockArgs)
llvm::append_range(*continuationBlockArgs, phis);
- builder.SetInsertPoint(continuationBlock->getFirstInsertionPt());
+ builder.SetInsertPoint(continuationBlock,
+ continuationBlock->getFirstInsertionPt());
return success();
}
diff --git a/polly/lib/CodeGen/BlockGenerators.cpp b/polly/lib/CodeGen/BlockGenerators.cpp
index 90bdc0b..da0e503 100644
--- a/polly/lib/CodeGen/BlockGenerators.cpp
+++ b/polly/lib/CodeGen/BlockGenerators.cpp
@@ -630,9 +630,9 @@ void BlockGenerator::generateConditionalExecution(
// Put the client code into the conditional block and continue in the merge
// block afterwards.
- Builder.SetInsertPoint(ThenBlock->getFirstInsertionPt());
+ Builder.SetInsertPoint(ThenBlock, ThenBlock->getFirstInsertionPt());
GenThenFunc();
- Builder.SetInsertPoint(TailBlock->getFirstInsertionPt());
+ Builder.SetInsertPoint(TailBlock, TailBlock->getFirstInsertionPt());
}
static std::string getInstName(Value *Val) {