diff options
author | Jeremy Morse <jeremy.morse@sony.com> | 2025-07-16 11:41:32 +0100 |
---|---|---|
committer | GitHub <noreply@github.com> | 2025-07-16 11:41:32 +0100 |
commit | 5b8c15c6e7f3ac17383c12483f466a721b1040ba (patch) | |
tree | 41d75f628ff729c860c4af44a55cf1836084c7a1 | |
parent | 38be53aa04de8c6d494de8074328ac8907f3f631 (diff) | |
download | llvm-5b8c15c6e7f3ac17383c12483f466a721b1040ba.zip llvm-5b8c15c6e7f3ac17383c12483f466a721b1040ba.tar.gz llvm-5b8c15c6e7f3ac17383c12483f466a721b1040ba.tar.bz2 |
[DebugInfo] Remove getPrevNonDebugInstruction (#148859)
With the advent of intrinsic-less debug-info, we no longer need to
scatter calls to getPrevNonDebugInstruction around the codebase. Remove
most of them -- there are one or two that have the "SkipPseudoOp" flag
turned on; however, they don't seem to be in positions where skipping
anything would be reasonable.
-rw-r--r-- | llvm/include/llvm/IR/Instruction.h | 11 | ||||
-rw-r--r-- | llvm/include/llvm/Transforms/Utils/LockstepReverseIterator.h | 4 | ||||
-rw-r--r-- | llvm/lib/CodeGen/CodeGenPrepare.cpp | 4 | ||||
-rw-r--r-- | llvm/lib/CodeGen/StackProtector.cpp | 2 | ||||
-rw-r--r-- | llvm/lib/IR/Instruction.cpp | 8 | ||||
-rw-r--r-- | llvm/lib/Transforms/IPO/OpenMPOpt.cpp | 2 | ||||
-rw-r--r-- | llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp | 2 | ||||
-rw-r--r-- | llvm/lib/Transforms/InstCombine/InstructionCombining.cpp | 2 | ||||
-rw-r--r-- | llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp | 2 | ||||
-rw-r--r-- | llvm/lib/Transforms/Scalar/GVN.cpp | 2 | ||||
-rw-r--r-- | llvm/lib/Transforms/Scalar/GVNSink.cpp | 2 | ||||
-rw-r--r-- | llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp | 3 | ||||
-rw-r--r-- | llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp | 8 |
13 files changed, 16 insertions, 36 deletions
diff --git a/llvm/include/llvm/IR/Instruction.h b/llvm/include/llvm/IR/Instruction.h index c317a06..5d25804 100644 --- a/llvm/include/llvm/IR/Instruction.h +++ b/llvm/include/llvm/IR/Instruction.h @@ -898,17 +898,6 @@ public: /// Return true if the instruction is a DbgInfoIntrinsic or PseudoProbeInst. LLVM_ABI bool isDebugOrPseudoInst() const LLVM_READONLY; - /// Return a pointer to the previous non-debug instruction in the same basic - /// block as 'this', or nullptr if no such instruction exists. Skip any pseudo - /// operations if \c SkipPseudoOp is true. - LLVM_ABI const Instruction * - getPrevNonDebugInstruction(bool SkipPseudoOp = false) const; - Instruction *getPrevNonDebugInstruction(bool SkipPseudoOp = false) { - return const_cast<Instruction *>( - static_cast<const Instruction *>(this)->getPrevNonDebugInstruction( - SkipPseudoOp)); - } - /// Create a copy of 'this' instruction that is identical in all ways except /// the following: /// * The instruction has no parent diff --git a/llvm/include/llvm/Transforms/Utils/LockstepReverseIterator.h b/llvm/include/llvm/Transforms/Utils/LockstepReverseIterator.h index cd525a9..5b92b33 100644 --- a/llvm/include/llvm/Transforms/Utils/LockstepReverseIterator.h +++ b/llvm/include/llvm/Transforms/Utils/LockstepReverseIterator.h @@ -61,7 +61,7 @@ public: } Insts.clear(); for (BasicBlock *BB : Blocks) { - Instruction *Prev = BB->getTerminator()->getPrevNonDebugInstruction(); + Instruction *Prev = BB->getTerminator()->getPrevNode(); if (!Prev) { // Block wasn't big enough - only contained a terminator. 
if constexpr (EarlyFailure) { @@ -108,7 +108,7 @@ public: return *this; SmallVector<Instruction *, 4> NewInsts; for (Instruction *Inst : Insts) { - Instruction *Prev = Inst->getPrevNonDebugInstruction(); + Instruction *Prev = Inst->getPrevNode(); if (!Prev) { if constexpr (!EarlyFailure) { this->ActiveBlocks.remove(Inst->getParent()); diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp index 70a9788..d9d41f1 100644 --- a/llvm/lib/CodeGen/CodeGenPrepare.cpp +++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -3015,7 +3015,7 @@ bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB, // %phi = phi ptr [ %0, %bb0 ], [ %2, %entry ] if (PredBB && PredBB->getSingleSuccessor() == BB) CI = dyn_cast_or_null<CallInst>( - PredBB->getTerminator()->getPrevNonDebugInstruction(true)); + PredBB->getTerminator()->getPrevNode()); if (CI && CI->use_empty() && isIntrinsicOrLFToBeTailCalled(TLInfo, CI) && @@ -3032,7 +3032,7 @@ bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB, for (BasicBlock *Pred : predecessors(BB)) { if (!VisitedBBs.insert(Pred).second) continue; - if (Instruction *I = Pred->rbegin()->getPrevNonDebugInstruction(true)) { + if (Instruction *I = Pred->rbegin()->getPrevNode()) { CallInst *CI = dyn_cast<CallInst>(I); if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) && attributesPermitTailCall(F, CI, RetI, *TLI)) { diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp index 3ec7008..9cc9af8 100644 --- a/llvm/lib/CodeGen/StackProtector.cpp +++ b/llvm/lib/CodeGen/StackProtector.cpp @@ -626,7 +626,7 @@ bool InsertStackProtectors(const TargetMachine *TM, Function *F, // If we're instrumenting a block with a tail call, the check has to be // inserted before the call rather than between it and the return. 
- Instruction *Prev = CheckLoc->getPrevNonDebugInstruction(); + Instruction *Prev = CheckLoc->getPrevNode(); if (auto *CI = dyn_cast_if_present<CallInst>(Prev)) if (CI->isTailCall() && isInTailCallPosition(*CI, *TM)) CheckLoc = Prev; diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp index c6dca72..763cc18 100644 --- a/llvm/lib/IR/Instruction.cpp +++ b/llvm/lib/IR/Instruction.cpp @@ -1235,14 +1235,6 @@ bool Instruction::isDebugOrPseudoInst() const { return isa<DbgInfoIntrinsic>(this) || isa<PseudoProbeInst>(this); } -const Instruction * -Instruction::getPrevNonDebugInstruction(bool SkipPseudoOp) const { - for (const Instruction *I = getPrevNode(); I; I = I->getPrevNode()) - if (!isa<DbgInfoIntrinsic>(I) && !(SkipPseudoOp && isa<PseudoProbeInst>(I))) - return I; - return nullptr; -} - const DebugLoc &Instruction::getStableDebugLoc() const { return getDebugLoc(); } diff --git a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp index 5de2285..5e2247f 100644 --- a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp +++ b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp @@ -2875,7 +2875,7 @@ struct AAExecutionDomainFunction : public AAExecutionDomain { if (It->getSecond().IsReachedFromAlignedBarrierOnly) break; return false; - } while ((CurI = CurI->getPrevNonDebugInstruction())); + } while ((CurI = CurI->getPrevNode())); // Delayed decision on the forward pass to allow aligned barrier detection // in the backwards traversal. 
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp index 73293bb..3321435 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -3933,7 +3933,7 @@ Instruction *InstCombinerImpl::visitFenceInst(FenceInst &FI) { if (NFI && isIdenticalOrStrongerFence(NFI, &FI)) return eraseInstFromFunction(FI); - if (auto *PFI = dyn_cast_or_null<FenceInst>(FI.getPrevNonDebugInstruction())) + if (auto *PFI = dyn_cast_or_null<FenceInst>(FI.getPrevNode())) if (isIdenticalOrStrongerFence(PFI, &FI)) return eraseInstFromFunction(FI); return nullptr; diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp index 91a1b61..b587d76 100644 --- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp +++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp @@ -3890,7 +3890,7 @@ bool InstCombinerImpl::removeInstructionsBeforeUnreachable(Instruction &I) { // This includes instructions like stores and "llvm.assume" that may not get // removed by simple dead code elimination. bool Changed = false; - while (Instruction *Prev = I.getPrevNonDebugInstruction()) { + while (Instruction *Prev = I.getPrevNode()) { // While we theoretically can erase EH, that would result in a block that // used to start with an EH no longer starting with EH, which is invalid. 
// To make it valid, we'd need to fixup predecessors to no longer refer to diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp index dfbe4f8..5957940 100644 --- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -3424,7 +3424,7 @@ static void findStoresToUninstrumentedArgAllocas( isa<Argument>(cast<CastInst>(Val)->getOperand(0)) && // Check that the cast appears directly before the store. Otherwise // moving the cast before InsBefore may break the IR. - Val == It->getPrevNonDebugInstruction(); + Val == It->getPrevNode(); bool IsArgInit = IsDirectArgInit || IsArgInitViaCast; if (!IsArgInit) continue; diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp index d9d05c3..8bff458 100644 --- a/llvm/lib/Transforms/Scalar/GVN.cpp +++ b/llvm/lib/Transforms/Scalar/GVN.cpp @@ -1310,7 +1310,7 @@ static Value *findDominatingValue(const MemoryLocation &Loc, Type *LoadTy, BatchAAResults BatchAA(*AA); for (BasicBlock *BB = FromBB; BB; BB = BB->getSinglePredecessor()) for (auto *Inst = BB == FromBB ? From : BB->getTerminator(); - Inst != nullptr; Inst = Inst->getPrevNonDebugInstruction()) { + Inst != nullptr; Inst = Inst->getPrevNode()) { // Stop the search if limit is reached. 
if (++NumVisitedInsts > MaxNumVisitedInsts) return nullptr; diff --git a/llvm/lib/Transforms/Scalar/GVNSink.cpp b/llvm/lib/Transforms/Scalar/GVNSink.cpp index 2058df3..a5fc0b4 100644 --- a/llvm/lib/Transforms/Scalar/GVNSink.cpp +++ b/llvm/lib/Transforms/Scalar/GVNSink.cpp @@ -799,7 +799,7 @@ void GVNSink::sinkLastInstruction(ArrayRef<BasicBlock *> Blocks, BasicBlock *BBEnd) { SmallVector<Instruction *, 4> Insts; for (BasicBlock *BB : Blocks) - Insts.push_back(BB->getTerminator()->getPrevNonDebugInstruction()); + Insts.push_back(BB->getTerminator()->getPrevNode()); Instruction *I0 = Insts.front(); SmallVector<Value *, 4> NewOperands; diff --git a/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp index a09303b..60e5df0 100644 --- a/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp +++ b/llvm/lib/Transforms/Scalar/SimplifyCFGPass.cpp @@ -194,8 +194,7 @@ static bool tailMergeBlocksWithSimilarFunctionTerminators(Function &F, // Calls to experimental_deoptimize must be followed by a return // of the value computed by experimental_deoptimize. // I.e., we can not change `ret` to `br` for this block. 
- if (auto *CI = - dyn_cast_or_null<CallInst>(Term->getPrevNonDebugInstruction())) { + if (auto *CI = dyn_cast_or_null<CallInst>(Term->getPrevNode())) { if (Function *F = CI->getCalledFunction()) if (Intrinsic::ID ID = F->getIntrinsicID()) if (ID == Intrinsic::experimental_deoptimize) diff --git a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp index 55dc8a7..d6b578a 100644 --- a/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp +++ b/llvm/unittests/Frontend/OpenMPIRBuilderTest.cpp @@ -2736,8 +2736,8 @@ TEST_P(OpenMPIRBuilderTestWithParams, DynamicWorkShareLoop) { EXPECT_EQ(OrigUpperBound->getValue(), 21); EXPECT_EQ(OrigStride->getValue(), 1); - CallInst *FiniCall = dyn_cast<CallInst>( - &*(LatchBlock->getTerminator()->getPrevNonDebugInstruction(true))); + CallInst *FiniCall = + dyn_cast<CallInst>(&*(LatchBlock->getTerminator()->getPrevNode())); EXPECT_EQ(FiniCall, nullptr); // The original loop iterator should only be used in the condition, in the @@ -2840,8 +2840,8 @@ TEST_F(OpenMPIRBuilderTest, DynamicWorkShareLoopOrdered) { EXPECT_EQ(SchedVal->getValue(), static_cast<uint64_t>(OMPScheduleType::OrderedStaticChunked)); - CallInst *FiniCall = dyn_cast<CallInst>( - &*(LatchBlock->getTerminator()->getPrevNonDebugInstruction(true))); + CallInst *FiniCall = + dyn_cast<CallInst>(&*(LatchBlock->getTerminator()->getPrevNode())); ASSERT_NE(FiniCall, nullptr); EXPECT_EQ(FiniCall->getCalledFunction()->getName(), "__kmpc_dispatch_fini_4u"); |