Diffstat (limited to 'clang/lib/CodeGen')
-rw-r--r--   clang/lib/CodeGen/CGAtomic.cpp             | 155
-rw-r--r--   clang/lib/CodeGen/CGBlocks.cpp             |   2
-rw-r--r--   clang/lib/CodeGen/CGBuiltin.cpp            |   2
-rw-r--r--   clang/lib/CodeGen/CGCUDANV.cpp             |   2
-rw-r--r--   clang/lib/CodeGen/CGExpr.cpp               |   6
-rw-r--r--   clang/lib/CodeGen/CGObjCMac.cpp            |  26
-rw-r--r--   clang/lib/CodeGen/CGOpenMPRuntime.cpp      |   6
-rw-r--r--   clang/lib/CodeGen/CGPointerAuth.cpp        |   4
-rw-r--r--   clang/lib/CodeGen/CodeGenTypeCache.h       |   2
-rw-r--r--   clang/lib/CodeGen/ItaniumCXXABI.cpp        |  24
-rw-r--r--   clang/lib/CodeGen/MicrosoftCXXABI.cpp      |  32
-rw-r--r--   clang/lib/CodeGen/TargetBuiltins/ARM.cpp   |  22
-rw-r--r--   clang/lib/CodeGen/TargetBuiltins/PPC.cpp   |   2
-rw-r--r--   clang/lib/CodeGen/Targets/PPC.cpp          |   2
14 files changed, 160 insertions, 127 deletions
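
Note: the hunks below make two independent changes. First, CodeGenTypeCache's UnqualPtrTy is renamed to DefaultPtrTy (the same opaque pointer type in the target's default address space; the old name appears to predate opaque pointers), with every use site updated mechanically. Second, the atomic compare-exchange lowering now threads the caller's original `expected` address (ExpectedResult / OriginalVal1) down to emitAtomicCmpXchg, so the failure path writes back only the expected object's own bytes rather than the full padded atomic width. A sketch of the user-level pattern the second change affects -- illustrative only, not taken from the patch:

    #include <atomic>

    struct S3 { char a, b, c; };   // sizeof(S3) == 3; std::atomic<S3> is
                                   // typically padded to a 4-byte width

    void cas(std::atomic<S3> &obj, S3 &expected, S3 desired) {
      // On failure, only sizeof(S3) bytes of `expected` may legitimately be
      // written; storing the full 4-byte atomic width would clobber the byte
      // that follows the 3-byte object. The new lowering spills the loaded
      // value to a temporary and memcpys just the first 3 bytes back.
      obj.compare_exchange_strong(expected, desired);
    }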
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index d95dab3..a012581 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -86,7 +86,7 @@ namespace {
       llvm::Value *StoragePtr = CGF.Builder.CreateConstGEP1_64(
           CGF.Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());
       StoragePtr = CGF.Builder.CreateAddrSpaceCast(
-          StoragePtr, CGF.UnqualPtrTy, "atomic_bitfield_base");
+          StoragePtr, CGF.DefaultPtrTy, "atomic_bitfield_base");
       BFI = OrigBFI;
       BFI.Offset = Offset;
       BFI.StorageSize = AtomicSizeInBits;
@@ -374,10 +374,9 @@ bool AtomicInfo::emitMemSetZeroIfNecessary() const {
 }
 
 static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
-                              Address Dest, Address Ptr,
-                              Address Val1, Address Val2,
-                              uint64_t Size,
-                              llvm::AtomicOrdering SuccessOrder,
+                              Address Dest, Address Ptr, Address Val1,
+                              Address Val2, Address ExpectedResult,
+                              uint64_t Size, llvm::AtomicOrdering SuccessOrder,
                               llvm::AtomicOrdering FailureOrder,
                               llvm::SyncScope::ID Scope) {
   // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
@@ -411,8 +410,30 @@ static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
   CGF.Builder.SetInsertPoint(StoreExpectedBB);
 
   // Update the memory at Expected with Old's value.
-  auto *I = CGF.Builder.CreateStore(Old, Val1);
-  CGF.addInstToCurrentSourceAtom(I, Old);
+  llvm::Type *ExpectedType = ExpectedResult.getElementType();
+  const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
+  uint64_t ExpectedSizeInBytes = DL.getTypeStoreSize(ExpectedType);
+
+  if (ExpectedSizeInBytes == Size) {
+    // Sizes match: store directly
+    auto *I = CGF.Builder.CreateStore(Old, ExpectedResult);
+    CGF.addInstToCurrentSourceAtom(I, Old);
+  } else {
+    // store only the first ExpectedSizeInBytes bytes of Old
+    llvm::Type *OldType = Old->getType();
+
+    // Allocate temporary storage for Old value
+    Address OldTmp =
+        CGF.CreateTempAlloca(OldType, Ptr.getAlignment(), "old.tmp");
+
+    // Store Old into this temporary
+    auto *I = CGF.Builder.CreateStore(Old, OldTmp);
+    CGF.addInstToCurrentSourceAtom(I, Old);
+
+    // Perform memcpy for first ExpectedSizeInBytes bytes
+    CGF.Builder.CreateMemCpy(ExpectedResult, OldTmp, ExpectedSizeInBytes,
+                             /*isVolatile=*/false);
+  }
 
   // Finally, branch to the exit point.
   CGF.Builder.CreateBr(ContinueBB);
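
Note: in the failure path above, when the expected object is narrower than the atomic width, the emitted IR now spills Old into a temporary alloca and copies only the first ExpectedSizeInBytes bytes back. Because the copy operates on the stored byte representation, the object's bytes land exactly where they started on either endianness. A minimal C++ model of the effect (hypothetical helper, not part of the patch):

    #include <cstddef>
    #include <cstring>

    // `wide` points at the full atomic-width value produced by cmpxchg;
    // `expected` points at the caller's narrower object.
    void storeExpectedPrefix(void *expected, const void *wide,
                             std::size_t expectedSize) {
      // Copy only the leading expectedSize bytes; bytes past the object
      // (the padding of the atomic representation) are never written.
      std::memcpy(expected, wide, expectedSize);
    }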
@@ -425,13 +446,11 @@ static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
 /// Given an ordering required on success, emit all possible cmpxchg
 /// instructions to cope with the provided (but possibly only dynamically known)
 /// FailureOrder.
-static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
-                                        bool IsWeak, Address Dest, Address Ptr,
-                                        Address Val1, Address Val2,
-                                        llvm::Value *FailureOrderVal,
-                                        uint64_t Size,
-                                        llvm::AtomicOrdering SuccessOrder,
-                                        llvm::SyncScope::ID Scope) {
+static void emitAtomicCmpXchgFailureSet(
+    CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr,
+    Address Val1, Address Val2, Address ExpectedResult,
+    llvm::Value *FailureOrderVal, uint64_t Size,
+    llvm::AtomicOrdering SuccessOrder, llvm::SyncScope::ID Scope) {
   llvm::AtomicOrdering FailureOrder;
   if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
     auto FOS = FO->getSExtValue();
@@ -458,8 +477,8 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
     // success argument". This condition has been lifted and the only
    // precondition is 31.7.2.18. Effectively treat this as a DR and skip
     // language version checks.
-    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
-                      FailureOrder, Scope);
+    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult,
+                      Size, SuccessOrder, FailureOrder, Scope);
     return;
   }
 
@@ -483,18 +502,19 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
 
   // Emit all the different atomics
   CGF.Builder.SetInsertPoint(MonotonicBB);
-  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
-                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
+  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
+                    SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
   CGF.Builder.CreateBr(ContBB);
 
   CGF.Builder.SetInsertPoint(AcquireBB);
-  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
-                    llvm::AtomicOrdering::Acquire, Scope);
+  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
+                    SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
   CGF.Builder.CreateBr(ContBB);
 
   CGF.Builder.SetInsertPoint(SeqCstBB);
-  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
-                    llvm::AtomicOrdering::SequentiallyConsistent, Scope);
+  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
+                    SuccessOrder, llvm::AtomicOrdering::SequentiallyConsistent,
+                    Scope);
   CGF.Builder.CreateBr(ContBB);
 
   CGF.Builder.SetInsertPoint(ContBB);
@@ -538,8 +558,9 @@ static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
 
 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                          Address Ptr, Address Val1, Address Val2,
-                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
-                         uint64_t Size, llvm::AtomicOrdering Order,
+                         Address ExpectedResult, llvm::Value *IsWeak,
+                         llvm::Value *FailureOrder, uint64_t Size,
+                         llvm::AtomicOrdering Order,
                          llvm::SyncScope::ID Scope) {
   llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
   bool PostOpMinMax = false;
@@ -554,13 +575,15 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
   case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
   case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
     emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
-                                FailureOrder, Size, Order, Scope);
+                                ExpectedResult, FailureOrder, Size, Order,
+                                Scope);
     return;
   case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
   case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
   case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
     emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
-                                FailureOrder, Size, Order, Scope);
+                                ExpectedResult, FailureOrder, Size, Order,
+                                Scope);
     return;
   case AtomicExpr::AO__atomic_compare_exchange:
   case AtomicExpr::AO__atomic_compare_exchange_n:
@@ -568,7 +591,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
   case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
     if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
       emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
-                                  Val1, Val2, FailureOrder, Size, Order, Scope);
+                                  Val1, Val2, ExpectedResult, FailureOrder,
+                                  Size, Order, Scope);
     } else {
       // Create all the relevant BB's
       llvm::BasicBlock *StrongBB =
@@ -582,12 +606,14 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
 
       CGF.Builder.SetInsertPoint(StrongBB);
       emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
-                                  FailureOrder, Size, Order, Scope);
+                                  ExpectedResult, FailureOrder, Size, Order,
+                                  Scope);
       CGF.Builder.CreateBr(ContBB);
 
       CGF.Builder.SetInsertPoint(WeakBB);
       emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
-                                  FailureOrder, Size, Order, Scope);
+                                  ExpectedResult, FailureOrder, Size, Order,
+                                  Scope);
       CGF.Builder.CreateBr(ContBB);
 
       CGF.Builder.SetInsertPoint(ContBB);
@@ -797,9 +823,9 @@ EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
 
 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                          Address Ptr, Address Val1, Address Val2,
-                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
-                         uint64_t Size, llvm::AtomicOrdering Order,
-                         llvm::Value *Scope) {
+                         Address OriginalVal1, llvm::Value *IsWeak,
+                         llvm::Value *FailureOrder, uint64_t Size,
+                         llvm::AtomicOrdering Order, llvm::Value *Scope) {
   auto ScopeModel = Expr->getScopeModel();
 
   // LLVM atomic instructions always have sync scope. If clang atomic
@@ -816,8 +842,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
           Order, CGF.getLLVMContext());
     else
       SS = llvm::SyncScope::System;
-    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
-                 Order, SS);
+    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                 FailureOrder, Size, Order, SS);
     return;
   }
 
@@ -826,8 +852,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
     auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
         CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
         Order, CGF.CGM.getLLVMContext());
-    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
-                 Order, SCID);
+    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                 FailureOrder, Size, Order, SCID);
     return;
   }
 
@@ -852,12 +878,11 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
 
     SI->addCase(Builder.getInt32(S), B);
     Builder.SetInsertPoint(B);
-    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
-                 Order,
-                 CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
-                                                         ScopeModel->map(S),
-                                                         Order,
-                                                         CGF.getLLVMContext()));
+    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                 FailureOrder, Size, Order,
+                 CGF.getTargetHooks().getLLVMSyncScopeID(
+                     CGF.CGM.getLangOpts(), ScopeModel->map(S), Order,
+                     CGF.getLLVMContext()));
     Builder.CreateBr(ContBB);
   }
 
@@ -1058,6 +1083,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
   AtomicInfo Atomics(*this, AtomicVal);
 
+  Address OriginalVal1 = Val1;
   if (ShouldCastToIntPtrTy) {
     Ptr = Atomics.castToAtomicIntPointer(Ptr);
     if (Val1.isValid())
@@ -1301,30 +1327,32 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
     if (llvm::isValidAtomicOrderingCABI(ord))
       switch ((llvm::AtomicOrderingCABI)ord) {
       case llvm::AtomicOrderingCABI::relaxed:
-        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                     llvm::AtomicOrdering::Monotonic, Scope);
+        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                     OrderFail, Size, llvm::AtomicOrdering::Monotonic, Scope);
         break;
       case llvm::AtomicOrderingCABI::consume:
       case llvm::AtomicOrderingCABI::acquire:
         if (IsStore)
           break; // Avoid crashing on code with undefined behavior
-        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                     llvm::AtomicOrdering::Acquire, Scope);
+        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                     OrderFail, Size, llvm::AtomicOrdering::Acquire, Scope);
         break;
       case llvm::AtomicOrderingCABI::release:
         if (IsLoad)
           break; // Avoid crashing on code with undefined behavior
-        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                     llvm::AtomicOrdering::Release, Scope);
+        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                     OrderFail, Size, llvm::AtomicOrdering::Release, Scope);
         break;
       case llvm::AtomicOrderingCABI::acq_rel:
         if (IsLoad || IsStore)
           break; // Avoid crashing on code with undefined behavior
-        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                     llvm::AtomicOrdering::AcquireRelease, Scope);
+        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                     OrderFail, Size, llvm::AtomicOrdering::AcquireRelease,
+                     Scope);
         break;
       case llvm::AtomicOrderingCABI::seq_cst:
-        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
+        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                     OrderFail, Size,
                      llvm::AtomicOrdering::SequentiallyConsistent, Scope);
         break;
       }
@@ -1360,13 +1388,13 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
 
   // Emit all the different atomics
   Builder.SetInsertPoint(MonotonicBB);
-  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-               llvm::AtomicOrdering::Monotonic, Scope);
+  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail,
+               Size, llvm::AtomicOrdering::Monotonic, Scope);
   Builder.CreateBr(ContBB);
   if (!IsStore) {
     Builder.SetInsertPoint(AcquireBB);
-    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                 llvm::AtomicOrdering::Acquire, Scope);
+    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                 OrderFail, Size, llvm::AtomicOrdering::Acquire, Scope);
     Builder.CreateBr(ContBB);
     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                 AcquireBB);
@@ -1375,23 +1403,23 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   }
   if (!IsLoad) {
     Builder.SetInsertPoint(ReleaseBB);
-    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                 llvm::AtomicOrdering::Release, Scope);
+    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                 OrderFail, Size, llvm::AtomicOrdering::Release, Scope);
     Builder.CreateBr(ContBB);
     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
                 ReleaseBB);
   }
   if (!IsLoad && !IsStore) {
     Builder.SetInsertPoint(AcqRelBB);
-    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-                 llvm::AtomicOrdering::AcquireRelease, Scope);
+    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+                 OrderFail, Size, llvm::AtomicOrdering::AcquireRelease, Scope);
     Builder.CreateBr(ContBB);
     SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
                 AcqRelBB);
   }
   Builder.SetInsertPoint(SeqCstBB);
-  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
-               llvm::AtomicOrdering::SequentiallyConsistent, Scope);
+  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail,
+               Size, llvm::AtomicOrdering::SequentiallyConsistent, Scope);
   Builder.CreateBr(ContBB);
   SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
               SeqCstBB);
@@ -1417,6 +1445,11 @@ Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
   uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
   if (SourceSizeInBits != AtomicSizeInBits) {
     Address Tmp = CreateTempAlloca();
+    CGF.Builder.CreateMemSet(
+        Tmp.emitRawPointer(CGF), llvm::ConstantInt::get(CGF.Int8Ty, 0),
+        CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
+        Tmp.getAlignment().getAsAlign());
+
     CGF.Builder.CreateMemCpy(Tmp, Addr,
                              std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
     Addr = Tmp;
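
Note: the CreateMemSet added to convertToAtomicIntPointer zero-fills the atomic-width temporary before the narrower source value is copied in; previously the trailing padding bytes were left undefined, so a cmpxchg comparing the full width could succeed or fail nondeterministically. A rough scalar model of the widening (hypothetical helper, assuming a 4-byte atomic width):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    uint32_t widenTo32(const void *src, std::size_t srcSize) {
      uint32_t tmp = 0;                       // zero the padding first (the fix)
      std::memcpy(&tmp, src, srcSize < 4 ? srcSize : 4);    // then the value
      return tmp;                             // all four bytes deterministic
    }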
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index 597127ab..20a595e 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -1207,7 +1207,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
   } else {
     // Bitcast the block literal to a generic block literal.
     BlockPtr =
-        Builder.CreatePointerCast(BlockPtr, UnqualPtrTy, "block.literal");
+        Builder.CreatePointerCast(BlockPtr, DefaultPtrTy, "block.literal");
 
     // Get pointer to the block invoke function
     FuncPtr = Builder.CreateStructGEP(GenBlockTy, BlockPtr, 3);
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 92dba32..fd14cd6 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -1673,7 +1673,7 @@ static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
       CGF.getLLVMContext(),
       CGF.getContext().getTypeSize(E->getArg(1)->getType()));
   llvm::FunctionType *FTy =
-      llvm::FunctionType::get(CGF.Int8Ty, {CGF.UnqualPtrTy, IntType}, false);
+      llvm::FunctionType::get(CGF.Int8Ty, {CGF.DefaultPtrTy, IntType}, false);
 
   llvm::InlineAsm *IA =
       llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp
index cb16fe1..b463f88 100644
--- a/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/clang/lib/CodeGen/CGCUDANV.cpp
@@ -230,7 +230,7 @@ CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
   IntTy = CGM.IntTy;
   SizeTy = CGM.SizeTy;
   VoidTy = CGM.VoidTy;
-  PtrTy = CGM.UnqualPtrTy;
+  PtrTy = CGM.DefaultPtrTy;
 
   if (CGM.getLangOpts().OffloadViaLLVM)
     Prefix = "llvm";
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 8439ec7..fd73314 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -1756,7 +1756,7 @@ LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                               const char *Name) {
   ErrorUnsupported(E, Name);
   llvm::Type *ElTy = ConvertType(E->getType());
-  llvm::Type *Ty = UnqualPtrTy;
+  llvm::Type *Ty = DefaultPtrTy;
   return MakeAddrLValue(
       Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
 }
@@ -4270,7 +4270,7 @@ void CodeGenFunction::EmitCfiCheckFail() {
       llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
 
   llvm::Value *V = Builder.CreateConstGEP2_32(
-      CfiCheckFailDataTy, Builder.CreatePointerCast(Data, UnqualPtrTy), 0, 0);
+      CfiCheckFailDataTy, Builder.CreatePointerCast(Data, DefaultPtrTy), 0, 0);
 
   Address CheckKindAddr(V, Int8Ty, getIntAlign());
   llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
@@ -5711,7 +5711,7 @@ std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
     if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
       CGF.EmitCXXThrowExpr(ThrowExpr);
       llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
-      llvm::Type *Ty = CGF.UnqualPtrTy;
+      llvm::Type *Ty = CGF.DefaultPtrTy;
       return CGF.MakeAddrLValue(
           Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
           Dead->getType());
diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp
index c571821a..cb5bb40 100644
--- a/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/clang/lib/CodeGen/CGObjCMac.cpp
@@ -338,7 +338,7 @@ public:
   /// GcReadWeakFn -- LLVM objc_read_weak (id *src) function.
   llvm::FunctionCallee getGcReadWeakFn() {
     // id objc_read_weak (id *)
-    llvm::Type *args[] = {CGM.UnqualPtrTy};
+    llvm::Type *args[] = {CGM.DefaultPtrTy};
     llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
     return CGM.CreateRuntimeFunction(FTy, "objc_read_weak");
   }
@@ -346,7 +346,7 @@ public:
   /// GcAssignWeakFn -- LLVM objc_assign_weak function.
   llvm::FunctionCallee getGcAssignWeakFn() {
     // id objc_assign_weak (id, id *)
-    llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy};
+    llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy};
     llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
     return CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");
   }
@@ -354,7 +354,7 @@ public:
   /// GcAssignGlobalFn -- LLVM objc_assign_global function.
   llvm::FunctionCallee getGcAssignGlobalFn() {
     // id objc_assign_global(id, id *)
-    llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy};
+    llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy};
     llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
     return CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
   }
@@ -362,7 +362,7 @@ public:
   /// GcAssignThreadLocalFn -- LLVM objc_assign_threadlocal function.
   llvm::FunctionCallee getGcAssignThreadLocalFn() {
     // id objc_assign_threadlocal(id src, id * dest)
-    llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy};
+    llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy};
     llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
     return CGM.CreateRuntimeFunction(FTy, "objc_assign_threadlocal");
   }
@@ -370,7 +370,7 @@ public:
   /// GcAssignIvarFn -- LLVM objc_assign_ivar function.
   llvm::FunctionCallee getGcAssignIvarFn() {
     // id objc_assign_ivar(id, id *, ptrdiff_t)
-    llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy, CGM.PtrDiffTy};
+    llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy, CGM.PtrDiffTy};
     llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
     return CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
   }
@@ -386,7 +386,7 @@ public:
   /// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function.
   llvm::FunctionCallee getGcAssignStrongCastFn() {
     // id objc_assign_strongCast(id, id *)
-    llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy};
+    llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy};
     llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
     return CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast");
   }
@@ -517,7 +517,7 @@ public:
   /// ExceptionTryEnterFn - LLVM objc_exception_try_enter function.
   llvm::FunctionCallee getExceptionTryEnterFn() {
-    llvm::Type *params[] = {CGM.UnqualPtrTy};
+    llvm::Type *params[] = {CGM.DefaultPtrTy};
     return CGM.CreateRuntimeFunction(
         llvm::FunctionType::get(CGM.VoidTy, params, false),
         "objc_exception_try_enter");
   }
@@ -525,7 +525,7 @@ public:
   /// ExceptionTryExitFn - LLVM objc_exception_try_exit function.
   llvm::FunctionCallee getExceptionTryExitFn() {
-    llvm::Type *params[] = {CGM.UnqualPtrTy};
+    llvm::Type *params[] = {CGM.DefaultPtrTy};
     return CGM.CreateRuntimeFunction(
         llvm::FunctionType::get(CGM.VoidTy, params, false),
         "objc_exception_try_exit");
   }
@@ -533,7 +533,7 @@ public:
   /// ExceptionExtractFn - LLVM objc_exception_extract function.
   llvm::FunctionCallee getExceptionExtractFn() {
-    llvm::Type *params[] = {CGM.UnqualPtrTy};
+    llvm::Type *params[] = {CGM.DefaultPtrTy};
     return CGM.CreateRuntimeFunction(
         llvm::FunctionType::get(ObjectPtrTy, params, false),
         "objc_exception_extract");
   }
@@ -550,7 +550,7 @@ public:
   /// SetJmpFn - LLVM _setjmp function.
   llvm::FunctionCallee getSetJmpFn() {
     // This is specifically the prototype for x86.
-    llvm::Type *params[] = {CGM.UnqualPtrTy};
+    llvm::Type *params[] = {CGM.DefaultPtrTy};
     return CGM.CreateRuntimeFunction(
         llvm::FunctionType::get(CGM.Int32Ty, params, false), "_setjmp",
         llvm::AttributeList::get(CGM.getLLVMContext(),
@@ -1927,7 +1927,7 @@ CGObjCCommonMac::GenerateConstantNSString(const StringLiteral *Literal) {
   // If we don't already have it, construct the type for a constant NSString.
   if (!NSConstantStringType) {
     NSConstantStringType =
-        llvm::StructType::create({CGM.UnqualPtrTy, CGM.Int8PtrTy, CGM.IntTy},
+        llvm::StructType::create({CGM.DefaultPtrTy, CGM.Int8PtrTy, CGM.IntTy},
                                  "struct.__builtin_NSString");
   }
 
@@ -5959,7 +5959,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(
       Int8PtrTy, PropertyListPtrTy);
 
   // ImpnfABITy - LLVM for id (*)(id, SEL, ...)
-  ImpnfABITy = CGM.UnqualPtrTy;
+  ImpnfABITy = CGM.DefaultPtrTy;
 
   // struct _class_t {
   //   struct _class_t *isa;
@@ -6380,7 +6380,7 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
           CGM.getModule(), ObjCTypes.ImpnfABITy, false,
           llvm::GlobalValue::ExternalLinkage, nullptr, "_objc_empty_vtable");
     else
-      ObjCEmptyVtableVar = llvm::ConstantPointerNull::get(CGM.UnqualPtrTy);
+      ObjCEmptyVtableVar = llvm::ConstantPointerNull::get(CGM.DefaultPtrTy);
   }
 
   // FIXME: Is this correct (that meta class size is never computed)?
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 1ff2be7..85b2404 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -1714,12 +1714,12 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
     // Copying constructor for the threadprivate variable.
     // Must be NULL - reserved by runtime, but currently it requires that this
     // parameter is always NULL. Otherwise it fires assertion.
-    CopyCtor = llvm::Constant::getNullValue(CGM.UnqualPtrTy);
+    CopyCtor = llvm::Constant::getNullValue(CGM.DefaultPtrTy);
     if (Ctor == nullptr) {
-      Ctor = llvm::Constant::getNullValue(CGM.UnqualPtrTy);
+      Ctor = llvm::Constant::getNullValue(CGM.DefaultPtrTy);
     }
     if (Dtor == nullptr) {
-      Dtor = llvm::Constant::getNullValue(CGM.UnqualPtrTy);
+      Dtor = llvm::Constant::getNullValue(CGM.DefaultPtrTy);
     }
     if (!CGF) {
       auto *InitFunctionTy =
diff --git a/clang/lib/CodeGen/CGPointerAuth.cpp b/clang/lib/CodeGen/CGPointerAuth.cpp
index 375f87a..dbb7bc9 100644
--- a/clang/lib/CodeGen/CGPointerAuth.cpp
+++ b/clang/lib/CodeGen/CGPointerAuth.cpp
@@ -426,10 +426,10 @@ CodeGenModule::getConstantSignedPointer(llvm::Constant *Pointer, unsigned Key,
                                         llvm::ConstantInt *OtherDiscriminator) {
   llvm::Constant *AddressDiscriminator;
   if (StorageAddress) {
-    assert(StorageAddress->getType() == UnqualPtrTy);
+    assert(StorageAddress->getType() == DefaultPtrTy);
     AddressDiscriminator = StorageAddress;
   } else {
-    AddressDiscriminator = llvm::Constant::getNullValue(UnqualPtrTy);
+    AddressDiscriminator = llvm::Constant::getNullValue(DefaultPtrTy);
   }
 
   llvm::ConstantInt *IntegerDiscriminator;
diff --git a/clang/lib/CodeGen/CodeGenTypeCache.h b/clang/lib/CodeGen/CodeGenTypeCache.h
index e273ebe..015306b 100644
--- a/clang/lib/CodeGen/CodeGenTypeCache.h
+++ b/clang/lib/CodeGen/CodeGenTypeCache.h
@@ -53,7 +53,7 @@ struct CodeGenTypeCache {
 
   /// void*, void** in the target's default address space (often 0)
   union {
-    llvm::PointerType *UnqualPtrTy;
+    llvm::PointerType *DefaultPtrTy;
     llvm::PointerType *VoidPtrTy;
     llvm::PointerType *Int8PtrTy;
     llvm::PointerType *VoidPtrPtrTy;
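
Note: this header is where the rename is anchored. With opaque pointers there is only one pointer type per address space, which is why DefaultPtrTy, VoidPtrTy, Int8PtrTy and VoidPtrPtrTy can share a union: they are all literally the same llvm::PointerType. A minimal sketch, assuming the default address space is 0:

    #include <cassert>
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"

    void opaquePointersCollapse(llvm::LLVMContext &Ctx) {
      // Opaque pointer types are uniqued per address space, so what used to
      // be "i8*", "void*" and "void**" is now a single type, `ptr`.
      llvm::PointerType *A = llvm::PointerType::get(Ctx, /*AddressSpace=*/0);
      llvm::PointerType *B = llvm::PointerType::get(Ctx, 0);
      assert(A == B && "one pointer type per address space");
    }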
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 9e195a9..65c4763 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -774,7 +774,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
     } else {
       llvm::Value *VFPAddr =
           CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
-      VirtualFn = CGF.Builder.CreateAlignedLoad(CGF.UnqualPtrTy, VFPAddr,
+      VirtualFn = CGF.Builder.CreateAlignedLoad(CGF.DefaultPtrTy, VFPAddr,
                                                 CGF.getPointerAlign(),
                                                 "memptr.virtualfn");
     }
@@ -816,7 +816,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
   // function pointer.
   CGF.EmitBlock(FnNonVirtual);
   llvm::Value *NonVirtualFn =
-      Builder.CreateIntToPtr(FnAsInt, CGF.UnqualPtrTy, "memptr.nonvirtualfn");
+      Builder.CreateIntToPtr(FnAsInt, CGF.DefaultPtrTy, "memptr.nonvirtualfn");
 
   // Check the function pointer if CFI on member function pointers is enabled.
   if (ShouldEmitCFICheck) {
@@ -856,7 +856,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
 
   // We're done.
   CGF.EmitBlock(FnEnd);
-  llvm::PHINode *CalleePtr = Builder.CreatePHI(CGF.UnqualPtrTy, 2);
+  llvm::PHINode *CalleePtr = Builder.CreatePHI(CGF.DefaultPtrTy, 2);
   CalleePtr->addIncoming(VirtualFn, FnVirtual);
   CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
 
@@ -1403,7 +1403,7 @@ void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
 
     // Grab the vtable pointer as an intptr_t*.
     auto *ClassDecl = ElementType->castAsCXXRecordDecl();
-    llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.UnqualPtrTy, ClassDecl);
+    llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.DefaultPtrTy, ClassDecl);
 
     // Track back to entry -2 and pull out the offset there.
     llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
@@ -1749,7 +1749,7 @@ llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
   auto AuthenticateVTable = [&](Address ThisAddr, const CXXRecordDecl *Decl) {
     if (!CGF.getLangOpts().PointerAuthCalls)
       return;
-    (void)CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, Decl,
+    (void)CGF.GetVTablePtr(ThisAddr, CGF.DefaultPtrTy, Decl,
                            CodeGenFunction::VTableAuthMode::MustTrap);
   };
 
@@ -1775,7 +1775,7 @@ llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
       if (PerformPostCastAuthentication)
         VTable = CGF.EmitPointerAuthAuth(StrippingAuthInfo, VTable);
   } else
-    VTable = CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, SrcDecl);
+    VTable = CGF.GetVTablePtr(ThisAddr, CGF.DefaultPtrTy, SrcDecl);
 
   // Compare the vptr against the expected vptr for the destination type at
   // this offset.
@@ -1828,7 +1828,7 @@ llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
   if (CGM.getItaniumVTableContext().isRelativeLayout()) {
     // Get the vtable pointer.
     llvm::Value *VTable =
-        CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
+        CGF.GetVTablePtr(ThisAddr, CGF.DefaultPtrTy, ClassDecl);
 
     // Get the offset-to-top from the vtable.
     OffsetToTop =
@@ -1841,7 +1841,7 @@ llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
 
     // Get the vtable pointer.
     llvm::Value *VTable =
-        CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
+        CGF.GetVTablePtr(ThisAddr, CGF.DefaultPtrTy, ClassDecl);
 
     // Get the offset-to-top from the vtable.
     OffsetToTop =
@@ -2578,7 +2578,7 @@ llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
     // We can't simply ignore this load using nosanitize metadata because
     // the metadata may be lost.
     llvm::FunctionType *FTy =
-        llvm::FunctionType::get(CGF.SizeTy, CGF.UnqualPtrTy, false);
+        llvm::FunctionType::get(CGF.SizeTy, CGF.DefaultPtrTy, false);
     llvm::FunctionCallee F =
         CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
     return CGF.Builder.CreateCall(F, numElementsPtr.emitRawPointer(CGF));
@@ -2921,7 +2921,7 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
 
   // We're assuming that the destructor function is something we can
   // reasonably call with the default CC.
-  llvm::Type *dtorTy = CGF.UnqualPtrTy;
+  llvm::Type *dtorTy = CGF.DefaultPtrTy;
 
   // Preserve address space of addr.
   auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
@@ -5035,7 +5035,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
       auto catchRD = CatchType->getAsCXXRecordDecl();
       CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
 
-      llvm::Type *PtrTy = CGF.UnqualPtrTy; // addrspace 0 ok
+      llvm::Type *PtrTy = CGF.DefaultPtrTy;
 
       // Check for a copy expression.  If we don't have a copy expression,
       // that means a trivial copy is okay.
@@ -5244,7 +5244,7 @@ void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
                                   llvm::FunctionCallee Dtor,
                                   llvm::Constant *Addr) {
   if (D.getTLSKind() != VarDecl::TLS_None) {
-    llvm::PointerType *PtrTy = CGF.UnqualPtrTy;
+    llvm::PointerType *PtrTy = CGF.DefaultPtrTy;
 
     // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
     llvm::FunctionType *AtExitTy =
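
Note: the EmitLoadOfMemberFunctionPointer hunks above load the callee out of the vtable (or reinterpret the stored value) as a DefaultPtrTy pointer. For orientation, a rough C++ model of the Itanium member-function-pointer dispatch being lowered -- using the common discriminator convention where the low bit of `ptr` marks a virtual function (on ARM it lives in `adj` instead); illustrative only:

    #include <cstddef>
    #include <cstdint>

    struct MemFnPtr {
      std::uintptr_t ptr;  // function address, or vtable offset + 1 if virtual
      std::ptrdiff_t adj;  // adjustment to apply to `this`
    };

    using Fn = void (*)(void *);

    void callMemFn(void *obj, MemFnPtr mfp) {
      void *adjusted = static_cast<char *>(obj) + mfp.adj;
      Fn fn;
      if (mfp.ptr & 1) {  // virtual case: "memptr.virtualfn" in the IR
        char *vptr = *static_cast<char **>(adjusted);
        fn = *reinterpret_cast<Fn *>(vptr + (mfp.ptr - 1));
      } else {            // non-virtual case: "memptr.nonvirtualfn"
        fn = reinterpret_cast<Fn>(mfp.ptr);
      }
      fn(adjusted);
    }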
diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 19d9265..71e2449 100644
--- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -528,7 +528,7 @@ public:
         CGM.IntTy,
         CGM.IntTy,
         CGM.IntTy,
-        getImageRelativeType(CGM.UnqualPtrTy),
+        getImageRelativeType(CGM.DefaultPtrTy),
     };
     BaseClassDescriptorType = llvm::StructType::create(
         CGM.getLLVMContext(), FieldTypes, "rtti.BaseClassDescriptor");
@@ -540,7 +540,7 @@ public:
       return ClassHierarchyDescriptorType;
     // Forward-declare RTTIClassHierarchyDescriptor to break a cycle.
     llvm::Type *FieldTypes[] = {CGM.IntTy, CGM.IntTy, CGM.IntTy,
-                                getImageRelativeType(CGM.UnqualPtrTy)};
+                                getImageRelativeType(CGM.DefaultPtrTy)};
     ClassHierarchyDescriptorType =
         llvm::StructType::create(FieldTypes, "rtti.ClassHierarchyDescriptor");
     return ClassHierarchyDescriptorType;
@@ -554,7 +554,7 @@ public:
         CGM.IntTy,
         CGM.IntTy,
         getImageRelativeType(CGM.Int8PtrTy),
-        getImageRelativeType(CGM.UnqualPtrTy),
+        getImageRelativeType(CGM.DefaultPtrTy),
         getImageRelativeType(CGM.VoidTy),
     };
     llvm::ArrayRef<llvm::Type *> FieldTypesRef(FieldTypes);
@@ -752,7 +752,7 @@ public:
     llvm::SmallString<23> CTATypeName("eh.CatchableTypeArray.");
     CTATypeName += llvm::utostr(NumEntries);
 
-    llvm::Type *CTType = getImageRelativeType(CGM.UnqualPtrTy);
+    llvm::Type *CTType = getImageRelativeType(CGM.DefaultPtrTy);
     llvm::Type *FieldTypes[] = {
         CGM.IntTy,                               // NumEntries
         llvm::ArrayType::get(CTType, NumEntries) // CatchableTypes
@@ -779,7 +779,7 @@ public:
   llvm::FunctionCallee getThrowFn() {
     // _CxxThrowException is passed an exception object and a ThrowInfo object
     // which describes the exception.
-    llvm::Type *Args[] = {CGM.Int8PtrTy, CGM.UnqualPtrTy};
+    llvm::Type *Args[] = {CGM.Int8PtrTy, CGM.DefaultPtrTy};
     llvm::FunctionType *FTy =
         llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
     llvm::FunctionCallee Throw =
@@ -920,7 +920,7 @@ void MicrosoftCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
 
 void MicrosoftCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
   llvm::Value *Args[] = {llvm::ConstantPointerNull::get(CGM.Int8PtrTy),
-                         llvm::ConstantPointerNull::get(CGM.UnqualPtrTy)};
+                         llvm::ConstantPointerNull::get(CGM.DefaultPtrTy)};
   llvm::FunctionCallee Fn = getThrowFn();
   if (isNoReturn)
     CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, Args);
@@ -1969,13 +1969,13 @@ CGCallee MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                     SourceLocation Loc) {
   CGBuilderTy &Builder = CGF.Builder;
 
-  Ty = CGF.UnqualPtrTy;
+  Ty = CGF.DefaultPtrTy;
   Address VPtr =
       adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
 
   auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
   llvm::Value *VTable =
-      CGF.GetVTablePtr(VPtr, CGF.UnqualPtrTy, MethodDecl->getParent());
+      CGF.GetVTablePtr(VPtr, CGF.DefaultPtrTy, MethodDecl->getParent());
 
   MicrosoftVTableContext &VFTContext = CGM.getMicrosoftVTableContext();
   MethodVFTableLocation ML = VFTContext.getMethodVFTableLocation(GD);
@@ -2136,9 +2136,9 @@ MicrosoftCXXABI::EmitVirtualMemPtrThunk(const CXXMethodDecl *MD,
 
   // Load the vfptr and then callee from the vftable.  The callee should have
   // adjusted 'this' so that the vfptr is at offset zero.
-  llvm::Type *ThunkPtrTy = CGF.UnqualPtrTy;
+  llvm::Type *ThunkPtrTy = CGF.DefaultPtrTy;
   llvm::Value *VTable =
-      CGF.GetVTablePtr(getThisAddress(CGF), CGF.UnqualPtrTy, MD->getParent());
+      CGF.GetVTablePtr(getThisAddress(CGF), CGF.DefaultPtrTy, MD->getParent());
 
   llvm::Value *VFuncPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
       ThunkPtrTy, VTable, ML.Index, "vfn");
@@ -2562,7 +2562,7 @@ static ConstantAddress getInitThreadEpochPtr(CodeGenModule &CGM) {
 static llvm::FunctionCallee getInitThreadHeaderFn(CodeGenModule &CGM) {
   llvm::FunctionType *FTy =
       llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
-                              CGM.UnqualPtrTy, /*isVarArg=*/false);
+                              CGM.DefaultPtrTy, /*isVarArg=*/false);
   return CGM.CreateRuntimeFunction(
       FTy, "_Init_thread_header",
       llvm::AttributeList::get(CGM.getLLVMContext(),
@@ -2574,7 +2574,7 @@ static llvm::FunctionCallee getInitThreadHeaderFn(CodeGenModule &CGM) {
 static llvm::FunctionCallee getInitThreadFooterFn(CodeGenModule &CGM) {
   llvm::FunctionType *FTy =
       llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
-                              CGM.UnqualPtrTy, /*isVarArg=*/false);
+                              CGM.DefaultPtrTy, /*isVarArg=*/false);
   return CGM.CreateRuntimeFunction(
       FTy, "_Init_thread_footer",
       llvm::AttributeList::get(CGM.getLLVMContext(),
@@ -2586,7 +2586,7 @@ static llvm::FunctionCallee getInitThreadFooterFn(CodeGenModule &CGM) {
 static llvm::FunctionCallee getInitThreadAbortFn(CodeGenModule &CGM) {
   llvm::FunctionType *FTy =
       llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
-                              CGM.UnqualPtrTy, /*isVarArg=*/false);
+                              CGM.DefaultPtrTy, /*isVarArg=*/false);
   return CGM.CreateRuntimeFunction(
       FTy, "_Init_thread_abort",
       llvm::AttributeList::get(CGM.getLLVMContext(),
@@ -3169,7 +3169,7 @@ MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
   }
 
   llvm::Value *VBTable =
-      Builder.CreateAlignedLoad(CGM.UnqualPtrTy, VBPtr, VBPtrAlign, "vbtable");
+      Builder.CreateAlignedLoad(CGM.DefaultPtrTy, VBPtr, VBPtrAlign, "vbtable");
 
   // Translate from byte offset to table index. It improves analyzability.
   llvm::Value *VBTableIndex = Builder.CreateAShr(
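
Note: GetVBaseOffsetFromVBPtr above now loads the vbtable pointer as DefaultPtrTy. A rough model of the MS ABI lookup it implements, assuming int-typed vbtable entries (illustrative only):

    // Returns the offset of a virtual base, given the offset of the vbptr
    // within the object and the base's index in the vbtable.
    int vbaseOffset(char *obj, int vbptrOffset, int vbIndex) {
      char *vbptr = obj + vbptrOffset;
      int *vbtable = *reinterpret_cast<int **>(vbptr);  // the DefaultPtrTy load
      return vbtable[vbIndex];
    }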
@@ -3825,7 +3825,7 @@ MSRTTIBuilder::getBaseClassArray(SmallVectorImpl<MSRTTIClass> &Classes) {
   // mode) bytes of padding.  We provide a pointer sized amount of padding by
   // adding +1 to Classes.size().  The sections have pointer alignment and are
   // marked pick-any so it shouldn't matter.
-  llvm::Type *PtrType = ABI.getImageRelativeType(CGM.UnqualPtrTy);
+  llvm::Type *PtrType = ABI.getImageRelativeType(CGM.DefaultPtrTy);
   auto *ArrType = llvm::ArrayType::get(PtrType, Classes.size() + 1);
   auto *BCA =
       new llvm::GlobalVariable(Module, ArrType,
@@ -4372,7 +4372,7 @@ llvm::GlobalVariable *MicrosoftCXXABI::getCatchableTypeArray(QualType T) {
     CatchableTypes.insert(getCatchableType(getContext().VoidPtrTy));
 
   uint32_t NumEntries = CatchableTypes.size();
-  llvm::Type *CTType = getImageRelativeType(CGM.UnqualPtrTy);
+  llvm::Type *CTType = getImageRelativeType(CGM.DefaultPtrTy);
   llvm::ArrayType *AT = llvm::ArrayType::get(CTType, NumEntries);
   llvm::StructType *CTAType = getCatchableTypeArrayType(NumEntries);
   llvm::Constant *Fields[] = {
diff --git a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
index 2429a43..60f9b86 100644
--- a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
@@ -2037,7 +2037,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
   case NEON::BI__builtin_neon_vld1q_x3_v:
   case NEON::BI__builtin_neon_vld1_x4_v:
   case NEON::BI__builtin_neon_vld1q_x4_v: {
-    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+    llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
     Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
@@ -2263,11 +2263,11 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
     // in AArch64 it comes last. We may want to stick to one or another.
     if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
         Arch == llvm::Triple::aarch64_32) {
-      llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+      llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
       std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
       return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
     }
-    llvm::Type *Tys[2] = {UnqualPtrTy, VTy};
+    llvm::Type *Tys[2] = {DefaultPtrTy, VTy};
     return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
   }
   case NEON::BI__builtin_neon_vsubhn_v: {
@@ -2858,7 +2858,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
     Function *F = CGM.getIntrinsic(
         BuiltinID == clang::ARM::BI__builtin_arm_ldaex ? Intrinsic::arm_ldaex
                                                        : Intrinsic::arm_ldrex,
-        UnqualPtrTy);
+        DefaultPtrTy);
     CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
     Val->addParamAttr(
         0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
@@ -5225,7 +5225,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
     Function *F =
         CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
                              ? Intrinsic::aarch64_ldaxr
                              : Intrinsic::aarch64_ldxr,
-                         UnqualPtrTy);
+                         DefaultPtrTy);
     CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
     Val->addParamAttr(
         0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
@@ -7482,42 +7482,42 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
   }
   case NEON::BI__builtin_neon_vld2_v:
   case NEON::BI__builtin_neon_vld2q_v: {
-    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+    llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
     Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
   }
   case NEON::BI__builtin_neon_vld3_v:
   case NEON::BI__builtin_neon_vld3q_v: {
-    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+    llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
     Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
   }
   case NEON::BI__builtin_neon_vld4_v:
   case NEON::BI__builtin_neon_vld4q_v: {
-    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+    llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
     Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
   }
   case NEON::BI__builtin_neon_vld2_dup_v:
   case NEON::BI__builtin_neon_vld2q_dup_v: {
-    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+    llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
     Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
   }
   case NEON::BI__builtin_neon_vld3_dup_v:
   case NEON::BI__builtin_neon_vld3q_dup_v: {
-    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+    llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
     Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
   }
   case NEON::BI__builtin_neon_vld4_dup_v:
   case NEON::BI__builtin_neon_vld4q_dup_v: {
-    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+    llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
     Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
diff --git a/clang/lib/CodeGen/TargetBuiltins/PPC.cpp b/clang/lib/CodeGen/TargetBuiltins/PPC.cpp
index e71dc9e..44d5938 100644
--- a/clang/lib/CodeGen/TargetBuiltins/PPC.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/PPC.cpp
@@ -59,7 +59,7 @@ static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,
     Constraints += MachineClobbers;
   }
 
-  llvm::Type *PtrType = CGF.UnqualPtrTy;
+  llvm::Type *PtrType = CGF.DefaultPtrTy;
   llvm::FunctionType *FTy = llvm::FunctionType::get(RetType, {PtrType}, false);
 
   llvm::InlineAsm *IA =
diff --git a/clang/lib/CodeGen/Targets/PPC.cpp b/clang/lib/CodeGen/Targets/PPC.cpp
index 380e8c0..35e7655 100644
--- a/clang/lib/CodeGen/Targets/PPC.cpp
+++ b/clang/lib/CodeGen/Targets/PPC.cpp
@@ -490,7 +490,7 @@ RValue PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
 
   llvm::Type *DirectTy = CGF.ConvertType(Ty), *ElementTy = DirectTy;
   if (isIndirect)
-    DirectTy = CGF.UnqualPtrTy;
+    DirectTy = CGF.DefaultPtrTy;
 
   // Case 1: consume registers.
   Address RegAddr = Address::invalid();