Diffstat (limited to 'clang/lib/CodeGen')
-rw-r--r--  clang/lib/CodeGen/BackendUtil.cpp                6
-rw-r--r--  clang/lib/CodeGen/CGAtomic.cpp                 155
-rw-r--r--  clang/lib/CodeGen/CGBlocks.cpp                   2
-rw-r--r--  clang/lib/CodeGen/CGBuiltin.cpp                  2
-rw-r--r--  clang/lib/CodeGen/CGCUDANV.cpp                   2
-rw-r--r--  clang/lib/CodeGen/CGCall.cpp                    18
-rw-r--r--  clang/lib/CodeGen/CGExpr.cpp                     6
-rw-r--r--  clang/lib/CodeGen/CGObjCMac.cpp                 26
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntime.cpp            6
-rw-r--r--  clang/lib/CodeGen/CGPointerAuth.cpp              4
-rw-r--r--  clang/lib/CodeGen/CodeGenModule.cpp             84
-rw-r--r--  clang/lib/CodeGen/CodeGenModule.h                7
-rw-r--r--  clang/lib/CodeGen/CodeGenTypeCache.h             2
-rw-r--r--  clang/lib/CodeGen/ItaniumCXXABI.cpp             24
-rw-r--r--  clang/lib/CodeGen/MicrosoftCXXABI.cpp           32
-rw-r--r--  clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp     29
-rw-r--r--  clang/lib/CodeGen/TargetBuiltins/ARM.cpp        22
-rw-r--r--  clang/lib/CodeGen/TargetBuiltins/PPC.cpp         2
-rw-r--r--  clang/lib/CodeGen/Targets/AMDGPU.cpp             4
-rw-r--r--  clang/lib/CodeGen/Targets/PPC.cpp                2
-rw-r--r--  clang/lib/CodeGen/Targets/SPIR.cpp               2
21 files changed, 279 insertions, 158 deletions
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index 6020684..ecfbcb5 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -430,12 +430,6 @@ static bool initTargetOptions(const CompilerInstance &CI,
Options.NoInfsFPMath = LangOpts.NoHonorInfs;
Options.NoNaNsFPMath = LangOpts.NoHonorNaNs;
Options.NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
- Options.UnsafeFPMath = LangOpts.AllowFPReassoc && LangOpts.AllowRecip &&
- LangOpts.NoSignedZero && LangOpts.ApproxFunc &&
- (LangOpts.getDefaultFPContractMode() ==
- LangOptions::FPModeKind::FPM_Fast ||
- LangOpts.getDefaultFPContractMode() ==
- LangOptions::FPModeKind::FPM_FastHonorPragmas);
Options.BBAddrMap = CodeGenOpts.BBAddrMap;
Options.BBSections =
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index d95dab3..a012581 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -86,7 +86,7 @@ namespace {
llvm::Value *StoragePtr = CGF.Builder.CreateConstGEP1_64(
CGF.Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());
StoragePtr = CGF.Builder.CreateAddrSpaceCast(
- StoragePtr, CGF.UnqualPtrTy, "atomic_bitfield_base");
+ StoragePtr, CGF.DefaultPtrTy, "atomic_bitfield_base");
BFI = OrigBFI;
BFI.Offset = Offset;
BFI.StorageSize = AtomicSizeInBits;
@@ -374,10 +374,9 @@ bool AtomicInfo::emitMemSetZeroIfNecessary() const {
}
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
- Address Dest, Address Ptr,
- Address Val1, Address Val2,
- uint64_t Size,
- llvm::AtomicOrdering SuccessOrder,
+ Address Dest, Address Ptr, Address Val1,
+ Address Val2, Address ExpectedResult,
+ uint64_t Size, llvm::AtomicOrdering SuccessOrder,
llvm::AtomicOrdering FailureOrder,
llvm::SyncScope::ID Scope) {
// Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
@@ -411,8 +410,30 @@ static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
CGF.Builder.SetInsertPoint(StoreExpectedBB);
// Update the memory at Expected with Old's value.
- auto *I = CGF.Builder.CreateStore(Old, Val1);
- CGF.addInstToCurrentSourceAtom(I, Old);
+ llvm::Type *ExpectedType = ExpectedResult.getElementType();
+ const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
+ uint64_t ExpectedSizeInBytes = DL.getTypeStoreSize(ExpectedType);
+
+ if (ExpectedSizeInBytes == Size) {
+ // Sizes match: store directly
+ auto *I = CGF.Builder.CreateStore(Old, ExpectedResult);
+ CGF.addInstToCurrentSourceAtom(I, Old);
+ } else {
+ // Sizes don't match: store only the first ExpectedSizeInBytes bytes of Old.
+ llvm::Type *OldType = Old->getType();
+
+ // Allocate temporary storage for Old value
+ Address OldTmp =
+ CGF.CreateTempAlloca(OldType, Ptr.getAlignment(), "old.tmp");
+
+ // Store Old into this temporary
+ auto *I = CGF.Builder.CreateStore(Old, OldTmp);
+ CGF.addInstToCurrentSourceAtom(I, Old);
+
+ // Perform memcpy for first ExpectedSizeInBytes bytes
+ CGF.Builder.CreateMemCpy(ExpectedResult, OldTmp, ExpectedSizeInBytes,
+ /*isVolatile=*/false);
+ }
// Finally, branch to the exit point.
CGF.Builder.CreateBr(ContinueBB);
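
For context, a minimal C sketch of the situation the size-mismatch branch above guards against, assuming an illustrative odd-sized type whose atomic operation the target widens past the type's store size:

    struct S { char c[3]; };

    _Bool try_swap(struct S *obj, struct S *expected, struct S *desired) {
      // The lock-free path may widen the 3-byte payload to a 4-byte cmpxchg;
      // on failure, only the first 3 bytes of the loaded value may be written
      // back into *expected, which is what the memcpy path above implements.
      return __atomic_compare_exchange(obj, expected, desired,
                                       /*weak=*/0, __ATOMIC_SEQ_CST,
                                       __ATOMIC_SEQ_CST);
    }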
@@ -425,13 +446,11 @@ static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
-static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
- bool IsWeak, Address Dest, Address Ptr,
- Address Val1, Address Val2,
- llvm::Value *FailureOrderVal,
- uint64_t Size,
- llvm::AtomicOrdering SuccessOrder,
- llvm::SyncScope::ID Scope) {
+static void emitAtomicCmpXchgFailureSet(
+ CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr,
+ Address Val1, Address Val2, Address ExpectedResult,
+ llvm::Value *FailureOrderVal, uint64_t Size,
+ llvm::AtomicOrdering SuccessOrder, llvm::SyncScope::ID Scope) {
llvm::AtomicOrdering FailureOrder;
if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
auto FOS = FO->getSExtValue();
@@ -458,8 +477,8 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
// success argument". This condition has been lifted and the only
// precondition is 31.7.2.18. Effectively treat this as a DR and skip
// language version checks.
- emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
- FailureOrder, Scope);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult,
+ Size, SuccessOrder, FailureOrder, Scope);
return;
}
@@ -483,18 +502,19 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
// Emit all the different atomics
CGF.Builder.SetInsertPoint(MonotonicBB);
- emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
- Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
+ SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(AcquireBB);
- emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
- llvm::AtomicOrdering::Acquire, Scope);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
+ SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(SeqCstBB);
- emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
- llvm::AtomicOrdering::SequentiallyConsistent, Scope);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
+ SuccessOrder, llvm::AtomicOrdering::SequentiallyConsistent,
+ Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(ContBB);
@@ -538,8 +558,9 @@ static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
Address Ptr, Address Val1, Address Val2,
- llvm::Value *IsWeak, llvm::Value *FailureOrder,
- uint64_t Size, llvm::AtomicOrdering Order,
+ Address ExpectedResult, llvm::Value *IsWeak,
+ llvm::Value *FailureOrder, uint64_t Size,
+ llvm::AtomicOrdering Order,
llvm::SyncScope::ID Scope) {
llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
bool PostOpMinMax = false;
@@ -554,13 +575,15 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Order, Scope);
+ ExpectedResult, FailureOrder, Size, Order,
+ Scope);
return;
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Order, Scope);
+ ExpectedResult, FailureOrder, Size, Order,
+ Scope);
return;
case AtomicExpr::AO__atomic_compare_exchange:
case AtomicExpr::AO__atomic_compare_exchange_n:
@@ -568,7 +591,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
- Val1, Val2, FailureOrder, Size, Order, Scope);
+ Val1, Val2, ExpectedResult, FailureOrder,
+ Size, Order, Scope);
} else {
// Create all the relevant BB's
llvm::BasicBlock *StrongBB =
@@ -582,12 +606,14 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
CGF.Builder.SetInsertPoint(StrongBB);
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Order, Scope);
+ ExpectedResult, FailureOrder, Size, Order,
+ Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(WeakBB);
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Order, Scope);
+ ExpectedResult, FailureOrder, Size, Order,
+ Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(ContBB);
@@ -797,9 +823,9 @@ EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
Address Ptr, Address Val1, Address Val2,
- llvm::Value *IsWeak, llvm::Value *FailureOrder,
- uint64_t Size, llvm::AtomicOrdering Order,
- llvm::Value *Scope) {
+ Address OriginalVal1, llvm::Value *IsWeak,
+ llvm::Value *FailureOrder, uint64_t Size,
+ llvm::AtomicOrdering Order, llvm::Value *Scope) {
auto ScopeModel = Expr->getScopeModel();
// LLVM atomic instructions always have sync scope. If clang atomic
@@ -816,8 +842,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
Order, CGF.getLLVMContext());
else
SS = llvm::SyncScope::System;
- EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
- Order, SS);
+ EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ FailureOrder, Size, Order, SS);
return;
}
@@ -826,8 +852,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
Order, CGF.CGM.getLLVMContext());
- EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
- Order, SCID);
+ EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ FailureOrder, Size, Order, SCID);
return;
}
@@ -852,12 +878,11 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
SI->addCase(Builder.getInt32(S), B);
Builder.SetInsertPoint(B);
- EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
- Order,
- CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
- ScopeModel->map(S),
- Order,
- CGF.getLLVMContext()));
+ EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ FailureOrder, Size, Order,
+ CGF.getTargetHooks().getLLVMSyncScopeID(
+ CGF.CGM.getLangOpts(), ScopeModel->map(S), Order,
+ CGF.getLLVMContext()));
Builder.CreateBr(ContBB);
}
@@ -1058,6 +1083,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
AtomicInfo Atomics(*this, AtomicVal);
+ Address OriginalVal1 = Val1;
if (ShouldCastToIntPtrTy) {
Ptr = Atomics.castToAtomicIntPointer(Ptr);
if (Val1.isValid())
@@ -1301,30 +1327,32 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
if (llvm::isValidAtomicOrderingCABI(ord))
switch ((llvm::AtomicOrderingCABI)ord) {
case llvm::AtomicOrderingCABI::relaxed:
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Monotonic, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::Monotonic, Scope);
break;
case llvm::AtomicOrderingCABI::consume:
case llvm::AtomicOrderingCABI::acquire:
if (IsStore)
break; // Avoid crashing on code with undefined behavior
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Acquire, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::Acquire, Scope);
break;
case llvm::AtomicOrderingCABI::release:
if (IsLoad)
break; // Avoid crashing on code with undefined behavior
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Release, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::Release, Scope);
break;
case llvm::AtomicOrderingCABI::acq_rel:
if (IsLoad || IsStore)
break; // Avoid crashing on code with undefined behavior
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::AcquireRelease, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::AcquireRelease,
+ Scope);
break;
case llvm::AtomicOrderingCABI::seq_cst:
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size,
llvm::AtomicOrdering::SequentiallyConsistent, Scope);
break;
}
@@ -1360,13 +1388,13 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// Emit all the different atomics
Builder.SetInsertPoint(MonotonicBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Monotonic, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail,
+ Size, llvm::AtomicOrdering::Monotonic, Scope);
Builder.CreateBr(ContBB);
if (!IsStore) {
Builder.SetInsertPoint(AcquireBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Acquire, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::Acquire, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
AcquireBB);
@@ -1375,23 +1403,23 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
}
if (!IsLoad) {
Builder.SetInsertPoint(ReleaseBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Release, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::Release, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
ReleaseBB);
}
if (!IsLoad && !IsStore) {
Builder.SetInsertPoint(AcqRelBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::AcquireRelease, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::AcquireRelease, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
AcqRelBB);
}
Builder.SetInsertPoint(SeqCstBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::SequentiallyConsistent, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail,
+ Size, llvm::AtomicOrdering::SequentiallyConsistent, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
SeqCstBB);
@@ -1417,6 +1445,11 @@ Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
if (SourceSizeInBits != AtomicSizeInBits) {
Address Tmp = CreateTempAlloca();
+ CGF.Builder.CreateMemSet(
+ Tmp.emitRawPointer(CGF), llvm::ConstantInt::get(CGF.Int8Ty, 0),
+ CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
+ Tmp.getAlignment().getAsAlign());
+
CGF.Builder.CreateMemCpy(Tmp, Addr,
std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
Addr = Tmp;
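
A hedged illustration of the new zero-fill: when the source type's store size is smaller than the padded atomic width, the trailing bytes of the temporary were previously left undef, making the widened integer value nondeterministic. A sketch with the same illustrative type as above:

    struct S { char c[3]; };

    void publish(struct S *p, struct S *v) {
      // The 3-byte value in *v is copied into a 4-byte temporary before being
      // handled as an atomic integer; the memset above zero-fills the padding
      // byte instead of leaving it undef.
      __atomic_store(p, v, __ATOMIC_SEQ_CST);
    }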
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index 597127ab..20a595e 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -1207,7 +1207,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
} else {
// Bitcast the block literal to a generic block literal.
BlockPtr =
- Builder.CreatePointerCast(BlockPtr, UnqualPtrTy, "block.literal");
+ Builder.CreatePointerCast(BlockPtr, DefaultPtrTy, "block.literal");
// Get pointer to the block invoke function
FuncPtr = Builder.CreateStructGEP(GenBlockTy, BlockPtr, 3);
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 92dba32..fd14cd6 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -1673,7 +1673,7 @@ static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
CGF.getLLVMContext(),
CGF.getContext().getTypeSize(E->getArg(1)->getType()));
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.Int8Ty, {CGF.UnqualPtrTy, IntType}, false);
+ llvm::FunctionType::get(CGF.Int8Ty, {CGF.DefaultPtrTy, IntType}, false);
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp
index cb16fe1..b463f88 100644
--- a/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/clang/lib/CodeGen/CGCUDANV.cpp
@@ -230,7 +230,7 @@ CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
IntTy = CGM.IntTy;
SizeTy = CGM.SizeTy;
VoidTy = CGM.VoidTy;
- PtrTy = CGM.UnqualPtrTy;
+ PtrTy = CGM.DefaultPtrTy;
if (CGM.getLangOpts().OffloadViaLLVM)
Prefix = "llvm";
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index 741fa44..465f3f4 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -5937,8 +5937,24 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
CI->getCalledFunction()->getName().starts_with("_Z4sqrt")) {
SetSqrtFPAccuracy(CI);
}
- if (callOrInvoke)
+ if (callOrInvoke) {
*callOrInvoke = CI;
+ if (CGM.getCodeGenOpts().CallGraphSection) {
+ QualType CST;
+ if (TargetDecl && TargetDecl->getFunctionType())
+ CST = QualType(TargetDecl->getFunctionType(), 0);
+ else if (const auto *FPT =
+ Callee.getAbstractInfo().getCalleeFunctionProtoType())
+ CST = QualType(FPT, 0);
+ else
+ llvm_unreachable(
+ "Cannot find the callee type to generate callee_type metadata.");
+
+ // Set type identifier metadata of indirect calls for call graph section.
+ if (!CST.isNull())
+ CGM.createCalleeTypeMetadataForIcall(CST, *callOrInvoke);
+ }
+ }
// If this is within a function that has the guard(nocf) attribute and is an
// indirect call, add the "guard_nocf" attribute to this call to indicate that
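
A hedged sketch of the source pattern this path covers (assuming the CallGraphSection codegen option is enabled; names are illustrative): only indirect calls get the callee-type metadata, keyed on the callee's static function type.

    // The call through 'fp' is indirect, so createCalleeTypeMetadataForIcall
    // attaches generalized type-id metadata for 'void(int)' to the call site;
    // the direct call to 'g' is left alone.
    void g(int);
    void dispatch(void (*fp)(int), int v) {
      fp(v);   // indirect: gets callee_type metadata
      g(v);    // direct: no callee_type metadata
    }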
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 8439ec7..fd73314 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -1756,7 +1756,7 @@ LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
const char *Name) {
ErrorUnsupported(E, Name);
llvm::Type *ElTy = ConvertType(E->getType());
- llvm::Type *Ty = UnqualPtrTy;
+ llvm::Type *Ty = DefaultPtrTy;
return MakeAddrLValue(
Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
}
@@ -4270,7 +4270,7 @@ void CodeGenFunction::EmitCfiCheckFail() {
llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
llvm::Value *V = Builder.CreateConstGEP2_32(
- CfiCheckFailDataTy, Builder.CreatePointerCast(Data, UnqualPtrTy), 0, 0);
+ CfiCheckFailDataTy, Builder.CreatePointerCast(Data, DefaultPtrTy), 0, 0);
Address CheckKindAddr(V, Int8Ty, getIntAlign());
llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
@@ -5711,7 +5711,7 @@ std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
CGF.EmitCXXThrowExpr(ThrowExpr);
llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
- llvm::Type *Ty = CGF.UnqualPtrTy;
+ llvm::Type *Ty = CGF.DefaultPtrTy;
return CGF.MakeAddrLValue(
Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
Dead->getType());
diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp
index c571821a..cb5bb40 100644
--- a/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/clang/lib/CodeGen/CGObjCMac.cpp
@@ -338,7 +338,7 @@ public:
/// GcReadWeakFn -- LLVM objc_read_weak (id *src) function.
llvm::FunctionCallee getGcReadWeakFn() {
// id objc_read_weak (id *)
- llvm::Type *args[] = {CGM.UnqualPtrTy};
+ llvm::Type *args[] = {CGM.DefaultPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_read_weak");
}
@@ -346,7 +346,7 @@ public:
/// GcAssignWeakFn -- LLVM objc_assign_weak function.
llvm::FunctionCallee getGcAssignWeakFn() {
// id objc_assign_weak (id, id *)
- llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy};
+ llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");
}
@@ -354,7 +354,7 @@ public:
/// GcAssignGlobalFn -- LLVM objc_assign_global function.
llvm::FunctionCallee getGcAssignGlobalFn() {
// id objc_assign_global(id, id *)
- llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy};
+ llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
}
@@ -362,7 +362,7 @@ public:
/// GcAssignThreadLocalFn -- LLVM objc_assign_threadlocal function.
llvm::FunctionCallee getGcAssignThreadLocalFn() {
// id objc_assign_threadlocal(id src, id * dest)
- llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy};
+ llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_threadlocal");
}
@@ -370,7 +370,7 @@ public:
/// GcAssignIvarFn -- LLVM objc_assign_ivar function.
llvm::FunctionCallee getGcAssignIvarFn() {
// id objc_assign_ivar(id, id *, ptrdiff_t)
- llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy, CGM.PtrDiffTy};
+ llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy, CGM.PtrDiffTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
}
@@ -386,7 +386,7 @@ public:
/// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function.
llvm::FunctionCallee getGcAssignStrongCastFn() {
// id objc_assign_strongCast(id, id *)
- llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy};
+ llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast");
}
@@ -517,7 +517,7 @@ public:
/// ExceptionTryEnterFn - LLVM objc_exception_try_enter function.
llvm::FunctionCallee getExceptionTryEnterFn() {
- llvm::Type *params[] = {CGM.UnqualPtrTy};
+ llvm::Type *params[] = {CGM.DefaultPtrTy};
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.VoidTy, params, false),
"objc_exception_try_enter");
@@ -525,7 +525,7 @@ public:
/// ExceptionTryExitFn - LLVM objc_exception_try_exit function.
llvm::FunctionCallee getExceptionTryExitFn() {
- llvm::Type *params[] = {CGM.UnqualPtrTy};
+ llvm::Type *params[] = {CGM.DefaultPtrTy};
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.VoidTy, params, false),
"objc_exception_try_exit");
@@ -533,7 +533,7 @@ public:
/// ExceptionExtractFn - LLVM objc_exception_extract function.
llvm::FunctionCallee getExceptionExtractFn() {
- llvm::Type *params[] = {CGM.UnqualPtrTy};
+ llvm::Type *params[] = {CGM.DefaultPtrTy};
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(ObjectPtrTy, params, false),
"objc_exception_extract");
@@ -550,7 +550,7 @@ public:
/// SetJmpFn - LLVM _setjmp function.
llvm::FunctionCallee getSetJmpFn() {
// This is specifically the prototype for x86.
- llvm::Type *params[] = {CGM.UnqualPtrTy};
+ llvm::Type *params[] = {CGM.DefaultPtrTy};
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.Int32Ty, params, false), "_setjmp",
llvm::AttributeList::get(CGM.getLLVMContext(),
@@ -1927,7 +1927,7 @@ CGObjCCommonMac::GenerateConstantNSString(const StringLiteral *Literal) {
// If we don't already have it, construct the type for a constant NSString.
if (!NSConstantStringType) {
NSConstantStringType =
- llvm::StructType::create({CGM.UnqualPtrTy, CGM.Int8PtrTy, CGM.IntTy},
+ llvm::StructType::create({CGM.DefaultPtrTy, CGM.Int8PtrTy, CGM.IntTy},
"struct.__builtin_NSString");
}
@@ -5959,7 +5959,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(
Int8PtrTy, PropertyListPtrTy);
// ImpnfABITy - LLVM for id (*)(id, SEL, ...)
- ImpnfABITy = CGM.UnqualPtrTy;
+ ImpnfABITy = CGM.DefaultPtrTy;
// struct _class_t {
// struct _class_t *isa;
@@ -6380,7 +6380,7 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
CGM.getModule(), ObjCTypes.ImpnfABITy, false,
llvm::GlobalValue::ExternalLinkage, nullptr, "_objc_empty_vtable");
else
- ObjCEmptyVtableVar = llvm::ConstantPointerNull::get(CGM.UnqualPtrTy);
+ ObjCEmptyVtableVar = llvm::ConstantPointerNull::get(CGM.DefaultPtrTy);
}
// FIXME: Is this correct (that meta class size is never computed)?
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 1ff2be7..85b2404 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -1714,12 +1714,12 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
// Copying constructor for the threadprivate variable.
// Must be NULL - reserved by runtime, but currently it requires that this
// parameter is always NULL. Otherwise it fires assertion.
- CopyCtor = llvm::Constant::getNullValue(CGM.UnqualPtrTy);
+ CopyCtor = llvm::Constant::getNullValue(CGM.DefaultPtrTy);
if (Ctor == nullptr) {
- Ctor = llvm::Constant::getNullValue(CGM.UnqualPtrTy);
+ Ctor = llvm::Constant::getNullValue(CGM.DefaultPtrTy);
}
if (Dtor == nullptr) {
- Dtor = llvm::Constant::getNullValue(CGM.UnqualPtrTy);
+ Dtor = llvm::Constant::getNullValue(CGM.DefaultPtrTy);
}
if (!CGF) {
auto *InitFunctionTy =
diff --git a/clang/lib/CodeGen/CGPointerAuth.cpp b/clang/lib/CodeGen/CGPointerAuth.cpp
index 375f87a..dbb7bc9 100644
--- a/clang/lib/CodeGen/CGPointerAuth.cpp
+++ b/clang/lib/CodeGen/CGPointerAuth.cpp
@@ -426,10 +426,10 @@ CodeGenModule::getConstantSignedPointer(llvm::Constant *Pointer, unsigned Key,
llvm::ConstantInt *OtherDiscriminator) {
llvm::Constant *AddressDiscriminator;
if (StorageAddress) {
- assert(StorageAddress->getType() == UnqualPtrTy);
+ assert(StorageAddress->getType() == DefaultPtrTy);
AddressDiscriminator = StorageAddress;
} else {
- AddressDiscriminator = llvm::Constant::getNullValue(UnqualPtrTy);
+ AddressDiscriminator = llvm::Constant::getNullValue(DefaultPtrTy);
}
llvm::ConstantInt *IntegerDiscriminator;
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index c5eb14e..3746bc04 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -83,6 +83,7 @@ static llvm::cl::opt<bool> LimitedCoverage(
llvm::cl::desc("Emit limited coverage mapping information (experimental)"));
static const char AnnotationSection[] = "llvm.metadata";
+static constexpr auto ErrnoTBAAMDName = "llvm.errno.tbaa";
static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
switch (CGM.getContext().getCXXABIKind()) {
@@ -1324,22 +1325,29 @@ void CodeGenModule::Release() {
"tag-stack-memory-buildattr", 1);
if (T.isARM() || T.isThumb() || T.isAArch64()) {
+ // Previously 1 was used, meaning the backend should derive the function
+ // attribute from the module flag. 2 now means the function attributes are
+ // already set for all functions in this module, so there is no need to
+ // propagate them from the module flag. The value is only consulted during
+ // an LTO module merge, because there the backend will see all required
+ // function attributes already set; it is used before modules get merged.
+ // Any positive value means the feature is active and the required binary
+ // markings need to be emitted accordingly.
if (LangOpts.BranchTargetEnforcement)
getModule().addModuleFlag(llvm::Module::Min, "branch-target-enforcement",
- 1);
+ 2);
if (LangOpts.BranchProtectionPAuthLR)
getModule().addModuleFlag(llvm::Module::Min, "branch-protection-pauth-lr",
- 1);
+ 2);
if (LangOpts.GuardedControlStack)
- getModule().addModuleFlag(llvm::Module::Min, "guarded-control-stack", 1);
+ getModule().addModuleFlag(llvm::Module::Min, "guarded-control-stack", 2);
if (LangOpts.hasSignReturnAddress())
- getModule().addModuleFlag(llvm::Module::Min, "sign-return-address", 1);
+ getModule().addModuleFlag(llvm::Module::Min, "sign-return-address", 2);
if (LangOpts.isSignReturnAddressScopeAll())
getModule().addModuleFlag(llvm::Module::Min, "sign-return-address-all",
- 1);
+ 2);
if (!LangOpts.isSignReturnAddressWithAKey())
getModule().addModuleFlag(llvm::Module::Min,
- "sign-return-address-with-bkey", 1);
+ "sign-return-address-with-bkey", 2);
if (LangOpts.PointerAuthELFGOT)
getModule().addModuleFlag(llvm::Module::Min, "ptrauth-elf-got", 1);
@@ -1583,6 +1591,17 @@ void CodeGenModule::Release() {
}
}
}
+
+ // Emit `!llvm.errno.tbaa`, a module-level metadata that specifies the TBAA
+ // for an int access. This allows LLVM to reason about what memory can be
+ // accessed by certain library calls that only touch errno.
+ if (TBAA) {
+ TBAAAccessInfo TBAAInfo = getTBAAAccessInfo(Context.IntTy);
+ if (llvm::MDNode *IntegerNode = getTBAAAccessTagInfo(TBAAInfo)) {
+ auto *ErrnoTBAAMD = TheModule.getOrInsertNamedMetadata(ErrnoTBAAMDName);
+ ErrnoTBAAMD->addOperand(IntegerNode);
+ }
+ }
}
void CodeGenModule::EmitOpenCLMetadata() {
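
A hedged C example of what this enables (illustrative; assumes math-errno semantics):

    #include <math.h>

    float mix(float *p, double x) {
      double r = sqrt(x); // may write errno (an int) and nothing else
      // With !llvm.errno.tbaa describing the int access, LLVM can tell the
      // call cannot alias the float load, enabling reordering/CSE around it.
      return *p + (float)r;
    }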
@@ -2851,6 +2870,11 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
}
}
+ if (CodeGenOpts.CallGraphSection) {
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ createIndirectFunctionTypeMD(FD, F);
+ }
+
// Emit type metadata on member functions for member function pointer checks.
// These are only ever necessary on definitions; we're guaranteed that the
// definition will be present in the LTO unit as a result of LTO visibility.
@@ -3054,6 +3078,26 @@ static void setLinkageForGV(llvm::GlobalValue *GV, const NamedDecl *ND) {
GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
}
+static bool hasExistingGeneralizedTypeMD(llvm::Function *F) {
+ llvm::MDNode *MD = F->getMetadata(llvm::LLVMContext::MD_type);
+ return MD && MD->hasGeneralizedMDString();
+}
+
+void CodeGenModule::createIndirectFunctionTypeMD(const FunctionDecl *FD,
+ llvm::Function *F) {
+ // Return if generalized type metadata is already attached.
+ if (hasExistingGeneralizedTypeMD(F))
+ return;
+
+ // Functions without internal linkage could be indirect targets, and so
+ // could address-taken functions with internal linkage.
+ if (!F->hasLocalLinkage() ||
+ F->hasAddressTaken(nullptr, /*IgnoreCallbackUses=*/true,
+ /*IgnoreAssumeLikeCalls=*/true,
+ /*IgnoreLLVMUsed=*/false))
+ F->addTypeMetadata(0, CreateMetadataIdentifierGeneralized(FD->getType()));
+}
+
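
A hedged sketch (C, illustrative names) of which definitions the linkage/address-taken test above tags with generalized type metadata:

    static void local_unused(int x) { (void)x; } // internal, never
                                                 // address-taken: no metadata
    static void local_taken(int x)  { (void)x; } // internal but address-taken
                                                 // below: gets type metadata
    void external_fn(int x)         { (void)x; } // non-internal linkage:
                                                 // gets type metadata

    void (*sink)(int) = local_taken;             // takes local_taken's address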
void CodeGenModule::createFunctionTypeMetadataForIcall(const FunctionDecl *FD,
llvm::Function *F) {
// Only if we are checking indirect calls.
@@ -3069,10 +3113,12 @@ void CodeGenModule::createFunctionTypeMetadataForIcall(const FunctionDecl *FD,
/*GeneralizePointers=*/false);
llvm::Metadata *MD = CreateMetadataIdentifierForType(FnType);
F->addTypeMetadata(0, MD);
-
- QualType GenPtrFnType = GeneralizeFunctionType(getContext(), FD->getType(),
- /*GeneralizePointers=*/true);
- F->addTypeMetadata(0, CreateMetadataIdentifierGeneralized(GenPtrFnType));
+ // Add the generalized identifier if not added already.
+ if (!hasExistingGeneralizedTypeMD(F)) {
+ QualType GenPtrFnType = GeneralizeFunctionType(getContext(), FD->getType(),
+ /*GeneralizePointers=*/true);
+ F->addTypeMetadata(0, CreateMetadataIdentifierGeneralized(GenPtrFnType));
+ }
// Emit a hash-based bit set entry for cross-DSO calls.
if (CodeGenOpts.SanitizeCfiCrossDso)
@@ -3080,6 +3126,21 @@ void CodeGenModule::createFunctionTypeMetadataForIcall(const FunctionDecl *FD,
F->addTypeMetadata(0, llvm::ConstantAsMetadata::get(CrossDsoTypeId));
}
+void CodeGenModule::createCalleeTypeMetadataForIcall(const QualType &QT,
+ llvm::CallBase *CB) {
+ // Only if needed for call graph section and only for indirect calls.
+ if (!CodeGenOpts.CallGraphSection || !CB->isIndirectCall())
+ return;
+
+ llvm::Metadata *TypeIdMD = CreateMetadataIdentifierGeneralized(QT);
+ llvm::MDTuple *TypeTuple = llvm::MDTuple::get(
+ getLLVMContext(), {llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ llvm::Type::getInt64Ty(getLLVMContext()), 0)),
+ TypeIdMD});
+ llvm::MDTuple *MDN = llvm::MDNode::get(getLLVMContext(), {TypeTuple});
+ CB->setMetadata(llvm::LLVMContext::MD_callee_type, MDN);
+}
+
void CodeGenModule::setKCFIType(const FunctionDecl *FD, llvm::Function *F) {
llvm::LLVMContext &Ctx = F->getContext();
llvm::MDBuilder MDB(Ctx);
@@ -3215,6 +3276,9 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
!CodeGenOpts.SanitizeCfiCanonicalJumpTables)
createFunctionTypeMetadataForIcall(FD, F);
+ if (CodeGenOpts.CallGraphSection)
+ createIndirectFunctionTypeMD(FD, F);
+
if (LangOpts.Sanitize.has(SanitizerKind::KCFI))
setKCFIType(FD, F);
diff --git a/clang/lib/CodeGen/CodeGenModule.h b/clang/lib/CodeGen/CodeGenModule.h
index 3971b29..a253bcd 100644
--- a/clang/lib/CodeGen/CodeGenModule.h
+++ b/clang/lib/CodeGen/CodeGenModule.h
@@ -1644,6 +1644,13 @@ public:
void createFunctionTypeMetadataForIcall(const FunctionDecl *FD,
llvm::Function *F);
+ /// Create and attach type metadata if the function is a potential indirect
+ /// call target to support call graph section.
+ void createIndirectFunctionTypeMD(const FunctionDecl *FD, llvm::Function *F);
+
+ /// Create and attach type metadata to the given call.
+ void createCalleeTypeMetadataForIcall(const QualType &QT, llvm::CallBase *CB);
+
/// Set type metadata to the given function.
void setKCFIType(const FunctionDecl *FD, llvm::Function *F);
diff --git a/clang/lib/CodeGen/CodeGenTypeCache.h b/clang/lib/CodeGen/CodeGenTypeCache.h
index e273ebe..015306b 100644
--- a/clang/lib/CodeGen/CodeGenTypeCache.h
+++ b/clang/lib/CodeGen/CodeGenTypeCache.h
@@ -53,7 +53,7 @@ struct CodeGenTypeCache {
/// void*, void** in the target's default address space (often 0)
union {
- llvm::PointerType *UnqualPtrTy;
+ llvm::PointerType *DefaultPtrTy;
llvm::PointerType *VoidPtrTy;
llvm::PointerType *Int8PtrTy;
llvm::PointerType *VoidPtrPtrTy;
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 9e195a9..65c4763 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -774,7 +774,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
} else {
llvm::Value *VFPAddr =
CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
- VirtualFn = CGF.Builder.CreateAlignedLoad(CGF.UnqualPtrTy, VFPAddr,
+ VirtualFn = CGF.Builder.CreateAlignedLoad(CGF.DefaultPtrTy, VFPAddr,
CGF.getPointerAlign(),
"memptr.virtualfn");
}
@@ -816,7 +816,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// function pointer.
CGF.EmitBlock(FnNonVirtual);
llvm::Value *NonVirtualFn =
- Builder.CreateIntToPtr(FnAsInt, CGF.UnqualPtrTy, "memptr.nonvirtualfn");
+ Builder.CreateIntToPtr(FnAsInt, CGF.DefaultPtrTy, "memptr.nonvirtualfn");
// Check the function pointer if CFI on member function pointers is enabled.
if (ShouldEmitCFICheck) {
@@ -856,7 +856,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// We're done.
CGF.EmitBlock(FnEnd);
- llvm::PHINode *CalleePtr = Builder.CreatePHI(CGF.UnqualPtrTy, 2);
+ llvm::PHINode *CalleePtr = Builder.CreatePHI(CGF.DefaultPtrTy, 2);
CalleePtr->addIncoming(VirtualFn, FnVirtual);
CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
@@ -1403,7 +1403,7 @@ void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
// Grab the vtable pointer as an intptr_t*.
auto *ClassDecl = ElementType->castAsCXXRecordDecl();
- llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.UnqualPtrTy, ClassDecl);
+ llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.DefaultPtrTy, ClassDecl);
// Track back to entry -2 and pull out the offset there.
llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
@@ -1749,7 +1749,7 @@ llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
auto AuthenticateVTable = [&](Address ThisAddr, const CXXRecordDecl *Decl) {
if (!CGF.getLangOpts().PointerAuthCalls)
return;
- (void)CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, Decl,
+ (void)CGF.GetVTablePtr(ThisAddr, CGF.DefaultPtrTy, Decl,
CodeGenFunction::VTableAuthMode::MustTrap);
};
@@ -1775,7 +1775,7 @@ llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
if (PerformPostCastAuthentication)
VTable = CGF.EmitPointerAuthAuth(StrippingAuthInfo, VTable);
} else
- VTable = CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, SrcDecl);
+ VTable = CGF.GetVTablePtr(ThisAddr, CGF.DefaultPtrTy, SrcDecl);
// Compare the vptr against the expected vptr for the destination type at
// this offset.
@@ -1828,7 +1828,7 @@ llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
// Get the vtable pointer.
llvm::Value *VTable =
- CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
+ CGF.GetVTablePtr(ThisAddr, CGF.DefaultPtrTy, ClassDecl);
// Get the offset-to-top from the vtable.
OffsetToTop =
@@ -1841,7 +1841,7 @@ llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
// Get the vtable pointer.
llvm::Value *VTable =
- CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
+ CGF.GetVTablePtr(ThisAddr, CGF.DefaultPtrTy, ClassDecl);
// Get the offset-to-top from the vtable.
OffsetToTop =
@@ -2578,7 +2578,7 @@ llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
// We can't simply ignore this load using nosanitize metadata because
// the metadata may be lost.
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.SizeTy, CGF.UnqualPtrTy, false);
+ llvm::FunctionType::get(CGF.SizeTy, CGF.DefaultPtrTy, false);
llvm::FunctionCallee F =
CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
return CGF.Builder.CreateCall(F, numElementsPtr.emitRawPointer(CGF));
@@ -2921,7 +2921,7 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
// We're assuming that the destructor function is something we can
// reasonably call with the default CC.
- llvm::Type *dtorTy = CGF.UnqualPtrTy;
+ llvm::Type *dtorTy = CGF.DefaultPtrTy;
// Preserve address space of addr.
auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
@@ -5035,7 +5035,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
auto catchRD = CatchType->getAsCXXRecordDecl();
CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
- llvm::Type *PtrTy = CGF.UnqualPtrTy; // addrspace 0 ok
+ llvm::Type *PtrTy = CGF.DefaultPtrTy;
// Check for a copy expression. If we don't have a copy expression,
// that means a trivial copy is okay.
@@ -5244,7 +5244,7 @@ void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
llvm::FunctionCallee Dtor,
llvm::Constant *Addr) {
if (D.getTLSKind() != VarDecl::TLS_None) {
- llvm::PointerType *PtrTy = CGF.UnqualPtrTy;
+ llvm::PointerType *PtrTy = CGF.DefaultPtrTy;
// extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
llvm::FunctionType *AtExitTy =
diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 19d9265..71e2449 100644
--- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -528,7 +528,7 @@ public:
CGM.IntTy,
CGM.IntTy,
CGM.IntTy,
- getImageRelativeType(CGM.UnqualPtrTy),
+ getImageRelativeType(CGM.DefaultPtrTy),
};
BaseClassDescriptorType = llvm::StructType::create(
CGM.getLLVMContext(), FieldTypes, "rtti.BaseClassDescriptor");
@@ -540,7 +540,7 @@ public:
return ClassHierarchyDescriptorType;
// Forward-declare RTTIClassHierarchyDescriptor to break a cycle.
llvm::Type *FieldTypes[] = {CGM.IntTy, CGM.IntTy, CGM.IntTy,
- getImageRelativeType(CGM.UnqualPtrTy)};
+ getImageRelativeType(CGM.DefaultPtrTy)};
ClassHierarchyDescriptorType =
llvm::StructType::create(FieldTypes, "rtti.ClassHierarchyDescriptor");
return ClassHierarchyDescriptorType;
@@ -554,7 +554,7 @@ public:
CGM.IntTy,
CGM.IntTy,
getImageRelativeType(CGM.Int8PtrTy),
- getImageRelativeType(CGM.UnqualPtrTy),
+ getImageRelativeType(CGM.DefaultPtrTy),
getImageRelativeType(CGM.VoidTy),
};
llvm::ArrayRef<llvm::Type *> FieldTypesRef(FieldTypes);
@@ -752,7 +752,7 @@ public:
llvm::SmallString<23> CTATypeName("eh.CatchableTypeArray.");
CTATypeName += llvm::utostr(NumEntries);
- llvm::Type *CTType = getImageRelativeType(CGM.UnqualPtrTy);
+ llvm::Type *CTType = getImageRelativeType(CGM.DefaultPtrTy);
llvm::Type *FieldTypes[] = {
CGM.IntTy, // NumEntries
llvm::ArrayType::get(CTType, NumEntries) // CatchableTypes
@@ -779,7 +779,7 @@ public:
llvm::FunctionCallee getThrowFn() {
// _CxxThrowException is passed an exception object and a ThrowInfo object
// which describes the exception.
- llvm::Type *Args[] = {CGM.Int8PtrTy, CGM.UnqualPtrTy};
+ llvm::Type *Args[] = {CGM.Int8PtrTy, CGM.DefaultPtrTy};
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
llvm::FunctionCallee Throw =
@@ -920,7 +920,7 @@ void MicrosoftCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
void MicrosoftCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
llvm::Value *Args[] = {llvm::ConstantPointerNull::get(CGM.Int8PtrTy),
- llvm::ConstantPointerNull::get(CGM.UnqualPtrTy)};
+ llvm::ConstantPointerNull::get(CGM.DefaultPtrTy)};
llvm::FunctionCallee Fn = getThrowFn();
if (isNoReturn)
CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, Args);
@@ -1969,13 +1969,13 @@ CGCallee MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
SourceLocation Loc) {
CGBuilderTy &Builder = CGF.Builder;
- Ty = CGF.UnqualPtrTy;
+ Ty = CGF.DefaultPtrTy;
Address VPtr =
adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
llvm::Value *VTable =
- CGF.GetVTablePtr(VPtr, CGF.UnqualPtrTy, MethodDecl->getParent());
+ CGF.GetVTablePtr(VPtr, CGF.DefaultPtrTy, MethodDecl->getParent());
MicrosoftVTableContext &VFTContext = CGM.getMicrosoftVTableContext();
MethodVFTableLocation ML = VFTContext.getMethodVFTableLocation(GD);
@@ -2136,9 +2136,9 @@ MicrosoftCXXABI::EmitVirtualMemPtrThunk(const CXXMethodDecl *MD,
// Load the vfptr and then callee from the vftable. The callee should have
// adjusted 'this' so that the vfptr is at offset zero.
- llvm::Type *ThunkPtrTy = CGF.UnqualPtrTy;
+ llvm::Type *ThunkPtrTy = CGF.DefaultPtrTy;
llvm::Value *VTable =
- CGF.GetVTablePtr(getThisAddress(CGF), CGF.UnqualPtrTy, MD->getParent());
+ CGF.GetVTablePtr(getThisAddress(CGF), CGF.DefaultPtrTy, MD->getParent());
llvm::Value *VFuncPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
ThunkPtrTy, VTable, ML.Index, "vfn");
@@ -2562,7 +2562,7 @@ static ConstantAddress getInitThreadEpochPtr(CodeGenModule &CGM) {
static llvm::FunctionCallee getInitThreadHeaderFn(CodeGenModule &CGM) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- CGM.UnqualPtrTy, /*isVarArg=*/false);
+ CGM.DefaultPtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(
FTy, "_Init_thread_header",
llvm::AttributeList::get(CGM.getLLVMContext(),
@@ -2574,7 +2574,7 @@ static llvm::FunctionCallee getInitThreadHeaderFn(CodeGenModule &CGM) {
static llvm::FunctionCallee getInitThreadFooterFn(CodeGenModule &CGM) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- CGM.UnqualPtrTy, /*isVarArg=*/false);
+ CGM.DefaultPtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(
FTy, "_Init_thread_footer",
llvm::AttributeList::get(CGM.getLLVMContext(),
@@ -2586,7 +2586,7 @@ static llvm::FunctionCallee getInitThreadFooterFn(CodeGenModule &CGM) {
static llvm::FunctionCallee getInitThreadAbortFn(CodeGenModule &CGM) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- CGM.UnqualPtrTy, /*isVarArg=*/false);
+ CGM.DefaultPtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(
FTy, "_Init_thread_abort",
llvm::AttributeList::get(CGM.getLLVMContext(),
@@ -3169,7 +3169,7 @@ MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
}
llvm::Value *VBTable =
- Builder.CreateAlignedLoad(CGM.UnqualPtrTy, VBPtr, VBPtrAlign, "vbtable");
+ Builder.CreateAlignedLoad(CGM.DefaultPtrTy, VBPtr, VBPtrAlign, "vbtable");
// Translate from byte offset to table index. It improves analyzability.
llvm::Value *VBTableIndex = Builder.CreateAShr(
@@ -3825,7 +3825,7 @@ MSRTTIBuilder::getBaseClassArray(SmallVectorImpl<MSRTTIClass> &Classes) {
// mode) bytes of padding. We provide a pointer sized amount of padding by
// adding +1 to Classes.size(). The sections have pointer alignment and are
// marked pick-any so it shouldn't matter.
- llvm::Type *PtrType = ABI.getImageRelativeType(CGM.UnqualPtrTy);
+ llvm::Type *PtrType = ABI.getImageRelativeType(CGM.DefaultPtrTy);
auto *ArrType = llvm::ArrayType::get(PtrType, Classes.size() + 1);
auto *BCA =
new llvm::GlobalVariable(Module, ArrType,
@@ -4372,7 +4372,7 @@ llvm::GlobalVariable *MicrosoftCXXABI::getCatchableTypeArray(QualType T) {
CatchableTypes.insert(getCatchableType(getContext().VoidPtrTy));
uint32_t NumEntries = CatchableTypes.size();
- llvm::Type *CTType = getImageRelativeType(CGM.UnqualPtrTy);
+ llvm::Type *CTType = getImageRelativeType(CGM.DefaultPtrTy);
llvm::ArrayType *AT = llvm::ArrayType::get(CTType, NumEntries);
llvm::StructType *CTAType = getCatchableTypeArrayType(NumEntries);
llvm::Constant *Fields[] = {
diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index 5049a0a..f49a5af 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -12,6 +12,7 @@
#include "CGBuiltin.h"
#include "CodeGenFunction.h"
+#include "clang/Basic/SyncScope.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -313,33 +314,33 @@ void CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
}
// Older builtins had an enum argument for the memory scope.
+ const char *SSN = nullptr;
int scope = cast<llvm::ConstantInt>(Scope)->getZExtValue();
switch (scope) {
- case 0: // __MEMORY_SCOPE_SYSTEM
+ case AtomicScopeGenericModel::System: // __MEMORY_SCOPE_SYSTEM
SSID = llvm::SyncScope::System;
break;
- case 1: // __MEMORY_SCOPE_DEVICE
- if (getTarget().getTriple().isSPIRV())
- SSID = getLLVMContext().getOrInsertSyncScopeID("device");
- else
- SSID = getLLVMContext().getOrInsertSyncScopeID("agent");
+ case AtomicScopeGenericModel::Device: // __MEMORY_SCOPE_DEVICE
+ SSN = getTarget().getTriple().isSPIRV() ? "device" : "agent";
break;
- case 2: // __MEMORY_SCOPE_WRKGRP
- SSID = getLLVMContext().getOrInsertSyncScopeID("workgroup");
+ case AtomicScopeGenericModel::Workgroup: // __MEMORY_SCOPE_WRKGRP
+ SSN = "workgroup";
break;
- case 3: // __MEMORY_SCOPE_WVFRNT
- if (getTarget().getTriple().isSPIRV())
- SSID = getLLVMContext().getOrInsertSyncScopeID("subgroup");
- else
- SSID = getLLVMContext().getOrInsertSyncScopeID("wavefront");
+ case AtomicScopeGenericModel::Cluster: // __MEMORY_SCOPE_CLUSTR
+ SSN = getTarget().getTriple().isSPIRV() ? "workgroup" : "cluster";
+ break;
+ case AtomicScopeGenericModel::Wavefront: // __MEMORY_SCOPE_WVFRNT
+ SSN = getTarget().getTriple().isSPIRV() ? "subgroup" : "wavefront";
break;
- case 4: // __MEMORY_SCOPE_SINGLE
+ case AtomicScopeGenericModel::Single: // __MEMORY_SCOPE_SINGLE
SSID = llvm::SyncScope::SingleThread;
break;
default:
SSID = llvm::SyncScope::System;
break;
}
+ if (SSN)
+ SSID = getLLVMContext().getOrInsertSyncScopeID(SSN);
}
llvm::Value *CodeGenFunction::EmitScalarOrConstFoldImmArg(unsigned ICEArguments,
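
A hedged example of a call that reaches this switch (assuming the __scoped_atomic_* builtins; the scope macro selects the case above, e.g. __MEMORY_SCOPE_DEVICE maps to "agent" on AMDGCN and "device" on SPIR-V):

    int bump(int *p) {
      // The scope enumerator is lowered to an LLVM sync scope name by the
      // switch above.
      return __scoped_atomic_fetch_add(p, 1, __ATOMIC_RELAXED,
                                       __MEMORY_SCOPE_DEVICE);
    }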
diff --git a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
index 2429a43..60f9b86 100644
--- a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
@@ -2037,7 +2037,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vld1q_x3_v:
case NEON::BI__builtin_neon_vld1_x4_v:
case NEON::BI__builtin_neon_vld1q_x4_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
@@ -2263,11 +2263,11 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
// in AArch64 it comes last. We may want to stick to one or another.
if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
Arch == llvm::Triple::aarch64_32) {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
}
- llvm::Type *Tys[2] = {UnqualPtrTy, VTy};
+ llvm::Type *Tys[2] = {DefaultPtrTy, VTy};
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
}
case NEON::BI__builtin_neon_vsubhn_v: {
@@ -2858,7 +2858,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(
BuiltinID == clang::ARM::BI__builtin_arm_ldaex ? Intrinsic::arm_ldaex
: Intrinsic::arm_ldrex,
- UnqualPtrTy);
+ DefaultPtrTy);
CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
Val->addParamAttr(
0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
@@ -5225,7 +5225,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
? Intrinsic::aarch64_ldaxr
: Intrinsic::aarch64_ldxr,
- UnqualPtrTy);
+ DefaultPtrTy);
CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
Val->addParamAttr(
0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
@@ -7482,42 +7482,42 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_v:
case NEON::BI__builtin_neon_vld3q_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_v:
case NEON::BI__builtin_neon_vld4q_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld2_dup_v:
case NEON::BI__builtin_neon_vld2q_dup_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_dup_v:
case NEON::BI__builtin_neon_vld3q_dup_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_dup_v:
case NEON::BI__builtin_neon_vld4q_dup_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
diff --git a/clang/lib/CodeGen/TargetBuiltins/PPC.cpp b/clang/lib/CodeGen/TargetBuiltins/PPC.cpp
index e71dc9e..44d5938 100644
--- a/clang/lib/CodeGen/TargetBuiltins/PPC.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/PPC.cpp
@@ -59,7 +59,7 @@ static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,
Constraints += MachineClobbers;
}
- llvm::Type *PtrType = CGF.UnqualPtrTy;
+ llvm::Type *PtrType = CGF.DefaultPtrTy;
llvm::FunctionType *FTy = llvm::FunctionType::get(RetType, {PtrType}, false);
llvm::InlineAsm *IA =
diff --git a/clang/lib/CodeGen/Targets/AMDGPU.cpp b/clang/lib/CodeGen/Targets/AMDGPU.cpp
index 16d5919..0bc4b4b7 100644
--- a/clang/lib/CodeGen/Targets/AMDGPU.cpp
+++ b/clang/lib/CodeGen/Targets/AMDGPU.cpp
@@ -508,6 +508,10 @@ AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
case SyncScope::WavefrontScope:
Name = "wavefront";
break;
+ case SyncScope::HIPCluster:
+ case SyncScope::ClusterScope:
+ Name = "cluster";
+ break;
case SyncScope::HIPWorkgroup:
case SyncScope::OpenCLWorkGroup:
case SyncScope::WorkgroupScope:
diff --git a/clang/lib/CodeGen/Targets/PPC.cpp b/clang/lib/CodeGen/Targets/PPC.cpp
index 380e8c0..35e7655 100644
--- a/clang/lib/CodeGen/Targets/PPC.cpp
+++ b/clang/lib/CodeGen/Targets/PPC.cpp
@@ -490,7 +490,7 @@ RValue PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
llvm::Type *DirectTy = CGF.ConvertType(Ty), *ElementTy = DirectTy;
if (isIndirect)
- DirectTy = CGF.UnqualPtrTy;
+ DirectTy = CGF.DefaultPtrTy;
// Case 1: consume registers.
Address RegAddr = Address::invalid();
diff --git a/clang/lib/CodeGen/Targets/SPIR.cpp b/clang/lib/CodeGen/Targets/SPIR.cpp
index 3f6d4e0..80e096e 100644
--- a/clang/lib/CodeGen/Targets/SPIR.cpp
+++ b/clang/lib/CodeGen/Targets/SPIR.cpp
@@ -93,6 +93,8 @@ inline StringRef mapClangSyncScopeToLLVM(SyncScope Scope) {
case SyncScope::OpenCLSubGroup:
case SyncScope::WavefrontScope:
return "subgroup";
+ case SyncScope::HIPCluster:
+ case SyncScope::ClusterScope:
case SyncScope::HIPWorkgroup:
case SyncScope::OpenCLWorkGroup:
case SyncScope::WorkgroupScope: