Diffstat (limited to 'clang/lib/CodeGen')
-rw-r--r--  clang/lib/CodeGen/ABIInfo.cpp | 2
-rw-r--r--  clang/lib/CodeGen/ABIInfoImpl.cpp | 36
-rw-r--r--  clang/lib/CodeGen/CGBlocks.cpp | 3
-rw-r--r--  clang/lib/CodeGen/CGBuiltin.cpp | 19
-rw-r--r--  clang/lib/CodeGen/CGCUDANV.cpp | 3
-rw-r--r--  clang/lib/CodeGen/CGCXX.cpp | 19
-rw-r--r--  clang/lib/CodeGen/CGCXXABI.cpp | 4
-rw-r--r--  clang/lib/CodeGen/CGCXXABI.h | 20
-rw-r--r--  clang/lib/CodeGen/CGCall.cpp | 77
-rw-r--r--  clang/lib/CodeGen/CGCall.h | 8
-rw-r--r--  clang/lib/CodeGen/CGClass.cpp | 95
-rw-r--r--  clang/lib/CodeGen/CGDebugInfo.cpp | 127
-rw-r--r--  clang/lib/CodeGen/CGDecl.cpp | 61
-rw-r--r--  clang/lib/CodeGen/CGExpr.cpp | 124
-rw-r--r--  clang/lib/CodeGen/CGExprAgg.cpp | 35
-rw-r--r--  clang/lib/CodeGen/CGExprCXX.cpp | 55
-rw-r--r--  clang/lib/CodeGen/CGExprConstant.cpp | 29
-rw-r--r--  clang/lib/CodeGen/CGExprScalar.cpp | 121
-rw-r--r--  clang/lib/CodeGen/CGHLSLRuntime.cpp | 49
-rw-r--r--  clang/lib/CodeGen/CGHLSLRuntime.h | 4
-rw-r--r--  clang/lib/CodeGen/CGNonTrivialStruct.cpp | 9
-rw-r--r--  clang/lib/CodeGen/CGObjC.cpp | 4
-rw-r--r--  clang/lib/CodeGen/CGObjCMac.cpp | 11
-rw-r--r--  clang/lib/CodeGen/CGObjCRuntime.cpp | 4
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntime.cpp | 35
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp | 4
-rw-r--r--  clang/lib/CodeGen/CGPointerAuth.cpp | 2
-rw-r--r--  clang/lib/CodeGen/CGStmt.cpp | 4
-rw-r--r--  clang/lib/CodeGen/CGStmtOpenMP.cpp | 9
-rw-r--r--  clang/lib/CodeGen/CGVTables.cpp | 8
-rw-r--r--  clang/lib/CodeGen/CodeGenFunction.cpp | 8
-rw-r--r--  clang/lib/CodeGen/CodeGenFunction.h | 36
-rw-r--r--  clang/lib/CodeGen/CodeGenModule.cpp | 28
-rw-r--r--  clang/lib/CodeGen/CodeGenTBAA.cpp | 13
-rw-r--r--  clang/lib/CodeGen/CodeGenTypes.cpp | 33
-rw-r--r--  clang/lib/CodeGen/CoverageMappingGen.cpp | 10
-rw-r--r--  clang/lib/CodeGen/HLSLBufferLayoutBuilder.cpp | 3
-rw-r--r--  clang/lib/CodeGen/ItaniumCXXABI.cpp | 155
-rw-r--r--  clang/lib/CodeGen/MicrosoftCXXABI.cpp | 31
-rw-r--r--  clang/lib/CodeGen/SwiftCallingConv.cpp | 9
-rw-r--r--  clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp | 29
-rw-r--r--  clang/lib/CodeGen/TargetBuiltins/X86.cpp | 9
-rw-r--r--  clang/lib/CodeGen/Targets/AArch64.cpp | 9
-rw-r--r--  clang/lib/CodeGen/Targets/AMDGPU.cpp | 6
-rw-r--r--  clang/lib/CodeGen/Targets/ARC.cpp | 5
-rw-r--r--  clang/lib/CodeGen/Targets/ARM.cpp | 9
-rw-r--r--  clang/lib/CodeGen/Targets/BPF.cpp | 5
-rw-r--r--  clang/lib/CodeGen/Targets/CSKY.cpp | 2
-rw-r--r--  clang/lib/CodeGen/Targets/Hexagon.cpp | 5
-rw-r--r--  clang/lib/CodeGen/Targets/Lanai.cpp | 5
-rw-r--r--  clang/lib/CodeGen/Targets/LoongArch.cpp | 10
-rw-r--r--  clang/lib/CodeGen/Targets/Mips.cpp | 8
-rw-r--r--  clang/lib/CodeGen/Targets/NVPTX.cpp | 6
-rw-r--r--  clang/lib/CodeGen/Targets/PPC.cpp | 8
-rw-r--r--  clang/lib/CodeGen/Targets/RISCV.cpp | 10
-rw-r--r--  clang/lib/CodeGen/Targets/SPIR.cpp | 6
-rw-r--r--  clang/lib/CodeGen/Targets/Sparc.cpp | 2
-rw-r--r--  clang/lib/CodeGen/Targets/SystemZ.cpp | 8
-rw-r--r--  clang/lib/CodeGen/Targets/WebAssembly.cpp | 3
-rw-r--r--  clang/lib/CodeGen/Targets/X86.cpp | 59
-rw-r--r--  clang/lib/CodeGen/Targets/XCore.cpp | 4
61 files changed, 855 insertions, 660 deletions
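
Most of the hunks below apply one of a few mechanical rewrites: RecordType/EnumType getDecl() calls become getOriginalDecl() followed by getDefinitionOrSelf(), and ASTContext::getRecordType()/getTagDeclType()/getEnumType() give way to getCanonicalTagType(), which yields a CanQualType. A minimal sketch of that pattern, using only the Clang AST entry points that appear in the hunks themselves (illustration only, not part of the patch):

#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Type.h"

using namespace clang;

// Old form: RT->getDecl(); new form: resolve through the original
// declaration and prefer its definition when one exists.
static const RecordDecl *resolveRecord(const RecordType *RT) {
  return RT->getOriginalDecl()->getDefinitionOrSelf();
}

// Same rewrite for enums, e.g. when taking the underlying integer type.
static QualType underlyingIntegerType(const EnumType *ET) {
  return ET->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
}

// Old form: Ctx.getRecordType(RD) / Ctx.getTagDeclType(RD), yielding QualType;
// new form: the canonical tag type, carried as CanQualType.
static CanQualType recordTypeOf(ASTContext &Ctx, const CXXRecordDecl *RD) {
  return Ctx.getCanonicalTagType(RD);
}
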
diff --git a/clang/lib/CodeGen/ABIInfo.cpp b/clang/lib/CodeGen/ABIInfo.cpp
index 3ef430e1..1604ad5 100644
--- a/clang/lib/CodeGen/ABIInfo.cpp
+++ b/clang/lib/CodeGen/ABIInfo.cpp
@@ -68,7 +68,7 @@ bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
return false;
Members *= NElements;
} else if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->hasFlexibleArrayMember())
return false;
diff --git a/clang/lib/CodeGen/ABIInfoImpl.cpp b/clang/lib/CodeGen/ABIInfoImpl.cpp
index 0a612d3..79dbe70 100644
--- a/clang/lib/CodeGen/ABIInfoImpl.cpp
+++ b/clang/lib/CodeGen/ABIInfoImpl.cpp
@@ -29,7 +29,7 @@ ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
ASTContext &Context = getContext();
if (const auto *EIT = Ty->getAs<BitIntType>())
@@ -53,7 +53,7 @@ ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ RetTy = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() >
@@ -105,13 +105,12 @@ llvm::Type *CodeGen::getVAListElementType(CodeGenFunction &CGF) {
CGCXXABI::RecordArgABI CodeGen::getRecordArgABI(const RecordType *RT,
CGCXXABI &CXXABI) {
- const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
- if (!RD) {
- if (!RT->getDecl()->canPassInRegisters())
- return CGCXXABI::RAA_Indirect;
- return CGCXXABI::RAA_Default;
- }
- return CXXABI.getRecordArgABI(RD);
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ return CXXABI.getRecordArgABI(CXXRD);
+ if (!RD->canPassInRegisters())
+ return CGCXXABI::RAA_Indirect;
+ return CGCXXABI::RAA_Default;
}
CGCXXABI::RecordArgABI CodeGen::getRecordArgABI(QualType T, CGCXXABI &CXXABI) {
@@ -125,20 +124,21 @@ bool CodeGen::classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
const ABIInfo &Info) {
QualType Ty = FI.getReturnType();
- if (const auto *RT = Ty->getAs<RecordType>())
- if (!isa<CXXRecordDecl>(RT->getDecl()) &&
- !RT->getDecl()->canPassInRegisters()) {
+ if (const auto *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
+ if (!isa<CXXRecordDecl>(RD) && !RD->canPassInRegisters()) {
FI.getReturnInfo() = Info.getNaturalAlignIndirect(
Ty, Info.getDataLayout().getAllocaAddrSpace());
return true;
}
+ }
return CXXABI.classifyReturnType(FI);
}
QualType CodeGen::useFirstFieldIfTransparentUnion(QualType Ty) {
if (const RecordType *UT = Ty->getAsUnionType()) {
- const RecordDecl *UD = UT->getDecl();
+ const RecordDecl *UD = UT->getOriginalDecl()->getDefinitionOrSelf();
if (UD->hasAttr<TransparentUnionAttr>()) {
assert(!UD->field_empty() && "sema created an empty transparent union");
return UD->field_begin()->getType();
@@ -276,7 +276,7 @@ bool CodeGen::isEmptyField(ASTContext &Context, const FieldDecl *FD,
// according to the Itanium ABI. The exception applies only to records,
// not arrays of records, so we must also check whether we stripped off an
// array type above.
- if (isa<CXXRecordDecl>(RT->getDecl()) &&
+ if (isa<CXXRecordDecl>(RT->getOriginalDecl()) &&
(WasArray || (!AsIfNoUniqueAddr && !FD->hasAttr<NoUniqueAddressAttr>())))
return false;
@@ -288,7 +288,7 @@ bool CodeGen::isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays,
const RecordType *RT = T->getAs<RecordType>();
if (!RT)
return false;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->hasFlexibleArrayMember())
return false;
@@ -320,7 +320,7 @@ bool CodeGen::isEmptyRecordForLayout(const ASTContext &Context, QualType T) {
if (!RT)
return false;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
@@ -344,7 +344,7 @@ const Type *CodeGen::isSingleElementStruct(QualType T, ASTContext &Context) {
if (!RT)
return nullptr;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->hasFlexibleArrayMember())
return nullptr;
@@ -463,7 +463,7 @@ bool CodeGen::isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) {
const RecordType *RT = Ty->getAs<RecordType>();
if (!RT)
return false;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index 0e80522..cfeba6f 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -425,7 +425,8 @@ static bool isSafeForCXXConstantCapture(QualType type) {
// Only records can be unsafe.
if (!recordType) return true;
- const auto *record = cast<CXXRecordDecl>(recordType->getDecl());
+ const auto *record =
+ cast<CXXRecordDecl>(recordType->getOriginalDecl())->getDefinitionOrSelf();
// Maintain semantics for classes with non-trivial dtors or copy ctors.
if (!record->hasTrivialDestructor()) return false;
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index a648bde..071667a 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -5985,8 +5985,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Create a temporary array to hold the sizes of local pointer arguments
// for the block. \p First is the position of the first size argument.
- auto CreateArrayForSizeVar = [=](unsigned First)
- -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> {
+ auto CreateArrayForSizeVar =
+ [=](unsigned First) -> std::pair<llvm::Value *, llvm::Value *> {
llvm::APInt ArraySize(32, NumArgs - First);
QualType SizeArrayTy = getContext().getConstantArrayType(
getContext().getSizeType(), ArraySize, nullptr,
@@ -5999,9 +5999,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// actually the Alloca ascasted to the default AS, hence the
// stripPointerCasts()
llvm::Value *Alloca = TmpPtr->stripPointerCasts();
- llvm::Value *TmpSize = EmitLifetimeStart(
- CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), Alloca);
llvm::Value *ElemPtr;
+ EmitLifetimeStart(Alloca);
// Each of the following arguments specifies the size of the corresponding
// argument passed to the enqueued block.
auto *Zero = llvm::ConstantInt::get(IntTy, 0);
@@ -6018,7 +6017,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
// Return the Alloca itself rather than a potential ascast as this is only
// used by the paired EmitLifetimeEnd.
- return {ElemPtr, TmpSize, Alloca};
+ return {ElemPtr, Alloca};
};
// Could have events and/or varargs.
@@ -6030,7 +6029,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Value *Kernel =
Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
- auto [ElemPtr, TmpSize, TmpPtr] = CreateArrayForSizeVar(4);
+ auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(4);
// Create a vector of the arguments, as well as a constant value to
// express to the runtime the number of variadic arguments.
@@ -6045,8 +6044,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
auto Call = RValue::get(
EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
- if (TmpSize)
- EmitLifetimeEnd(TmpSize, TmpPtr);
+ EmitLifetimeEnd(TmpPtr);
return Call;
}
// Any calls now have event arguments passed.
@@ -6111,15 +6109,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
ArgTys.push_back(Int32Ty);
Name = "__enqueue_kernel_events_varargs";
- auto [ElemPtr, TmpSize, TmpPtr] = CreateArrayForSizeVar(7);
+ auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(7);
Args.push_back(ElemPtr);
ArgTys.push_back(ElemPtr->getType());
llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
auto Call = RValue::get(
EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
- if (TmpSize)
- EmitLifetimeEnd(TmpSize, TmpPtr);
+ EmitLifetimeEnd(TmpPtr);
return Call;
}
llvm_unreachable("Unexpected enqueue_kernel signature");
diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp
index dd26be7..c7f4bf8 100644
--- a/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/clang/lib/CodeGen/CGCUDANV.cpp
@@ -1131,7 +1131,8 @@ void CGNVCUDARuntime::handleVarRegistration(const VarDecl *D,
// Builtin surfaces and textures and their template arguments are
// also registered with CUDA runtime.
const auto *TD = cast<ClassTemplateSpecializationDecl>(
- D->getType()->castAs<RecordType>()->getDecl());
+ D->getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
const TemplateArgumentList &Args = TD->getTemplateArgs();
if (TD->hasAttr<CUDADeviceBuiltinSurfaceTypeAttr>()) {
assert(Args.size() == 2 &&
diff --git a/clang/lib/CodeGen/CGCXX.cpp b/clang/lib/CodeGen/CGCXX.cpp
index 78a7b02..f9aff89 100644
--- a/clang/lib/CodeGen/CGCXX.cpp
+++ b/clang/lib/CodeGen/CGCXX.cpp
@@ -83,8 +83,9 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
if (I.isVirtual()) continue;
// Skip base classes with trivial destructors.
- const auto *Base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ const auto *Base = cast<CXXRecordDecl>(
+ I.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (Base->hasTrivialDestructor()) continue;
// If we've already found a base class with a non-trivial
@@ -277,18 +278,18 @@ static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF,
/// BuildAppleKextVirtualCall - This routine is to support gcc's kext ABI making
/// indirect call to virtual functions. It makes the call through indexing
/// into the vtable.
-CGCallee
-CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
- NestedNameSpecifier *Qual,
- llvm::Type *Ty) {
- assert((Qual->getKind() == NestedNameSpecifier::TypeSpec) &&
+CGCallee CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
+ NestedNameSpecifier Qual,
+ llvm::Type *Ty) {
+ assert(Qual.getKind() == NestedNameSpecifier::Kind::Type &&
"BuildAppleKextVirtualCall - bad Qual kind");
- const Type *QTy = Qual->getAsType();
+ const Type *QTy = Qual.getAsType();
QualType T = QualType(QTy, 0);
const RecordType *RT = T->getAs<RecordType>();
assert(RT && "BuildAppleKextVirtualCall - Qual type must be record");
- const auto *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const auto *RD =
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf();
if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD))
return BuildAppleKextVirtualDestructorCall(DD, Dtor_Complete, RD);
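
A related change visible in the CGCXX.cpp hunk above: NestedNameSpecifier is now passed by value rather than by pointer, so member access switches from -> to . and the kind check uses the scoped enumerator NestedNameSpecifier::Kind::Type. A hedged sketch of the new calling convention (illustration only, not part of the patch):

#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/Type.h"
#include <cassert>

// Qualifier is received by value; the old signature took NestedNameSpecifier*.
static const clang::Type *typeOfQualifier(clang::NestedNameSpecifier Qual) {
  assert(Qual.getKind() == clang::NestedNameSpecifier::Kind::Type &&
         "expected a type qualifier");
  return Qual.getAsType();
}
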
diff --git a/clang/lib/CodeGen/CGCXXABI.cpp b/clang/lib/CodeGen/CGCXXABI.cpp
index d42e0bb8..cca6758 100644
--- a/clang/lib/CodeGen/CGCXXABI.cpp
+++ b/clang/lib/CodeGen/CGCXXABI.cpp
@@ -52,7 +52,7 @@ CGCallee CGCXXABI::EmitLoadOfMemberFunctionPointer(
const auto *RD = MPT->getMostRecentCXXRecordDecl();
ThisPtrForCall =
- CGF.getAsNaturalPointerTo(This, CGF.getContext().getRecordType(RD));
+ CGF.getAsNaturalPointerTo(This, CGF.getContext().getCanonicalTagType(RD));
const FunctionProtoType *FPT =
MPT->getPointeeType()->getAs<FunctionProtoType>();
llvm::Constant *FnPtr = llvm::Constant::getNullValue(
@@ -106,7 +106,7 @@ CGCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
llvm::Constant *CGCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
return GetBogusMemberPointer(CGM.getContext().getMemberPointerType(
- MD->getType(), /*Qualifier=*/nullptr, MD->getParent()));
+ MD->getType(), /*Qualifier=*/std::nullopt, MD->getParent()));
}
llvm::Constant *CGCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
diff --git a/clang/lib/CodeGen/CGCXXABI.h b/clang/lib/CodeGen/CGCXXABI.h
index 96fe046..2dd320d 100644
--- a/clang/lib/CodeGen/CGCXXABI.h
+++ b/clang/lib/CodeGen/CGCXXABI.h
@@ -294,14 +294,22 @@ public:
Address Value,
QualType SrcRecordTy) = 0;
+ struct ExactDynamicCastInfo {
+ bool RequiresCastToPrimaryBase;
+ CharUnits Offset;
+ };
+
+ virtual std::optional<ExactDynamicCastInfo>
+ getExactDynamicCastInfo(QualType SrcRecordTy, QualType DestTy,
+ QualType DestRecordTy) = 0;
+
/// Emit a dynamic_cast from SrcRecordTy to DestRecordTy. The cast fails if
/// the dynamic type of Value is not exactly DestRecordTy.
- virtual llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address Value,
- QualType SrcRecordTy,
- QualType DestTy,
- QualType DestRecordTy,
- llvm::BasicBlock *CastSuccess,
- llvm::BasicBlock *CastFail) = 0;
+ virtual llvm::Value *emitExactDynamicCast(
+ CodeGenFunction &CGF, Address Value, QualType SrcRecordTy,
+ QualType DestTy, QualType DestRecordTy,
+ const ExactDynamicCastInfo &CastInfo, llvm::BasicBlock *CastSuccess,
+ llvm::BasicBlock *CastFail) = 0;
virtual bool EmitBadCastCall(CodeGenFunction &CGF) = 0;
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index d9bd443..b959982 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -125,16 +125,16 @@ unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
/// calling a method pointer.
CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
const CXXMethodDecl *MD) {
- QualType RecTy;
+ CanQualType RecTy;
if (RD)
- RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
+ RecTy = Context.getCanonicalTagType(RD);
else
RecTy = Context.VoidTy;
if (MD)
- RecTy = Context.getAddrSpaceQualType(
- RecTy, MD->getMethodQualifiers().getAddressSpace());
- return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
+ RecTy = CanQualType::CreateUnsafe(Context.getAddrSpaceQualType(
+ RecTy, MD->getMethodQualifiers().getAddressSpace()));
+ return Context.getPointerType(RecTy);
}
/// Returns the canonical formal type of the given C++ method.
@@ -1008,7 +1008,7 @@ getTypeExpansion(QualType Ty, const ASTContext &Context) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
SmallVector<const CXXBaseSpecifier *, 1> Bases;
SmallVector<const FieldDecl *, 1> Fields;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
assert(!RD->hasFlexibleArrayMember() &&
"Cannot expand structure with flexible array.");
if (RD->isUnion()) {
@@ -1895,7 +1895,7 @@ bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context,
// complex destructor or a non-trivially copyable type.
if (const RecordType *RT =
ReturnType.getCanonicalType()->getAs<RecordType>()) {
- if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl()))
return ClassDecl->hasTrivialDestructor();
}
return ReturnType.isTriviallyCopyableType(Context);
@@ -2870,7 +2870,8 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
// (e.g., Obj-C ARC-managed structs, MSVC callee-destroyed objects).
if (!ParamType.isDestructedType() || !ParamType->isRecordType() ||
ParamType->castAs<RecordType>()
- ->getDecl()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
->isParamDestroyedInCallee())
Attrs.addAttribute(llvm::Attribute::DeadOnReturn);
}
@@ -3828,7 +3829,7 @@ static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
SmallVectorImpl<uint64_t> &Bits) {
ASTContext &Context = CGM.getContext();
int CharWidth = Context.getCharWidth();
- const RecordDecl *RD = RTy->getDecl()->getDefinition();
+ const RecordDecl *RD = RTy->getOriginalDecl()->getDefinition();
const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD);
const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD);
@@ -4289,7 +4290,10 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
// Deactivate the cleanup for the callee-destructed param that was pushed.
if (type->isRecordType() && !CurFuncIsThunk &&
- type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
+ type->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->isParamDestroyedInCallee() &&
param->needsDestruction(getContext())) {
EHScopeStack::stable_iterator cleanup =
CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
@@ -4319,10 +4323,7 @@ static void emitWriteback(CodeGenFunction &CGF,
if (writeback.WritebackExpr) {
CGF.EmitIgnoredExpr(writeback.WritebackExpr);
-
- if (writeback.LifetimeSz)
- CGF.EmitLifetimeEnd(writeback.LifetimeSz,
- writeback.Temporary.getBasePointer());
+ CGF.EmitLifetimeEnd(writeback.Temporary.getBasePointer());
return;
}
@@ -4885,8 +4886,10 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
// In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
// However, we still have to push an EH-only cleanup in case we unwind before
// we make it to the call.
- if (type->isRecordType() &&
- type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
+ if (type->isRecordType() && type->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->isParamDestroyedInCallee()) {
// If we're using inalloca, use the argument memory. Otherwise, use a
// temporary.
AggValueSlot Slot = args.isUsingInAlloca()
@@ -5282,7 +5285,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// If the call returns a temporary with struct return, create a temporary
// alloca to hold the result, unless one is given to us.
Address SRetPtr = Address::invalid();
- llvm::Value *UnusedReturnSizePtr = nullptr;
+ bool NeedSRetLifetimeEnd = false;
if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
// For virtual function pointer thunks and musttail calls, we must always
// forward an incoming SRet pointer to the callee, because a local alloca
@@ -5296,11 +5299,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
SRetPtr = ReturnValue.getAddress();
} else {
SRetPtr = CreateMemTempWithoutCast(RetTy, "tmp");
- if (HaveInsertPoint() && ReturnValue.isUnused()) {
- llvm::TypeSize size =
- CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
- UnusedReturnSizePtr = EmitLifetimeStart(size, SRetPtr.getBasePointer());
- }
+ if (HaveInsertPoint() && ReturnValue.isUnused())
+ NeedSRetLifetimeEnd = EmitLifetimeStart(SRetPtr.getBasePointer());
}
if (IRFunctionArgs.hasSRetArg()) {
// A mismatch between the allocated return value's AS and the target's
@@ -5484,15 +5484,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Val = Builder.CreateFreeze(Val);
IRCallArgs[FirstIRArg] = Val;
- // Emit lifetime markers for the temporary alloca.
- llvm::TypeSize ByvalTempElementSize =
- CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
- llvm::Value *LifetimeSize =
- EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
-
- // Add cleanup code to emit the end lifetime marker after the call.
- if (LifetimeSize) // In case we disabled lifetime markers.
- CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
+ // Emit lifetime markers for the temporary alloca and add cleanup code to
+ // emit the end lifetime marker after the call.
+ if (EmitLifetimeStart(AI.getPointer()))
+ CallLifetimeEndAfterCall.emplace_back(AI);
// Generate the copy.
I->copyInto(*this, AI);
@@ -5653,9 +5648,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
auto unpaddedCoercionType = ArgInfo.getUnpaddedCoerceAndExpandType();
auto *unpaddedStruct = dyn_cast<llvm::StructType>(unpaddedCoercionType);
- llvm::Value *tempSize = nullptr;
Address addr = Address::invalid();
RawAddress AllocaAddr = RawAddress::invalid();
+ bool NeedLifetimeEnd = false;
if (I->isAggregate()) {
addr = I->hasLValue() ? I->getKnownLValue().getAddress()
: I->getKnownRValue().getAggregateAddress();
@@ -5665,7 +5660,6 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(RV.isScalar()); // complex should always just be direct
llvm::Type *scalarType = RV.getScalarVal()->getType();
- auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
auto scalarAlign = CGM.getDataLayout().getPrefTypeAlign(scalarType);
// Materialize to a temporary.
@@ -5674,7 +5668,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
layout->getAlignment(), scalarAlign)),
"tmp",
/*ArraySize=*/nullptr, &AllocaAddr);
- tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
+ NeedLifetimeEnd = EmitLifetimeStart(AllocaAddr.getPointer());
Builder.CreateStore(RV.getScalarVal(), addr);
}
@@ -5699,10 +5693,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
assert(IRArgPos == FirstIRArg + NumIRArgs);
- if (tempSize) {
- EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
- }
-
+ if (NeedLifetimeEnd)
+ EmitLifetimeEnd(AllocaAddr.getPointer());
break;
}
@@ -5871,9 +5863,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// can't depend on being inside of an ExprWithCleanups, so we need to manually
// pop this cleanup later on. Being eager about this is OK, since this
// temporary is 'invisible' outside of the callee.
- if (UnusedReturnSizePtr)
- pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetPtr,
- UnusedReturnSizePtr);
+ if (NeedSRetLifetimeEnd)
+ pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetPtr);
llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
@@ -6007,7 +5998,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// insertion point; this allows the rest of IRGen to discard
// unreachable code.
if (CI->doesNotReturn()) {
- if (UnusedReturnSizePtr)
+ if (NeedSRetLifetimeEnd)
PopCleanupBlock();
// Strip away the noreturn attribute to better diagnose unreachable UB.
@@ -6122,7 +6113,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::InAlloca:
case ABIArgInfo::Indirect: {
RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
- if (UnusedReturnSizePtr)
+ if (NeedSRetLifetimeEnd)
PopCleanupBlock();
return ret;
}
diff --git a/clang/lib/CodeGen/CGCall.h b/clang/lib/CodeGen/CGCall.h
index 0b4e3f9..3157b7f 100644
--- a/clang/lib/CodeGen/CGCall.h
+++ b/clang/lib/CodeGen/CGCall.h
@@ -289,9 +289,6 @@ public:
/// An Expression (optional) that performs the writeback with any required
/// casting.
const Expr *WritebackExpr;
-
- // Size for optional lifetime end on the temporary.
- llvm::Value *LifetimeSz;
};
struct CallArgCleanup {
@@ -321,9 +318,8 @@ public:
}
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse,
- const Expr *writebackExpr = nullptr,
- llvm::Value *lifetimeSz = nullptr) {
- Writeback writeback = {srcLV, temporary, toUse, writebackExpr, lifetimeSz};
+ const Expr *writebackExpr = nullptr) {
+ Writeback writeback = {srcLV, temporary, toUse, writebackExpr};
Writebacks.push_back(writeback);
}
diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp
index 4a465e6..e9a92ae 100644
--- a/clang/lib/CodeGen/CGClass.cpp
+++ b/clang/lib/CodeGen/CGClass.cpp
@@ -181,7 +181,9 @@ CharUnits CodeGenModule::computeNonVirtualBaseClassOffset(
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
const auto *BaseDecl =
- cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ Base->getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
// Add the offset.
Offset += Layout.getBaseClassOffset(BaseDecl);
@@ -301,7 +303,8 @@ Address CodeGenFunction::GetAddressOfBaseClass(
// and hence will not require any further steps.
if ((*Start)->isVirtual()) {
VBase = cast<CXXRecordDecl>(
- (*Start)->getType()->castAs<RecordType>()->getDecl());
+ (*Start)->getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
++Start;
}
@@ -326,7 +329,7 @@ Address CodeGenFunction::GetAddressOfBaseClass(
llvm::Type *PtrTy = llvm::PointerType::get(
CGM.getLLVMContext(), Value.getType()->getPointerAddressSpace());
- QualType DerivedTy = getContext().getRecordType(Derived);
+ CanQualType DerivedTy = getContext().getCanonicalTagType(Derived);
CharUnits DerivedAlign = CGM.getClassPointerAlignment(Derived);
// If the static offset is zero and we don't have a virtual step,
@@ -401,8 +404,7 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr,
bool NullCheckValue) {
assert(PathBegin != PathEnd && "Base path should not be empty!");
- QualType DerivedTy =
- getContext().getCanonicalType(getContext().getTagDeclType(Derived));
+ CanQualType DerivedTy = getContext().getCanonicalTagType(Derived);
llvm::Type *DerivedValueTy = ConvertType(DerivedTy);
llvm::Value *NonVirtualOffset =
@@ -559,7 +561,8 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
const Type *BaseType = BaseInit->getBaseClass();
const auto *BaseClassDecl =
- cast<CXXRecordDecl>(BaseType->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(BaseType->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
bool isBaseVirtual = BaseInit->isBaseVirtual();
@@ -638,7 +641,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
QualType FieldType = Field->getType();
llvm::Value *ThisPtr = CGF.LoadCXXThis();
- QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
+ CanQualType RecordTy = CGF.getContext().getCanonicalTagType(ClassDecl);
LValue LHS;
// If a base constructor is being emitted, create an LValue that has the
@@ -974,7 +977,7 @@ namespace {
}
CharUnits MemcpySize = getMemcpySize(FirstByteOffset);
- QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
+ CanQualType RecordTy = CGF.getContext().getCanonicalTagType(ClassDecl);
Address ThisPtr = CGF.LoadCXXThisAddress();
LValue DestLV = CGF.MakeAddrLValue(ThisPtr, RecordTy);
LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField);
@@ -1122,7 +1125,7 @@ namespace {
void pushEHDestructors() {
Address ThisPtr = CGF.LoadCXXThisAddress();
- QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
+ CanQualType RecordTy = CGF.getContext().getCanonicalTagType(ClassDecl);
LValue LHS = CGF.MakeAddrLValue(ThisPtr, RecordTy);
for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
@@ -1265,7 +1268,8 @@ namespace {
static bool isInitializerOfDynamicClass(const CXXCtorInitializer *BaseInit) {
const Type *BaseType = BaseInit->getBaseClass();
const auto *BaseClassDecl =
- cast<CXXRecordDecl>(BaseType->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(BaseType->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
return BaseClassDecl->isDynamicClass();
}
@@ -1374,7 +1378,9 @@ HasTrivialDestructorBody(ASTContext &Context,
continue;
const CXXRecordDecl *NonVirtualBase =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ I.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (!HasTrivialDestructorBody(Context, NonVirtualBase,
MostDerivedClassDecl))
return false;
@@ -1383,8 +1389,10 @@ HasTrivialDestructorBody(ASTContext &Context,
if (BaseClassDecl == MostDerivedClassDecl) {
// Check virtual bases.
for (const auto &I : BaseClassDecl->vbases()) {
- const CXXRecordDecl *VirtualBase =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ const auto *VirtualBase =
+ cast<CXXRecordDecl>(
+ I.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (!HasTrivialDestructorBody(Context, VirtualBase,
MostDerivedClassDecl))
return false;
@@ -1404,7 +1412,8 @@ FieldHasTrivialDestructorBody(ASTContext &Context,
if (!RT)
return true;
- CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ auto *FieldClassDecl =
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf();
// The destructor for an implicit anonymous union member is never invoked.
if (FieldClassDecl->isUnion() && FieldClassDecl->isAnonymousStructOrUnion())
@@ -1588,7 +1597,7 @@ namespace {
const CXXRecordDecl *ClassDecl = Dtor->getParent();
CGF.EmitDeleteCall(Dtor->getOperatorDelete(),
LoadThisForDtorDelete(CGF, Dtor),
- CGF.getContext().getTagDeclType(ClassDecl));
+ CGF.getContext().getCanonicalTagType(ClassDecl));
}
};
@@ -1606,7 +1615,7 @@ namespace {
const CXXRecordDecl *ClassDecl = Dtor->getParent();
CGF.EmitDeleteCall(Dtor->getOperatorDelete(),
LoadThisForDtorDelete(CGF, Dtor),
- CGF.getContext().getTagDeclType(ClassDecl));
+ CGF.getContext().getCanonicalTagType(ClassDecl));
assert(Dtor->getOperatorDelete()->isDestroyingOperatorDelete() ==
ReturnAfterDelete &&
"unexpected value for ReturnAfterDelete");
@@ -1647,7 +1656,8 @@ namespace {
void Emit(CodeGenFunction &CGF, Flags flags) override {
// Find the address of the field.
Address thisValue = CGF.LoadCXXThisAddress();
- QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent());
+ CanQualType RecordTy =
+ CGF.getContext().getCanonicalTagType(field->getParent());
LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
LValue LV = CGF.EmitLValueForField(ThisLV, field);
assert(LV.isSimple());
@@ -1870,7 +1880,7 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
const CXXRecordDecl *ClassDecl = DD->getParent();
EmitDeleteCall(DD->getOperatorDelete(),
LoadThisForDtorDelete(*this, DD),
- getContext().getTagDeclType(ClassDecl));
+ getContext().getCanonicalTagType(ClassDecl));
EmitBranchThroughCleanup(ReturnBlock);
} else {
EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
@@ -1898,7 +1908,9 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
// the reverse order.
for (const auto &Base : ClassDecl->vbases()) {
auto *BaseClassDecl =
- cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ Base.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (BaseClassDecl->hasTrivialDestructor()) {
// Under SanitizeMemoryUseAfterDtor, poison the trivial base class
@@ -1964,7 +1976,7 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
// Anonymous union members do not have their destructors called.
const RecordType *RT = type->getAsUnionType();
- if (RT && RT->getDecl()->isAnonymousStructOrUnion())
+ if (RT && RT->getOriginalDecl()->isAnonymousStructOrUnion())
continue;
CleanupKind cleanupKind = getCleanupKind(dtorKind);
@@ -2057,7 +2069,7 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
//
// Note that these are complete objects and so we don't need to
// use the non-virtual size or alignment.
- QualType type = getContext().getTypeDeclType(ctor->getParent());
+ CanQualType type = getContext().getCanonicalTagType(ctor->getParent());
CharUnits eltAlignment =
arrayBase.getAlignment()
.alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
@@ -2119,7 +2131,8 @@ void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
Address addr,
QualType type) {
const RecordType *rtype = type->castAs<RecordType>();
- const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
+ const auto *record =
+ cast<CXXRecordDecl>(rtype->getOriginalDecl())->getDefinitionOrSelf();
const CXXDestructorDecl *dtor = record->getDestructor();
assert(!dtor->isTrivial());
CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
@@ -2158,7 +2171,7 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
const Expr *Arg = E->getArg(0);
LValue Src = EmitLValue(Arg);
- QualType DestTy = getContext().getTypeDeclType(D->getParent());
+ CanQualType DestTy = getContext().getCanonicalTagType(D->getParent());
LValue Dest = MakeAddrLValue(This, DestTy);
EmitAggregateCopyCtor(Dest, Src, ThisAVS.mayOverlap());
return;
@@ -2210,7 +2223,8 @@ void CodeGenFunction::EmitCXXConstructorCall(
if (!NewPointerIsChecked)
EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, Loc, This,
- getContext().getRecordType(ClassDecl), CharUnits::Zero());
+ getContext().getCanonicalTagType(ClassDecl),
+ CharUnits::Zero());
if (D->isTrivial() && D->isDefaultConstructor()) {
assert(Args.size() == 1 && "trivial default ctor with args");
@@ -2226,7 +2240,7 @@ void CodeGenFunction::EmitCXXConstructorCall(
Address Src = makeNaturalAddressForPointer(
Args[1].getRValue(*this).getScalarVal(), SrcTy);
LValue SrcLVal = MakeAddrLValue(Src, SrcTy);
- QualType DestTy = getContext().getTypeDeclType(ClassDecl);
+ CanQualType DestTy = getContext().getCanonicalTagType(ClassDecl);
LValue DestLVal = MakeAddrLValue(This, DestTy);
EmitAggregateCopyCtor(DestLVal, SrcLVal, Overlap);
return;
@@ -2638,8 +2652,9 @@ void CodeGenFunction::getVTablePointers(BaseSubobject Base,
// Traverse bases.
for (const auto &I : RD->bases()) {
- auto *BaseDecl =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ auto *BaseDecl = cast<CXXRecordDecl>(
+ I.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
// Ignore classes without a vtable.
if (!BaseDecl->isDynamicClass())
@@ -2772,7 +2787,7 @@ void CodeGenFunction::EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
// Don't insert type test assumes if we are forcing public
// visibility.
!CGM.AlwaysHasLTOVisibilityPublic(RD)) {
- QualType Ty = QualType(RD->getTypeForDecl(), 0);
+ CanQualType Ty = CGM.getContext().getCanonicalTagType(RD);
llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(Ty);
llvm::Value *TypeId =
llvm::MetadataAsValue::get(CGM.getLLVMContext(), MD);
@@ -2839,7 +2854,8 @@ void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T, Address Derived,
if (!ClassTy)
return;
- const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(ClassTy->getDecl());
+ const auto *ClassDecl =
+ cast<CXXRecordDecl>(ClassTy->getOriginalDecl())->getDefinitionOrSelf();
if (!ClassDecl->isCompleteDefinition() || !ClassDecl->isDynamicClass())
return;
@@ -2896,8 +2912,8 @@ void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
EmitSanitizerStatReport(SSK);
- llvm::Metadata *MD =
- CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
+ CanQualType T = CGM.getContext().getCanonicalTagType(RD);
+ llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(T);
llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
llvm::Value *TypeTest = Builder.CreateCall(
@@ -2906,7 +2922,7 @@ void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
llvm::Constant *StaticData[] = {
llvm::ConstantInt::get(Int8Ty, TCK),
EmitCheckSourceLocation(Loc),
- EmitCheckTypeDescriptor(QualType(RD->getTypeForDecl(), 0)),
+ EmitCheckTypeDescriptor(T),
};
auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
@@ -2956,8 +2972,8 @@ llvm::Value *CodeGenFunction::EmitVTableTypeCheckedLoad(
EmitSanitizerStatReport(llvm::SanStat_CFI_VCall);
- llvm::Metadata *MD =
- CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
+ CanQualType T = CGM.getContext().getCanonicalTagType(RD);
+ llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(T);
llvm::Value *TypeId = llvm::MetadataAsValue::get(CGM.getLLVMContext(), MD);
auto CheckedLoadIntrinsic = CGM.getVTables().useRelativeLayout()
@@ -3039,7 +3055,8 @@ void CodeGenFunction::EmitLambdaBlockInvokeBody() {
// Start building arguments for forwarding call
CallArgList CallArgs;
- QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
+ CanQualType ThisType =
+ getContext().getPointerType(getContext().getCanonicalTagType(Lambda));
Address ThisPtr = GetAddrOfBlockDecl(variable);
CallArgs.add(RValue::get(getAsNaturalPointerTo(ThisPtr, ThisType)), ThisType);
@@ -3066,8 +3083,8 @@ void CodeGenFunction::EmitLambdaStaticInvokeBody(const CXXMethodDecl *MD) {
// Start building arguments for forwarding call
CallArgList CallArgs;
- QualType LambdaType = getContext().getRecordType(Lambda);
- QualType ThisType = getContext().getPointerType(LambdaType);
+ CanQualType LambdaType = getContext().getCanonicalTagType(Lambda);
+ CanQualType ThisType = getContext().getPointerType(LambdaType);
Address ThisPtr = CreateMemTemp(LambdaType, "unused.capture");
CallArgs.add(RValue::get(ThisPtr.emitRawPointer(*this)), ThisType);
@@ -3118,8 +3135,8 @@ void CodeGenFunction::EmitLambdaInAllocaCallOpBody(const CXXMethodDecl *MD) {
// Forward %this argument.
CallArgList CallArgs;
- QualType LambdaType = getContext().getRecordType(MD->getParent());
- QualType ThisType = getContext().getPointerType(LambdaType);
+ CanQualType LambdaType = getContext().getCanonicalTagType(MD->getParent());
+ CanQualType ThisType = getContext().getPointerType(LambdaType);
llvm::Value *ThisArg = CurFn->getArg(0);
CallArgs.add(RValue::get(ThisArg), ThisType);
diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp
index 2b469f2..994bdbd 100644
--- a/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -366,7 +366,7 @@ llvm::DIScope *CGDebugInfo::getContextDescriptor(const Decl *Context,
if (const auto *RDecl = dyn_cast<RecordDecl>(Context))
if (!RDecl->isDependentType())
- return getOrCreateType(CGM.getContext().getTypeDeclType(RDecl),
+ return getOrCreateType(CGM.getContext().getCanonicalTagType(RDecl),
TheCU->getFile());
return Default;
}
@@ -1285,7 +1285,7 @@ static bool needsTypeIdentifier(const TagDecl *TD, CodeGenModule &CGM,
static SmallString<256> getTypeIdentifier(const TagType *Ty, CodeGenModule &CGM,
llvm::DICompileUnit *TheCU) {
SmallString<256> Identifier;
- const TagDecl *TD = Ty->getDecl();
+ const TagDecl *TD = Ty->getOriginalDecl()->getDefinitionOrSelf();
if (!needsTypeIdentifier(TD, CGM, TheCU))
return Identifier;
@@ -1321,8 +1321,8 @@ static llvm::dwarf::Tag getTagForRecord(const RecordDecl *RD) {
llvm::DICompositeType *
CGDebugInfo::getOrCreateRecordFwdDecl(const RecordType *Ty,
llvm::DIScope *Ctx) {
- const RecordDecl *RD = Ty->getDecl();
- if (llvm::DIType *T = getTypeOrNull(CGM.getContext().getRecordType(RD)))
+ const RecordDecl *RD = Ty->getOriginalDecl()->getDefinitionOrSelf();
+ if (llvm::DIType *T = getTypeOrNull(QualType(Ty, 0)))
return cast<llvm::DICompositeType>(T);
llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation());
const unsigned Line =
@@ -2015,6 +2015,8 @@ void CGDebugInfo::CollectRecordNestedType(
const TypeDecl *TD, SmallVectorImpl<llvm::Metadata *> &elements) {
QualType Ty = CGM.getContext().getTypeDeclType(TD);
// Injected class names are not considered nested records.
+ // FIXME: Is this supposed to be testing for injected class name declarations
+ // instead?
if (isa<InjectedClassNameType>(Ty))
return;
SourceLocation Loc = TD->getLocation();
@@ -2356,7 +2358,9 @@ void CGDebugInfo::CollectCXXBasesAux(
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
for (const auto &BI : Bases) {
const auto *Base =
- cast<CXXRecordDecl>(BI.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ BI.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinition();
if (!SeenTypes.insert(Base).second)
continue;
auto *BaseTy = getOrCreateType(BI.getType(), Unit);
@@ -2825,12 +2829,12 @@ void CGDebugInfo::addHeapAllocSiteMetadata(llvm::CallBase *CI,
void CGDebugInfo::completeType(const EnumDecl *ED) {
if (DebugKind <= llvm::codegenoptions::DebugLineTablesOnly)
return;
- QualType Ty = CGM.getContext().getEnumType(ED);
+ CanQualType Ty = CGM.getContext().getCanonicalTagType(ED);
void *TyPtr = Ty.getAsOpaquePtr();
auto I = TypeCache.find(TyPtr);
if (I == TypeCache.end() || !cast<llvm::DIType>(I->second)->isForwardDecl())
return;
- llvm::DIType *Res = CreateTypeDefinition(Ty->castAs<EnumType>());
+ llvm::DIType *Res = CreateTypeDefinition(dyn_cast<EnumType>(Ty));
assert(!Res->isForwardDecl());
TypeCache[TyPtr].reset(Res);
}
@@ -2900,7 +2904,7 @@ void CGDebugInfo::completeClassData(const RecordDecl *RD) {
void CGDebugInfo::completeClass(const RecordDecl *RD) {
if (DebugKind <= llvm::codegenoptions::DebugLineTablesOnly)
return;
- QualType Ty = CGM.getContext().getRecordType(RD);
+ CanQualType Ty = CGM.getContext().getCanonicalTagType(RD);
void *TyPtr = Ty.getAsOpaquePtr();
auto I = TypeCache.find(TyPtr);
if (I != TypeCache.end() && !cast<llvm::DIType>(I->second)->isForwardDecl())
@@ -2909,7 +2913,7 @@ void CGDebugInfo::completeClass(const RecordDecl *RD) {
// We want the canonical definition of the structure to not
// be the typedef. Since that would lead to circular typedef
// metadata.
- auto [Res, PrefRes] = CreateTypeDefinition(Ty->castAs<RecordType>());
+ auto [Res, PrefRes] = CreateTypeDefinition(dyn_cast<RecordType>(Ty));
assert(!Res->isForwardDecl());
TypeCache[TyPtr].reset(Res);
}
@@ -3013,14 +3017,14 @@ void CGDebugInfo::completeRequiredType(const RecordDecl *RD) {
if (shouldOmitDefinition(DebugKind, DebugTypeExtRefs, RD, CGM.getLangOpts()))
return;
- QualType Ty = CGM.getContext().getRecordType(RD);
+ CanQualType Ty = CGM.getContext().getCanonicalTagType(RD);
llvm::DIType *T = getTypeOrNull(Ty);
if (T && T->isForwardDecl())
completeClassData(RD);
}
llvm::DIType *CGDebugInfo::CreateType(const RecordType *Ty) {
- RecordDecl *RD = Ty->getDecl();
+ RecordDecl *RD = Ty->getOriginalDecl()->getDefinitionOrSelf();
llvm::DIType *T = cast_or_null<llvm::DIType>(getTypeOrNull(QualType(Ty, 0)));
if (T || shouldOmitDefinition(DebugKind, DebugTypeExtRefs, RD,
CGM.getLangOpts())) {
@@ -3048,7 +3052,7 @@ llvm::DIType *CGDebugInfo::GetPreferredNameType(const CXXRecordDecl *RD,
std::pair<llvm::DIType *, llvm::DIType *>
CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
- RecordDecl *RD = Ty->getDecl();
+ RecordDecl *RD = Ty->getOriginalDecl()->getDefinitionOrSelf();
// Get overall information about the record type for the debug info.
llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation());
@@ -3070,7 +3074,7 @@ CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
// Push the struct on region stack.
LexicalBlockStack.emplace_back(&*FwdDecl);
- RegionMap[Ty->getDecl()].reset(FwdDecl);
+ RegionMap[RD].reset(FwdDecl);
// Convert all the elements.
SmallVector<llvm::Metadata *, 16> EltTys;
@@ -3092,7 +3096,7 @@ CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
CollectCXXMemberFunctions(CXXDecl, DefUnit, EltTys, FwdDecl);
LexicalBlockStack.pop_back();
- RegionMap.erase(Ty->getDecl());
+ RegionMap.erase(RD);
llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys);
DBuilder.replaceArrays(FwdDecl, Elements);
@@ -3101,7 +3105,7 @@ CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
FwdDecl =
llvm::MDNode::replaceWithPermanent(llvm::TempDICompositeType(FwdDecl));
- RegionMap[Ty->getDecl()].reset(FwdDecl);
+ RegionMap[RD].reset(FwdDecl);
if (CGM.getCodeGenOpts().getDebuggerTuning() == llvm::DebuggerKind::LLDB)
if (auto *PrefDI = GetPreferredNameType(CXXDecl, DefUnit))
@@ -3651,8 +3655,9 @@ llvm::DIType *CGDebugInfo::CreateType(const MemberPointerType *Ty,
}
}
- llvm::DIType *ClassType = getOrCreateType(
- QualType(Ty->getMostRecentCXXRecordDecl()->getTypeForDecl(), 0), U);
+ CanQualType T =
+ CGM.getContext().getCanonicalTagType(Ty->getMostRecentCXXRecordDecl());
+ llvm::DIType *ClassType = getOrCreateType(T, U);
if (Ty->isMemberDataPointerType())
return DBuilder.createMemberPointerType(
getOrCreateType(Ty->getPointeeType(), U), ClassType, Size, /*Align=*/0,
@@ -3687,17 +3692,21 @@ llvm::DIType *CGDebugInfo::CreateType(const HLSLInlineSpirvType *Ty,
return nullptr;
}
-llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) {
- const EnumDecl *ED = Ty->getDecl();
+static auto getEnumInfo(CodeGenModule &CGM, llvm::DICompileUnit *TheCU,
+ const EnumType *Ty) {
+ const EnumDecl *ED = Ty->getOriginalDecl()->getDefinitionOrSelf();
uint64_t Size = 0;
uint32_t Align = 0;
- if (!ED->getTypeForDecl()->isIncompleteType()) {
- Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
+ if (ED->isComplete()) {
+ Size = CGM.getContext().getTypeSize(QualType(Ty, 0));
Align = getDeclAlignIfRequired(ED, CGM.getContext());
}
+ return std::make_tuple(ED, Size, Align, getTypeIdentifier(Ty, CGM, TheCU));
+}
- SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU);
+llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) {
+ auto [ED, Size, Align, Identifier] = getEnumInfo(CGM, TheCU, Ty);
bool isImportedFromModule =
DebugTypeExtRefs && ED->isFromASTFile() && ED->getDefinition();
@@ -3732,15 +3741,7 @@ llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) {
}
llvm::DIType *CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) {
- const EnumDecl *ED = Ty->getDecl();
- uint64_t Size = 0;
- uint32_t Align = 0;
- if (!ED->getTypeForDecl()->isIncompleteType()) {
- Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
- Align = getDeclAlignIfRequired(ED, CGM.getContext());
- }
-
- SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU);
+ auto [ED, Size, Align, Identifier] = getEnumInfo(CGM, TheCU, Ty);
SmallVector<llvm::Metadata *, 16> Enumerators;
ED = ED->getDefinition();
@@ -3815,6 +3816,11 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
switch (T->getTypeClass()) {
default:
return C.getQualifiedType(T.getTypePtr(), Quals);
+ case Type::Enum:
+ case Type::Record:
+ case Type::InjectedClassName:
+ return C.getQualifiedType(T->getCanonicalTypeUnqualified().getTypePtr(),
+ Quals);
case Type::TemplateSpecialization: {
const auto *Spec = cast<TemplateSpecializationType>(T);
if (Spec->isTypeAlias())
@@ -3843,11 +3849,8 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
case Type::CountAttributed:
T = cast<CountAttributedType>(T)->desugar();
break;
- case Type::Elaborated:
- T = cast<ElaboratedType>(T)->getNamedType();
- break;
case Type::Using:
- T = cast<UsingType>(T)->getUnderlyingType();
+ T = cast<UsingType>(T)->desugar();
break;
case Type::Paren:
T = cast<ParenType>(T)->getInnerType();
@@ -3906,7 +3909,8 @@ void CGDebugInfo::completeUnusedClass(const CXXRecordDecl &D) {
completeClassData(&D);
// In case this type has no member function definitions being emitted, ensure
// it is retained
- RetainedTypes.push_back(CGM.getContext().getRecordType(&D).getAsOpaquePtr());
+ RetainedTypes.push_back(
+ CGM.getContext().getCanonicalTagType(&D).getAsOpaquePtr());
}
llvm::DIType *CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile *Unit) {
@@ -4051,7 +4055,6 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
case Type::Adjusted:
case Type::Decayed:
case Type::DeducedTemplateSpecialization:
- case Type::Elaborated:
case Type::Using:
case Type::Paren:
case Type::MacroQualified:
@@ -4094,7 +4097,7 @@ CGDebugInfo::getOrCreateLimitedType(const RecordType *Ty) {
// TODO: Currently used for context chains when limiting debug info.
llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
- RecordDecl *RD = Ty->getDecl();
+ RecordDecl *RD = Ty->getOriginalDecl()->getDefinitionOrSelf();
// Get overall information about the record type for the debug info.
StringRef RDName = getClassName(RD);
@@ -4111,7 +4114,7 @@ llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
// If we ended up creating the type during the context chain construction,
// just return that.
auto *T = cast_or_null<llvm::DICompositeType>(
- getTypeOrNull(CGM.getContext().getRecordType(RD)));
+ getTypeOrNull(CGM.getContext().getCanonicalTagType(RD)));
if (T && (!T->isForwardDecl() || !RD->getDefinition()))
return T;
@@ -4181,7 +4184,7 @@ llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
break;
}
- RegionMap[Ty->getDecl()].reset(RealDecl);
+ RegionMap[RD].reset(RealDecl);
TypeCache[QualType(Ty, 0).getAsOpaquePtr()].reset(RealDecl);
if (const auto *TSpecial = dyn_cast<ClassTemplateSpecializationDecl>(RD))
@@ -4205,8 +4208,8 @@ void CGDebugInfo::CollectContainingType(const CXXRecordDecl *RD,
else
break;
}
- ContainingType = getOrCreateType(QualType(PBase->getTypeForDecl(), 0),
- getOrCreateFile(RD->getLocation()));
+ CanQualType T = CGM.getContext().getCanonicalTagType(PBase);
+ ContainingType = getOrCreateType(T, getOrCreateFile(RD->getLocation()));
} else if (RD->isDynamicClass())
ContainingType = RealDecl;
@@ -4412,9 +4415,10 @@ llvm::DINode *CGDebugInfo::getDeclarationOrDefinition(const Decl *D) {
// we would otherwise do to get a type for a pointee. (forward declarations in
// limited debug info, full definitions (if the type definition is available)
// in unlimited debug info)
- if (const auto *TD = dyn_cast<TypeDecl>(D))
- return getOrCreateType(CGM.getContext().getTypeDeclType(TD),
- getOrCreateFile(TD->getLocation()));
+ if (const auto *TD = dyn_cast<TypeDecl>(D)) {
+ QualType Ty = CGM.getContext().getTypeDeclType(TD);
+ return getOrCreateType(Ty, getOrCreateFile(TD->getLocation()));
+ }
auto I = DeclCache.find(D->getCanonicalDecl());
if (I != DeclCache.end()) {
@@ -5076,7 +5080,7 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
} else if (const auto *RT = dyn_cast<RecordType>(VD->getType())) {
// If VD is an anonymous union then Storage represents value for
// all union fields.
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->isUnion() && RD->isAnonymousStructOrUnion()) {
// GDB has trouble finding local variables in anonymous unions, so we emit
// artificial local variables for each of the members.
@@ -5536,7 +5540,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
cast_or_null<CXXMethodDecl>(blockDecl->getNonClosureContext()))
type = Method->getThisType();
else if (auto *RDecl = dyn_cast<CXXRecordDecl>(blockDecl->getParent()))
- type = QualType(RDecl->getTypeForDecl(), 0);
+ type = CGM.getContext().getCanonicalTagType(RDecl);
else
llvm_unreachable("unexpected block declcontext");
@@ -5626,8 +5630,9 @@ llvm::DIGlobalVariableExpression *CGDebugInfo::CollectAnonRecordDecls(
// Ignore unnamed fields, but recurse into anonymous records.
if (FieldName.empty()) {
if (const auto *RT = dyn_cast<RecordType>(Field->getType()))
- GVE = CollectAnonRecordDecls(RT->getDecl(), Unit, LineNo, LinkageName,
- Var, DContext);
+ GVE =
+ CollectAnonRecordDecls(RT->getOriginalDecl()->getDefinitionOrSelf(),
+ Unit, LineNo, LinkageName, Var, DContext);
continue;
}
// Use VarDecl's Tag, Scope and Line number.
@@ -5646,7 +5651,7 @@ static bool ReferencesAnonymousEntity(RecordType *RT) {
// But so long as it's not one of those, it doesn't matter if some sub-type
// of the record (a template parameter) can't be reconstituted - because the
// un-reconstitutable type itself will carry its own name.
- const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ const auto *RD = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl());
if (!RD)
return false;
if (!RD->getIdentifier())
@@ -5705,15 +5710,15 @@ struct ReconstitutableType : public RecursiveASTVisitor<ReconstitutableType> {
}
return true;
}
- bool TraverseEnumType(EnumType *ET) {
+ bool TraverseEnumType(EnumType *ET, bool = false) {
// Unnamed enums can't be reconstituted due to a lack of column info we
// produce in the DWARF, so we can't get Clang's full name back.
- if (const auto *ED = dyn_cast<EnumDecl>(ET->getDecl())) {
+ if (const auto *ED = dyn_cast<EnumDecl>(ET->getOriginalDecl())) {
if (!ED->getIdentifier()) {
Reconstitutable = false;
return false;
}
- if (!ED->isExternallyVisible()) {
+ if (!ED->getDefinitionOrSelf()->isExternallyVisible()) {
Reconstitutable = false;
return false;
}
@@ -5726,7 +5731,7 @@ struct ReconstitutableType : public RecursiveASTVisitor<ReconstitutableType> {
Reconstitutable &= !FT->getNoReturnAttr();
return Reconstitutable;
}
- bool VisitRecordType(RecordType *RT) {
+ bool VisitRecordType(RecordType *RT, bool = false) {
if (ReferencesAnonymousEntity(RT)) {
Reconstitutable = false;
return false;
@@ -5909,7 +5914,8 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
// variable for each member of the anonymous union so that it's possible
// to find the name of any field in the union.
if (T->isUnionType() && DeclName.empty()) {
- const RecordDecl *RD = T->castAs<RecordType>()->getDecl();
+ const RecordDecl *RD =
+ T->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
assert(RD->isAnonymousStructOrUnion() &&
"unnamed non-anonymous struct or union?");
GVE = CollectAnonRecordDecls(RD, Unit, LineNo, LinkageName, Var, DContext);
@@ -5956,8 +5962,6 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, const APValue &Init) {
if (const auto *ECD = dyn_cast<EnumConstantDecl>(VD)) {
const auto *ED = cast<EnumDecl>(ECD->getDeclContext());
- assert(isa<EnumType>(ED->getTypeForDecl()) && "Enum without EnumType?");
-
if (CGM.getCodeGenOpts().EmitCodeView) {
// If CodeView, emit enums as global variables, unless they are defined
// inside a class. We do this because MSVC doesn't emit S_CONSTANTs for
@@ -5969,10 +5973,9 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, const APValue &Init) {
// If not CodeView, emit DW_TAG_enumeration_type if necessary. For
// example: for "enum { ZERO };", a DW_TAG_enumeration_type is created the
// first time `ZERO` is referenced in a function.
- llvm::DIType *EDTy =
- getOrCreateType(QualType(ED->getTypeForDecl(), 0), Unit);
- assert (EDTy->getTag() == llvm::dwarf::DW_TAG_enumeration_type);
- (void)EDTy;
+ CanQualType T = CGM.getContext().getCanonicalTagType(ED);
+ [[maybe_unused]] llvm::DIType *EDTy = getOrCreateType(T, Unit);
+ assert(EDTy->getTag() == llvm::dwarf::DW_TAG_enumeration_type);
return;
}
}
@@ -5991,7 +5994,7 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, const APValue &Init) {
// FIXME: This is probably unnecessary, since Ty should reference RD
// through its scope.
RetainedTypes.push_back(
- CGM.getContext().getRecordType(RD).getAsOpaquePtr());
+ CGM.getContext().getCanonicalTagType(RD).getAsOpaquePtr());
return;
}
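
The same mechanical migration runs through this file and the ones below: TagType::getDecl() becomes getOriginalDecl(), usually followed by getDefinitionOrSelf() where the caller needs the definition, and ASTContext::getRecordType/getEnumType/getTagDeclType give way to getCanonicalTagType. A minimal sketch of the before/after shape, using the accessors exactly as they appear in the hunks (the helper names here are illustrative only):

    // Illustrative helpers only; the accessor calls mirror the hunks above.
    const RecordDecl *recordFor(const RecordType *RT) {
      // was: RT->getDecl()
      return RT->getOriginalDecl()->getDefinitionOrSelf();
    }

    CanQualType typeFor(ASTContext &Ctx, const TagDecl *TD) {
      // was: Ctx.getRecordType(RD) / Ctx.getEnumType(ED) / Ctx.getTagDeclType(TD)
      return Ctx.getCanonicalTagType(TD);
    }
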
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
index 04f13c7..9df1220 100644
--- a/clang/lib/CodeGen/CGDecl.cpp
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -113,12 +113,14 @@ void CodeGenFunction::EmitDecl(const Decl &D, bool EvaluateConditionDecl) {
case Decl::CXXRecord: // struct/union/class X; [C++]
if (CGDebugInfo *DI = getDebugInfo())
if (cast<RecordDecl>(D).getDefinition())
- DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(&D)));
+ DI->EmitAndRetainType(
+ getContext().getCanonicalTagType(cast<RecordDecl>(&D)));
return;
case Decl::Enum: // enum X;
if (CGDebugInfo *DI = getDebugInfo())
if (cast<EnumDecl>(D).getDefinition())
- DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(&D)));
+ DI->EmitAndRetainType(
+ getContext().getCanonicalTagType(cast<EnumDecl>(&D)));
return;
case Decl::Function: // void X();
case Decl::EnumConstant: // enum ? { X = ? }
@@ -599,10 +601,11 @@ namespace {
llvm::Constant *CleanupFn;
const CGFunctionInfo &FnInfo;
const VarDecl &Var;
+ const CleanupAttr *Attribute;
CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
- const VarDecl *Var)
- : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}
+ const VarDecl *Var, const CleanupAttr *Attr)
+ : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var), Attribute(Attr) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
@@ -624,8 +627,11 @@ namespace {
CallArgList Args;
Args.add(RValue::get(Arg),
CGF.getContext().getPointerType(Var.getType()));
- auto Callee = CGCallee::forDirect(CleanupFn);
- CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
+ GlobalDecl GD = GlobalDecl(Attribute->getFunctionDecl());
+ auto Callee = CGCallee::forDirect(CleanupFn, CGCalleeInfo(GD));
+ CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args,
+ /*callOrInvoke*/ nullptr, /*IsMustTail*/ false,
+ Attribute->getLoc());
}
};
} // end anonymous namespace
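
For context, CallCleanupFunction backs the GNU cleanup attribute, and the emitted call now carries the callee's GlobalDecl and the attribute's source location. A minimal example of the construct it services (open_something is a hypothetical helper, not from this change):

    // Hypothetical user code exercising this path: closefd runs when fd
    // goes out of scope, via the cleanup call emitted above.
    void closefd(int *fd);
    int open_something(void);   // hypothetical
    void use(void) {
      __attribute__((cleanup(closefd))) int fd = open_something();
    }
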
@@ -1347,30 +1353,27 @@ void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
}
/// Emit a lifetime.begin marker if some criteria are satisfied.
-/// \return a pointer to the temporary size Value if a marker was emitted, null
-/// otherwise
-llvm::Value *CodeGenFunction::EmitLifetimeStart(llvm::TypeSize Size,
- llvm::Value *Addr) {
+/// \return whether the marker was emitted.
+bool CodeGenFunction::EmitLifetimeStart(llvm::Value *Addr) {
if (!ShouldEmitLifetimeMarkers)
- return nullptr;
+ return false;
assert(Addr->getType()->getPointerAddressSpace() ==
CGM.getDataLayout().getAllocaAddrSpace() &&
"Pointer should be in alloca address space");
- llvm::Value *SizeV = llvm::ConstantInt::get(
- Int64Ty, Size.isScalable() ? -1 : Size.getFixedValue());
- llvm::CallInst *C =
- Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
+ llvm::CallInst *C = Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {Addr});
C->setDoesNotThrow();
- return SizeV;
+ return true;
}
-void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
+void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Addr) {
+ if (!ShouldEmitLifetimeMarkers)
+ return;
+
assert(Addr->getType()->getPointerAddressSpace() ==
CGM.getDataLayout().getAllocaAddrSpace() &&
"Pointer should be in alloca address space");
- llvm::CallInst *C =
- Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
+ llvm::CallInst *C = Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Addr});
C->setDoesNotThrow();
}
@@ -1567,7 +1570,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
;
if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
- const auto *RD = RecordTy->getDecl();
+ const auto *RD = RecordTy->getOriginalDecl()->getDefinitionOrSelf();
const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
RD->isNonTrivialToPrimitiveDestroy()) {
@@ -1628,9 +1631,8 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// is rare.
if (!Bypasses.IsBypassed(&D) &&
!(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
- llvm::TypeSize Size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
- emission.SizeForLifetimeMarkers =
- EmitLifetimeStart(Size, AllocaAddr.getPointer());
+ emission.UseLifetimeMarkers =
+ EmitLifetimeStart(AllocaAddr.getPointer());
}
} else {
assert(!emission.useLifetimeMarkers());
@@ -1723,9 +1725,8 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// Make sure we call @llvm.lifetime.end.
if (emission.useLifetimeMarkers())
- EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
- emission.getOriginalAllocatedAddress(),
- emission.getSizeForLifetimeMarkers());
+ EHStack.pushCleanup<CallLifetimeEnd>(
+ NormalEHLifetimeMarker, emission.getOriginalAllocatedAddress());
// Analogous to lifetime markers, we use a 'cleanup' to emit fake.use
// calls for local variables. We are exempting volatile variables and
@@ -2231,7 +2232,8 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
assert(F && "Could not find function!");
const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
- EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
+ EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D,
+ CA);
}
// If this is a block variable, call _Block_object_destroy
@@ -2727,7 +2729,10 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
// Don't push a cleanup in a thunk for a method that will also emit a
// cleanup.
if (Ty->isRecordType() && !CurFuncIsThunk &&
- Ty->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
+ Ty->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->isParamDestroyedInCallee()) {
if (QualType::DestructionKind DtorKind =
D.needsDestruction(getContext())) {
assert((DtorKind == QualType::DK_cxx_destructor ||
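
The other recurring change in this file and in the expression emitters below is the lifetime-marker interface: the explicit size operand is dropped, EmitLifetimeStart now just reports whether a marker was emitted, and EmitLifetimeEnd checks ShouldEmitLifetimeMarkers itself. A hedged sketch of the new call pattern (the wrapper function is illustrative; the member calls match the signatures shown above):

    // Illustrative only: start a lifetime for an alloca and, once the object
    // is dead, end it -- no size value is threaded through any more.
    void emitScopedAlloca(CodeGenFunction &CGF, Address Alloca) {
      bool HasMarker = CGF.EmitLifetimeStart(Alloca.getPointer());
      // ... use the alloca ...
      if (HasMarker)
        CGF.EmitLifetimeEnd(Alloca.getPointer());
    }
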
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 5a3d4e4..d5df6dd 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -408,9 +408,10 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
if (const RecordType *RT =
E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
// Get the destructor for the reference temporary.
- if (auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ if (auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl());
ClassDecl && !ClassDecl->hasTrivialDestructor())
- ReferenceTemporaryDtor = ClassDecl->getDestructor();
+ ReferenceTemporaryDtor =
+ ClassDecl->getDefinitionOrSelf()->getDestructor();
}
if (!ReferenceTemporaryDtor)
@@ -588,11 +589,9 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
} else {
switch (M->getStorageDuration()) {
case SD_Automatic:
- if (auto *Size = EmitLifetimeStart(
- CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
- Alloca.getPointer())) {
+ if (EmitLifetimeStart(Alloca.getPointer())) {
pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
- Alloca, Size);
+ Alloca);
}
break;
@@ -623,11 +622,8 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
Block, llvm::BasicBlock::iterator(Block->back())));
}
- if (auto *Size = EmitLifetimeStart(
- CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
- Alloca.getPointer())) {
- pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
- Size);
+ if (EmitLifetimeStart(Alloca.getPointer())) {
+ pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca);
}
if (OldConditional) {
@@ -1209,9 +1205,10 @@ llvm::Value *CodeGenFunction::GetCountedByFieldExprGEP(
return nullptr;
Indices.push_back(Builder.getInt32(0));
- return Builder.CreateInBoundsGEP(
- ConvertType(QualType(RD->getTypeForDecl(), 0)), Res,
- RecIndicesTy(llvm::reverse(Indices)), "counted_by.gep");
+ CanQualType T = CGM.getContext().getCanonicalTagType(RD);
+ return Builder.CreateInBoundsGEP(ConvertType(T), Res,
+ RecIndicesTy(llvm::reverse(Indices)),
+ "counted_by.gep");
}
/// This method is typically called in contexts where we can't generate
@@ -1759,9 +1756,11 @@ static bool isConstantEmittableObjectType(QualType type) {
// Otherwise, all object types satisfy this except C++ classes with
// mutable subobjects or non-trivial copy/destroy behavior.
if (const auto *RT = dyn_cast<RecordType>(type))
- if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl())) {
+ RD = RD->getDefinitionOrSelf();
if (RD->hasMutableFields() || !RD->isTrivial())
return false;
+ }
return true;
}
@@ -1922,8 +1921,10 @@ static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
llvm::APInt &Min, llvm::APInt &End,
bool StrictEnums, bool IsBool) {
const EnumType *ET = Ty->getAs<EnumType>();
- bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
- ET && !ET->getDecl()->isFixed();
+ const EnumDecl *ED =
+ ET ? ET->getOriginalDecl()->getDefinitionOrSelf() : nullptr;
+ bool IsRegularCPlusPlusEnum =
+ CGF.getLangOpts().CPlusPlus && StrictEnums && ET && !ED->isFixed();
if (!IsBool && !IsRegularCPlusPlusEnum)
return false;
@@ -1931,7 +1932,6 @@ static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
} else {
- const EnumDecl *ED = ET->getDecl();
ED->getValueRange(End, Min);
}
return true;
@@ -3789,33 +3789,50 @@ void CodeGenFunction::EmitCheck(
Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
EmitBlock(Handlers);
+ // Clear arguments for the MinimalRuntime handler.
+ if (CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
+ switch (CheckHandler) {
+ case SanitizerHandler::TypeMismatch:
+ // Pass value pointer only. It adds minimal overhead.
+ StaticArgs = {};
+ assert(DynamicArgs.size() == 1);
+ break;
+ default:
+ // No arguments for other checks.
+ StaticArgs = {};
+ DynamicArgs = {};
+ break;
+ }
+ }
+
// Handler functions take an i8* pointing to the (handler-specific) static
// information block, followed by a sequence of intptr_t arguments
// representing operand values.
SmallVector<llvm::Value *, 4> Args;
SmallVector<llvm::Type *, 4> ArgTypes;
- if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
- Args.reserve(DynamicArgs.size() + 1);
- ArgTypes.reserve(DynamicArgs.size() + 1);
-
- // Emit handler arguments and create handler function type.
- if (!StaticArgs.empty()) {
- llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
- auto *InfoPtr = new llvm::GlobalVariable(
- CGM.getModule(), Info->getType(), false,
- llvm::GlobalVariable::PrivateLinkage, Info, "", nullptr,
- llvm::GlobalVariable::NotThreadLocal,
- CGM.getDataLayout().getDefaultGlobalsAddressSpace());
- InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
- Args.push_back(InfoPtr);
- ArgTypes.push_back(Args.back()->getType());
- }
- for (llvm::Value *DynamicArg : DynamicArgs) {
- Args.push_back(EmitCheckValue(DynamicArg));
- ArgTypes.push_back(IntPtrTy);
- }
+ Args.reserve(DynamicArgs.size() + 1);
+ ArgTypes.reserve(DynamicArgs.size() + 1);
+
+ // Emit handler arguments and create handler function type.
+ if (!StaticArgs.empty()) {
+ llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
+ auto *InfoPtr = new llvm::GlobalVariable(
+ CGM.getModule(), Info->getType(),
+ // Non-constant global is used in a handler to deduplicate reports.
+ // TODO: change deduplication logic and make it constant.
+ /*isConstant=*/false, llvm::GlobalVariable::PrivateLinkage, Info, "",
+ nullptr, llvm::GlobalVariable::NotThreadLocal,
+ CGM.getDataLayout().getDefaultGlobalsAddressSpace());
+ InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
+ Args.push_back(InfoPtr);
+ ArgTypes.push_back(Args.back()->getType());
+ }
+
+ for (llvm::Value *DynamicArg : DynamicArgs) {
+ Args.push_back(EmitCheckValue(DynamicArg));
+ ArgTypes.push_back(IntPtrTy);
}
llvm::FunctionType *FnType =
@@ -4290,7 +4307,9 @@ static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
const auto *PointeeT = PtrT->getPointeeType()
->getUnqualifiedDesugaredType();
if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
- return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
+ return RecT->getOriginalDecl()
+ ->getMostRecentDecl()
+ ->hasAttr<BPFPreserveAccessIndexAttr>();
return false;
}
@@ -5056,10 +5075,12 @@ LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
Address Base = GetAddressOfBaseClass(
LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
- LambdaLV = MakeAddrLValue(Base, QualType{LambdaTy->getTypeForDecl(), 0});
+ CanQualType T = getContext().getCanonicalTagType(LambdaTy);
+ LambdaLV = MakeAddrLValue(Base, T);
}
} else {
- QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
+ CanQualType LambdaTagType =
+ getContext().getCanonicalTagType(Field->getParent());
LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
}
return EmitLValueForField(LambdaLV, Field);
@@ -5184,7 +5205,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base, const FieldDecl *field,
}
} else {
llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
- getContext().getRecordType(rec), rec->getLocation());
+ getContext().getCanonicalTagType(rec), rec->getLocation());
Addr = Builder.CreatePreserveStructAccessIndex(
Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
DbgInfo);
@@ -5646,7 +5667,9 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_DerivedToBase: {
const auto *DerivedClassTy =
E->getSubExpr()->getType()->castAs<RecordType>();
- auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+ auto *DerivedClassDecl =
+ cast<CXXRecordDecl>(DerivedClassTy->getOriginalDecl())
+ ->getDefinitionOrSelf();
LValue LV = EmitLValue(E->getSubExpr());
Address This = LV.getAddress();
@@ -5666,7 +5689,9 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
return EmitAggExprToLValue(E);
case CK_BaseToDerived: {
const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
- auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+ auto *DerivedClassDecl =
+ cast<CXXRecordDecl>(DerivedClassTy->getOriginalDecl())
+ ->getDefinitionOrSelf();
LValue LV = EmitLValue(E->getSubExpr());
@@ -5767,13 +5792,10 @@ LValue CodeGenFunction::EmitHLSLOutArgExpr(const HLSLOutArgExpr *E,
llvm::Value *Addr = TempLV.getAddress().getBasePointer();
llvm::Type *ElTy = ConvertTypeForMem(TempLV.getType());
- llvm::TypeSize Sz = CGM.getDataLayout().getTypeAllocSize(ElTy);
-
- llvm::Value *LifetimeSize = EmitLifetimeStart(Sz, Addr);
+ EmitLifetimeStart(Addr);
Address TmpAddr(Addr, ElTy, TempLV.getAlignment());
- Args.addWriteback(BaseLV, TmpAddr, nullptr, E->getWritebackCast(),
- LifetimeSize);
+ Args.addWriteback(BaseLV, TmpAddr, nullptr, E->getWritebackCast());
Args.add(RValue::get(TmpAddr, *this), Ty);
return TempLV;
}
@@ -6724,7 +6746,7 @@ void CodeGenFunction::FlattenAccessAndType(
WorkList.emplace_back(CAT->getElementType(), IdxListCopy);
}
} else if (const auto *RT = dyn_cast<RecordType>(T)) {
- const RecordDecl *Record = RT->getDecl();
+ const RecordDecl *Record = RT->getOriginalDecl()->getDefinitionOrSelf();
assert(!Record->isUnion() && "Union types not supported in flat cast.");
const CXXRecordDecl *CXXD = dyn_cast<CXXRecordDecl>(Record);
diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index cad6731..04e125c 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -272,7 +272,7 @@ bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
if (!RecordTy) return false;
// Don't mess with non-trivial C++ types.
- RecordDecl *Record = RecordTy->getDecl();
+ RecordDecl *Record = RecordTy->getOriginalDecl()->getDefinitionOrSelf();
if (isa<CXXRecordDecl>(Record) &&
(cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
!cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
@@ -300,16 +300,12 @@ void AggExprEmitter::withReturnValueSlot(
Address RetAddr = Address::invalid();
EHScopeStack::stable_iterator LifetimeEndBlock;
- llvm::Value *LifetimeSizePtr = nullptr;
llvm::IntrinsicInst *LifetimeStartInst = nullptr;
if (!UseTemp) {
RetAddr = Dest.getAddress();
} else {
RetAddr = CGF.CreateMemTempWithoutCast(RetTy, "tmp");
- llvm::TypeSize Size =
- CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
- LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAddr.getBasePointer());
- if (LifetimeSizePtr) {
+ if (CGF.EmitLifetimeStart(RetAddr.getBasePointer())) {
LifetimeStartInst =
cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint()));
assert(LifetimeStartInst->getIntrinsicID() ==
@@ -317,7 +313,7 @@ void AggExprEmitter::withReturnValueSlot(
"Last insertion wasn't a lifetime.start?");
CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>(
- NormalEHLifetimeMarker, RetAddr, LifetimeSizePtr);
+ NormalEHLifetimeMarker, RetAddr);
LifetimeEndBlock = CGF.EHStack.stable_begin();
}
}
@@ -338,7 +334,7 @@ void AggExprEmitter::withReturnValueSlot(
// Since we're not guaranteed to be in an ExprWithCleanups, clean up
// eagerly.
CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst);
- CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAddr.getBasePointer());
+ CGF.EmitLifetimeEnd(RetAddr.getBasePointer());
}
}
@@ -428,7 +424,10 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
assert(ArrayType && "std::initializer_list constructed from non-array");
- RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl *Record = E->getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
RecordDecl::field_iterator Field = Record->field_begin();
assert(Field != Record->field_end() &&
Ctx.hasSameType(Field->getType()->getPointeeType(),
@@ -1810,7 +1809,10 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
// the disadvantage is that the generated code is more difficult for
// the optimizer, especially with bitfields.
unsigned NumInitElements = InitExprs.size();
- RecordDecl *record = ExprToVisit->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl *record = ExprToVisit->getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
// We'll need to enter cleanup scopes in case any of the element
// initializers throws an exception.
@@ -2120,7 +2122,7 @@ static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
// referencee. InitListExprs for unions and arrays can't have references.
if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
if (!RT->isUnionType()) {
- RecordDecl *SD = RT->getDecl();
+ RecordDecl *SD = RT->getOriginalDecl()->getDefinitionOrSelf();
CharUnits NumNonZeroBytes = CharUnits::Zero();
unsigned ILEElement = 0;
@@ -2172,7 +2174,7 @@ static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
if (CGF.getLangOpts().CPlusPlus)
if (const RecordType *RT = CGF.getContext()
.getBaseElementType(E->getType())->getAs<RecordType>()) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getOriginalDecl());
if (RD->hasUserDeclaredConstructor())
return;
}
@@ -2293,7 +2295,8 @@ void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
if (getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
- CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
+ auto *Record =
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf();
assert((Record->hasTrivialCopyConstructor() ||
Record->hasTrivialCopyAssignment() ||
Record->hasTrivialMoveConstructor() ||
@@ -2377,7 +2380,7 @@ void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
// fall through
} else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
- RecordDecl *Record = RecordTy->getDecl();
+ RecordDecl *Record = RecordTy->getOriginalDecl()->getDefinitionOrSelf();
if (Record->hasObjectMember()) {
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
SizeVal);
@@ -2386,7 +2389,9 @@ void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
} else if (Ty->isArrayType()) {
QualType BaseType = getContext().getBaseElementType(Ty);
if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
- if (RecordTy->getDecl()->hasObjectMember()) {
+ if (RecordTy->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->hasObjectMember()) {
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
SizeVal);
return;
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index c7e5333..57d7eec 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -181,7 +181,7 @@ static CXXRecordDecl *getCXXRecord(const Expr *E) {
if (const PointerType *PTy = T->getAs<PointerType>())
T = PTy->getPointeeType();
const RecordType *Ty = T->castAs<RecordType>();
- return cast<CXXRecordDecl>(Ty->getDecl());
+ return cast<CXXRecordDecl>(Ty->getOriginalDecl())->getDefinitionOrSelf();
}
// Note: This function also emit constructor calls to support a MSVC
@@ -206,7 +206,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
}
bool HasQualifier = ME->hasQualifier();
- NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
+ NestedNameSpecifier Qualifier = ME->getQualifier();
bool IsArrow = ME->isArrow();
const Expr *Base = ME->getBase();
@@ -217,7 +217,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
- bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
+ bool HasQualifier, NestedNameSpecifier Qualifier, bool IsArrow,
const Expr *Base, llvm::CallBase **CallOrInvoke) {
assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));
@@ -361,7 +361,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
if (sanitizePerformTypeCheck())
EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc,
This.emitRawPointer(*this),
- C.getRecordType(CalleeDecl->getParent()),
+ C.getCanonicalTagType(CalleeDecl->getParent()),
/*Alignment=*/CharUnits::Zero(), SkippedChecks);
// C++ [class.virtual]p12:
@@ -461,9 +461,9 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
else
This = EmitLValue(BaseExpr, KnownNonNull).getAddress();
- EmitTypeCheck(
- TCK_MemberCall, E->getExprLoc(), This.emitRawPointer(*this),
- QualType(MPT->getMostRecentCXXRecordDecl()->getTypeForDecl(), 0));
+ CanQualType ClassType = CGM.getContext().getCanonicalTagType(RD);
+ EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.emitRawPointer(*this),
+ ClassType);
// Get the member function pointer.
llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
@@ -476,8 +476,7 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
CallArgList Args;
- QualType ThisType =
- getContext().getPointerType(getContext().getTagDeclType(RD));
+ QualType ThisType = getContext().getPointerType(ClassType);
// Push the this ptr.
Args.add(RValue::get(ThisPtrForCall), ThisType);
@@ -498,7 +497,7 @@ RValue CodeGenFunction::EmitCXXOperatorMemberCallExpr(
assert(MD->isImplicitObjectMemberFunction() &&
"Trying to emit a member call expr on a static method!");
return EmitCXXMemberOrOperatorMemberCallExpr(
- E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
+ E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/std::nullopt,
/*IsArrow=*/false, E->getArg(0), CallOrInvoke);
}
@@ -1237,11 +1236,12 @@ void CodeGenFunction::EmitNewArrayInitializer(
// usually use memset.
if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
- if (RType->getDecl()->isStruct()) {
+ const RecordDecl *RD = RType->getOriginalDecl()->getDefinitionOrSelf();
+ if (RD->isStruct()) {
unsigned NumElements = 0;
- if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl()))
+ if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
NumElements = CXXRD->getNumBases();
- for (auto *Field : RType->getDecl()->fields())
+ for (auto *Field : RD->fields())
if (!Field->isUnnamedBitField())
++NumElements;
// FIXME: Recurse into nested InitListExprs.
@@ -1687,9 +1687,11 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
QualType AlignValT = sizeType;
if (allocatorType->getNumParams() > IndexOfAlignArg) {
AlignValT = allocatorType->getParamType(IndexOfAlignArg);
- assert(getContext().hasSameUnqualifiedType(
- AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(),
- sizeType) &&
+ assert(getContext().hasSameUnqualifiedType(AlignValT->castAs<EnumType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->getIntegerType(),
+ sizeType) &&
"wrong type for alignment parameter");
++ParamsToSkip;
} else {
@@ -1972,7 +1974,8 @@ static bool EmitObjectDelete(CodeGenFunction &CGF,
// destructor is virtual, we'll just emit the vcall and return.
const CXXDestructorDecl *Dtor = nullptr;
if (const RecordType *RT = ElementType->getAs<RecordType>()) {
- CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ auto *RD =
+ cast<CXXRecordDecl>(RT->getOriginalDecl())->getDefinitionOrSelf();
if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
Dtor = RD->getDestructor();
@@ -2292,7 +2295,20 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
bool IsExact = !IsDynamicCastToVoid &&
CGM.getCodeGenOpts().OptimizationLevel > 0 &&
DestRecordTy->getAsCXXRecordDecl()->isEffectivelyFinal() &&
- CGM.getCXXABI().shouldEmitExactDynamicCast(DestRecordTy);
+ CGM.getCXXABI().shouldEmitExactDynamicCast(DestRecordTy) &&
+ !getLangOpts().PointerAuthCalls;
+
+ std::optional<CGCXXABI::ExactDynamicCastInfo> ExactCastInfo;
+ if (IsExact) {
+ ExactCastInfo = CGM.getCXXABI().getExactDynamicCastInfo(SrcRecordTy, DestTy,
+ DestRecordTy);
+ if (!ExactCastInfo) {
+ llvm::Value *NullValue = EmitDynamicCastToNull(*this, DestTy);
+ if (!Builder.GetInsertBlock())
+ EmitBlock(createBasicBlock("dynamic_cast.unreachable"));
+ return NullValue;
+ }
+ }
// C++ [expr.dynamic.cast]p4:
// If the value of v is a null pointer value in the pointer case, the result
@@ -2321,7 +2337,8 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
// If the destination type is effectively final, this pointer points to the
// right type if and only if its vptr has the right value.
Value = CGM.getCXXABI().emitExactDynamicCast(
- *this, ThisAddr, SrcRecordTy, DestTy, DestRecordTy, CastEnd, CastNull);
+ *this, ThisAddr, SrcRecordTy, DestTy, DestRecordTy, *ExactCastInfo,
+ CastEnd, CastNull);
} else {
assert(DestRecordTy->isRecordType() &&
"destination type must be a record type!");
diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp
index 715bd39..a96c151 100644
--- a/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/clang/lib/CodeGen/CGExprConstant.cpp
@@ -714,7 +714,10 @@ static bool EmitDesignatedInitUpdater(ConstantEmitter &Emitter,
}
bool ConstStructBuilder::Build(const InitListExpr *ILE, bool AllowOverwrite) {
- RecordDecl *RD = ILE->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl *RD = ILE->getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
unsigned FieldNo = -1;
@@ -977,7 +980,8 @@ bool ConstStructBuilder::DoZeroInitPadding(const ASTRecordLayout &Layout,
llvm::Constant *ConstStructBuilder::Finalize(QualType Type) {
Type = Type.getNonReferenceType();
- RecordDecl *RD = Type->castAs<RecordType>()->getDecl();
+ RecordDecl *RD =
+ Type->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
llvm::Type *ValTy = CGM.getTypes().ConvertType(Type);
return Builder.build(ValTy, RD->hasFlexibleArrayMember());
}
@@ -1000,7 +1004,8 @@ llvm::Constant *ConstStructBuilder::BuildStruct(ConstantEmitter &Emitter,
ConstantAggregateBuilder Const(Emitter.CGM);
ConstStructBuilder Builder(Emitter, Const, CharUnits::Zero());
- const RecordDecl *RD = ValTy->castAs<RecordType>()->getDecl();
+ const RecordDecl *RD =
+ ValTy->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
if (!Builder.Build(Val, RD, false, CD, CharUnits::Zero()))
return nullptr;
@@ -1506,7 +1511,9 @@ public:
llvm::Type *ValTy = CGM.getTypes().ConvertType(destType);
bool HasFlexibleArray = false;
if (const auto *RT = destType->getAs<RecordType>())
- HasFlexibleArray = RT->getDecl()->hasFlexibleArrayMember();
+ HasFlexibleArray = RT->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->hasFlexibleArrayMember();
return Const.build(ValTy, HasFlexibleArray);
}
@@ -2640,7 +2647,9 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
}
const CXXRecordDecl *base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ I.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
// Ignore empty bases.
if (isEmptyRecordForLayout(CGM.getContext(), I.getType()) ||
@@ -2679,8 +2688,10 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
// Fill in the virtual bases, if we're working with the complete object.
if (CXXR && asCompleteObject) {
for (const auto &I : CXXR->vbases()) {
- const CXXRecordDecl *base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ const auto *base =
+ cast<CXXRecordDecl>(
+ I.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
// Ignore empty bases.
if (isEmptyRecordForLayout(CGM.getContext(), I.getType()))
@@ -2746,7 +2757,9 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
}
if (const RecordType *RT = T->getAs<RecordType>())
- return ::EmitNullConstant(*this, RT->getDecl(), /*complete object*/ true);
+ return ::EmitNullConstant(*this,
+ RT->getOriginalDecl()->getDefinitionOrSelf(),
+ /*complete object*/ true);
assert(T->isMemberDataPointerType() &&
"Should only see pointers to data members here!");
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index 44931d0..155b80d 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -3515,7 +3515,9 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
case OffsetOfNode::Field: {
FieldDecl *MemberDecl = ON.getField();
- RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
+ RecordDecl *RD = CurrentType->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
// Compute the index of the field in its parent.
@@ -3548,15 +3550,16 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
continue;
}
- RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
- const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
+ const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(
+ CurrentType->castAs<RecordType>()->getOriginalDecl());
// Save the element type.
CurrentType = ON.getBase()->getType();
// Compute the offset to the base.
auto *BaseRT = CurrentType->castAs<RecordType>();
- auto *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
+ auto *BaseRD =
+ cast<CXXRecordDecl>(BaseRT->getOriginalDecl())->getDefinitionOrSelf();
CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
break;
@@ -4183,9 +4186,8 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
return phi;
}
-/// Emit pointer + index arithmetic.
-static Value *emitPointerArithmetic(CodeGenFunction &CGF,
- const BinOpInfo &op,
+/// This function is used for BO_Add/BO_Sub/BO_AddAssign/BO_SubAssign.
+static Value *emitPointerArithmetic(CodeGenFunction &CGF, const BinOpInfo &op,
bool isSubtraction) {
// Must have binary (not unary) expr here. Unary pointer
// increment/decrement doesn't use this path.
@@ -4202,11 +4204,19 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
std::swap(pointerOperand, indexOperand);
}
+ return CGF.EmitPointerArithmetic(expr, pointerOperand, pointer, indexOperand,
+ index, isSubtraction);
+}
+
+/// Emit pointer + index arithmetic.
+llvm::Value *CodeGenFunction::EmitPointerArithmetic(
+ const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer,
+ Expr *indexOperand, llvm::Value *index, bool isSubtraction) {
bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
- auto &DL = CGF.CGM.getDataLayout();
- auto PtrTy = cast<llvm::PointerType>(pointer->getType());
+ auto &DL = CGM.getDataLayout();
+ auto *PtrTy = cast<llvm::PointerType>(pointer->getType());
// Some versions of glibc and gcc use idioms (particularly in their malloc
// routines) that add a pointer-sized integer (known to be a pointer value)
@@ -4227,79 +4237,77 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
//
// Note that we do not suppress the pointer overflow check in this case.
if (BinaryOperator::isNullPointerArithmeticExtension(
- CGF.getContext(), op.Opcode, expr->getLHS(), expr->getRHS())) {
- Value *Ptr = CGF.Builder.CreateIntToPtr(index, pointer->getType());
- if (CGF.getLangOpts().PointerOverflowDefined ||
- !CGF.SanOpts.has(SanitizerKind::PointerOverflow) ||
- NullPointerIsDefined(CGF.Builder.GetInsertBlock()->getParent(),
+ getContext(), BO->getOpcode(), pointerOperand, indexOperand)) {
+ llvm::Value *Ptr = Builder.CreateIntToPtr(index, pointer->getType());
+ if (getLangOpts().PointerOverflowDefined ||
+ !SanOpts.has(SanitizerKind::PointerOverflow) ||
+ NullPointerIsDefined(Builder.GetInsertBlock()->getParent(),
PtrTy->getPointerAddressSpace()))
return Ptr;
// The inbounds GEP of null is valid iff the index is zero.
auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
auto CheckHandler = SanitizerHandler::PointerOverflow;
- SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
- Value *IsZeroIndex = CGF.Builder.CreateIsNull(index);
- llvm::Constant *StaticArgs[] = {
- CGF.EmitCheckSourceLocation(op.E->getExprLoc())};
+ SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
+ llvm::Value *IsZeroIndex = Builder.CreateIsNull(index);
+ llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(BO->getExprLoc())};
llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
- Value *IntPtr = llvm::Constant::getNullValue(IntPtrTy);
- Value *ComputedGEP = CGF.Builder.CreateZExtOrTrunc(index, IntPtrTy);
- Value *DynamicArgs[] = {IntPtr, ComputedGEP};
- CGF.EmitCheck({{IsZeroIndex, CheckOrdinal}}, CheckHandler, StaticArgs,
- DynamicArgs);
+ llvm::Value *IntPtr = llvm::Constant::getNullValue(IntPtrTy);
+ llvm::Value *ComputedGEP = Builder.CreateZExtOrTrunc(index, IntPtrTy);
+ llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
+ EmitCheck({{IsZeroIndex, CheckOrdinal}}, CheckHandler, StaticArgs,
+ DynamicArgs);
return Ptr;
}
if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
// Zero-extend or sign-extend the pointer value according to
// whether the index is signed or not.
- index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
- "idx.ext");
+ index = Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
+ "idx.ext");
}
// If this is subtraction, negate the index.
if (isSubtraction)
- index = CGF.Builder.CreateNeg(index, "idx.neg");
+ index = Builder.CreateNeg(index, "idx.neg");
- if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
- CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
- /*Accessed*/ false);
+ if (SanOpts.has(SanitizerKind::ArrayBounds))
+ EmitBoundsCheck(BO, pointerOperand, index, indexOperand->getType(),
+ /*Accessed*/ false);
- const PointerType *pointerType
- = pointerOperand->getType()->getAs<PointerType>();
+ const PointerType *pointerType =
+ pointerOperand->getType()->getAs<PointerType>();
if (!pointerType) {
QualType objectType = pointerOperand->getType()
- ->castAs<ObjCObjectPointerType>()
- ->getPointeeType();
- llvm::Value *objectSize
- = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
+ ->castAs<ObjCObjectPointerType>()
+ ->getPointeeType();
+ llvm::Value *objectSize =
+ CGM.getSize(getContext().getTypeSizeInChars(objectType));
- index = CGF.Builder.CreateMul(index, objectSize);
+ index = Builder.CreateMul(index, objectSize);
- Value *result =
- CGF.Builder.CreateGEP(CGF.Int8Ty, pointer, index, "add.ptr");
- return CGF.Builder.CreateBitCast(result, pointer->getType());
+ llvm::Value *result = Builder.CreateGEP(Int8Ty, pointer, index, "add.ptr");
+ return Builder.CreateBitCast(result, pointer->getType());
}
QualType elementType = pointerType->getPointeeType();
- if (const VariableArrayType *vla
- = CGF.getContext().getAsVariableArrayType(elementType)) {
+ if (const VariableArrayType *vla =
+ getContext().getAsVariableArrayType(elementType)) {
// The element count here is the total number of non-VLA elements.
- llvm::Value *numElements = CGF.getVLASize(vla).NumElts;
+ llvm::Value *numElements = getVLASize(vla).NumElts;
// Effectively, the multiply by the VLA size is part of the GEP.
// GEP indexes are signed, and scaling an index isn't permitted to
// signed-overflow, so we use the same semantics for our explicit
// multiply. We suppress this if overflow is not undefined behavior.
- llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
- if (CGF.getLangOpts().PointerOverflowDefined) {
- index = CGF.Builder.CreateMul(index, numElements, "vla.index");
- pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
+ llvm::Type *elemTy = ConvertTypeForMem(vla->getElementType());
+ if (getLangOpts().PointerOverflowDefined) {
+ index = Builder.CreateMul(index, numElements, "vla.index");
+ pointer = Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
} else {
- index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
- pointer = CGF.EmitCheckedInBoundsGEP(
- elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
- "add.ptr");
+ index = Builder.CreateNSWMul(index, numElements, "vla.index");
+ pointer =
+ EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned,
+ isSubtraction, BO->getExprLoc(), "add.ptr");
}
return pointer;
}
@@ -4309,16 +4317,15 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
// future proof.
llvm::Type *elemTy;
if (elementType->isVoidType() || elementType->isFunctionType())
- elemTy = CGF.Int8Ty;
+ elemTy = Int8Ty;
else
- elemTy = CGF.ConvertTypeForMem(elementType);
+ elemTy = ConvertTypeForMem(elementType);
- if (CGF.getLangOpts().PointerOverflowDefined)
- return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
+ if (getLangOpts().PointerOverflowDefined)
+ return Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
- return CGF.EmitCheckedInBoundsGEP(
- elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
- "add.ptr");
+ return EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned, isSubtraction,
+ BO->getExprLoc(), "add.ptr");
}
// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
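
Besides the mechanical renames, the static emitPointerArithmetic helper is split so that the bulk of the logic lives on the new member CodeGenFunction::EmitPointerArithmetic and can be invoked with already-emitted operand values. A sketch of a call site, assuming the pointer operand is on the left (the static helper still handles the swapped case):

    // Illustrative call: BO is the BinaryOperator being emitted, PtrVal/IdxVal
    // are the already-emitted pointer and index values.
    llvm::Value *Result = CGF.EmitPointerArithmetic(
        BO, /*pointerOperand=*/BO->getLHS(), PtrVal,
        /*indexOperand=*/BO->getRHS(), IdxVal,
        /*isSubtraction=*/BO->getOpcode() == BO_Sub);
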
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.cpp b/clang/lib/CodeGen/CGHLSLRuntime.cpp
index a47d1cc..9b3ef6a 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.cpp
+++ b/clang/lib/CodeGen/CGHLSLRuntime.cpp
@@ -103,13 +103,6 @@ llvm::Triple::ArchType CGHLSLRuntime::getArch() {
return CGM.getTarget().getTriple().getArch();
}
-// Returns true if the type is an HLSL resource class or an array of them
-static bool isResourceRecordTypeOrArrayOf(const clang::Type *Ty) {
- while (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
- Ty = CAT->getArrayElementTypeNoTypeQual();
- return Ty->isHLSLResourceRecord();
-}
-
// Emits constant global variables for buffer constants declarations
// and creates metadata linking the constant globals with the buffer global.
void CGHLSLRuntime::emitBufferGlobalsAndMetadata(const HLSLBufferDecl *BufDecl,
@@ -146,7 +139,7 @@ void CGHLSLRuntime::emitBufferGlobalsAndMetadata(const HLSLBufferDecl *BufDecl,
if (VDTy.getAddressSpace() != LangAS::hlsl_constant) {
if (VD->getStorageClass() == SC_Static ||
VDTy.getAddressSpace() == LangAS::hlsl_groupshared ||
- isResourceRecordTypeOrArrayOf(VDTy.getTypePtr())) {
+ VDTy->isHLSLResourceRecord() || VDTy->isHLSLResourceRecordArray()) {
// Emit static and groupshared variables and resource classes inside
// cbuffer as regular globals
CGM.EmitGlobal(VD);
@@ -186,8 +179,7 @@ static const clang::HLSLAttributedResourceType *
createBufferHandleType(const HLSLBufferDecl *BufDecl) {
ASTContext &AST = BufDecl->getASTContext();
QualType QT = AST.getHLSLAttributedResourceType(
- AST.HLSLResourceTy,
- QualType(BufDecl->getLayoutStruct()->getTypeForDecl(), 0),
+ AST.HLSLResourceTy, AST.getCanonicalTagType(BufDecl->getLayoutStruct()),
HLSLAttributedResourceType::Attributes(ResourceClass::CBuffer));
return cast<HLSLAttributedResourceType>(QT.getTypePtr());
}
@@ -273,10 +265,14 @@ void CGHLSLRuntime::addBuffer(const HLSLBufferDecl *BufDecl) {
emitBufferGlobalsAndMetadata(BufDecl, BufGV);
// Initialize cbuffer from binding (implicit or explicit)
- HLSLResourceBindingAttr *RBA = BufDecl->getAttr<HLSLResourceBindingAttr>();
- assert(RBA &&
- "cbuffer/tbuffer should always have resource binding attribute");
- initializeBufferFromBinding(BufDecl, BufGV, RBA);
+ if (HLSLVkBindingAttr *VkBinding = BufDecl->getAttr<HLSLVkBindingAttr>()) {
+ initializeBufferFromBinding(BufDecl, BufGV, VkBinding);
+ } else {
+ HLSLResourceBindingAttr *RBA = BufDecl->getAttr<HLSLResourceBindingAttr>();
+ assert(RBA &&
+ "cbuffer/tbuffer should always have resource binding attribute");
+ initializeBufferFromBinding(BufDecl, BufGV, RBA);
+ }
}
llvm::TargetExtType *
@@ -593,6 +589,31 @@ static void initializeBuffer(CodeGenModule &CGM, llvm::GlobalVariable *GV,
CGM.AddCXXGlobalInit(InitResFunc);
}
+static Value *buildNameForResource(llvm::StringRef BaseName,
+ CodeGenModule &CGM) {
+ std::string Str(BaseName);
+ std::string GlobalName(Str + ".str");
+ return CGM.GetAddrOfConstantCString(Str, GlobalName.c_str()).getPointer();
+}
+
+void CGHLSLRuntime::initializeBufferFromBinding(const HLSLBufferDecl *BufDecl,
+ llvm::GlobalVariable *GV,
+ HLSLVkBindingAttr *VkBinding) {
+ assert(VkBinding && "expect a nonnull binding attribute");
+ llvm::Type *Int1Ty = llvm::Type::getInt1Ty(CGM.getLLVMContext());
+ auto *NonUniform = llvm::ConstantInt::get(Int1Ty, false);
+ auto *Index = llvm::ConstantInt::get(CGM.IntTy, 0);
+ auto *RangeSize = llvm::ConstantInt::get(CGM.IntTy, 1);
+ auto *Set = llvm::ConstantInt::get(CGM.IntTy, VkBinding->getSet());
+ auto *Binding = llvm::ConstantInt::get(CGM.IntTy, VkBinding->getBinding());
+ Value *Name = buildNameForResource(BufDecl->getName(), CGM);
+ llvm::Intrinsic::ID IntrinsicID =
+ CGM.getHLSLRuntime().getCreateHandleFromBindingIntrinsic();
+
+ SmallVector<Value *> Args{Set, Binding, RangeSize, Index, NonUniform, Name};
+ initializeBuffer(CGM, GV, IntrinsicID, Args);
+}
+
void CGHLSLRuntime::initializeBufferFromBinding(const HLSLBufferDecl *BufDecl,
llvm::GlobalVariable *GV,
HLSLResourceBindingAttr *RBA) {
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.h b/clang/lib/CodeGen/CGHLSLRuntime.h
index 89d2aff8..31d1728 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.h
+++ b/clang/lib/CodeGen/CGHLSLRuntime.h
@@ -62,6 +62,7 @@ class VarDecl;
class ParmVarDecl;
class InitListExpr;
class HLSLBufferDecl;
+class HLSLVkBindingAttr;
class HLSLResourceBindingAttr;
class Type;
class RecordType;
@@ -168,6 +169,9 @@ private:
llvm::GlobalVariable *BufGV);
void initializeBufferFromBinding(const HLSLBufferDecl *BufDecl,
llvm::GlobalVariable *GV,
+ HLSLVkBindingAttr *VkBinding);
+ void initializeBufferFromBinding(const HLSLBufferDecl *BufDecl,
+ llvm::GlobalVariable *GV,
HLSLResourceBindingAttr *RBA);
llvm::Triple::ArchType getArch();
diff --git a/clang/lib/CodeGen/CGNonTrivialStruct.cpp b/clang/lib/CodeGen/CGNonTrivialStruct.cpp
index e0983ef..1b941fff 100644
--- a/clang/lib/CodeGen/CGNonTrivialStruct.cpp
+++ b/clang/lib/CodeGen/CGNonTrivialStruct.cpp
@@ -39,7 +39,8 @@ template <class Derived> struct StructVisitor {
template <class... Ts>
void visitStructFields(QualType QT, CharUnits CurStructOffset, Ts... Args) {
- const RecordDecl *RD = QT->castAs<RecordType>()->getDecl();
+ const RecordDecl *RD =
+ QT->castAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf();
// Iterate over the fields of the struct.
for (const FieldDecl *FD : RD->fields()) {
@@ -464,7 +465,8 @@ template <class Derived> struct GenFuncBase {
if (WrongType) {
std::string FuncName = std::string(F->getName());
- SourceLocation Loc = QT->castAs<RecordType>()->getDecl()->getLocation();
+ SourceLocation Loc =
+ QT->castAs<RecordType>()->getOriginalDecl()->getLocation();
CGM.Error(Loc, "special function " + FuncName +
" for non-trivial C struct has incorrect type");
return nullptr;
@@ -560,7 +562,8 @@ struct GenBinaryFunc : CopyStructVisitor<Derived, IsMove>,
if (FD->isZeroLengthBitField())
return;
- QualType RT = QualType(FD->getParent()->getTypeForDecl(), 0);
+ CanQualType RT =
+ this->CGF->getContext().getCanonicalTagType(FD->getParent());
llvm::Type *Ty = this->CGF->ConvertType(RT);
Address DstAddr = this->getAddrWithOffset(Addrs[DstIdx], Offset);
LValue DstBase =
diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp
index 24b6ce7..b5f17b8 100644
--- a/clang/lib/CodeGen/CGObjC.cpp
+++ b/clang/lib/CodeGen/CGObjC.cpp
@@ -1000,7 +1000,9 @@ PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
// Compute whether the ivar has strong members.
if (CGM.getLangOpts().getGC())
if (const RecordType *recordType = ivarType->getAs<RecordType>())
- HasStrong = recordType->getDecl()->hasObjectMember();
+ HasStrong = recordType->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->hasObjectMember();
// We can never access structs with object members with a native
// access, because we need to use write barriers. This is what
diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp
index 8c66176..eb49040 100644
--- a/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/clang/lib/CodeGen/CGObjCMac.cpp
@@ -2495,7 +2495,7 @@ void CGObjCCommonMac::BuildRCBlockVarRecordLayout(const RecordType *RT,
CharUnits BytePos,
bool &HasUnion,
bool ByrefLayout) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
SmallVector<const FieldDecl *, 16> Fields(RD->fields());
llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
const llvm::StructLayout *RecLayout =
@@ -3354,7 +3354,8 @@ static bool hasWeakMember(QualType type) {
}
if (auto recType = type->getAs<RecordType>()) {
- for (auto *field : recType->getDecl()->fields()) {
+ for (auto *field :
+ recType->getOriginalDecl()->getDefinitionOrSelf()->fields()) {
if (hasWeakMember(field->getType()))
return true;
}
@@ -5184,7 +5185,7 @@ CGObjCCommonMac::GetIvarLayoutName(IdentifierInfo *Ident,
}
void IvarLayoutBuilder::visitRecord(const RecordType *RT, CharUnits offset) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
// If this is a union, remember that we had one, because it might mess
// up the ordering of layout entries.
@@ -5670,7 +5671,7 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
nullptr, false, ICIS_NoInit));
RD->completeDefinition();
- SuperCTy = Ctx.getTagDeclType(RD);
+ SuperCTy = Ctx.getCanonicalTagType(RD);
SuperPtrCTy = Ctx.getPointerType(SuperCTy);
SuperTy = cast<llvm::StructType>(Types.ConvertType(SuperCTy));
@@ -6016,7 +6017,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(
false, ICIS_NoInit));
RD->completeDefinition();
- MessageRefCTy = Ctx.getTagDeclType(RD);
+ MessageRefCTy = Ctx.getCanonicalTagType(RD);
MessageRefCPtrTy = Ctx.getPointerType(MessageRefCTy);
MessageRefTy = cast<llvm::StructType>(Types.ConvertType(MessageRefCTy));
diff --git a/clang/lib/CodeGen/CGObjCRuntime.cpp b/clang/lib/CodeGen/CGObjCRuntime.cpp
index 6e2f320..cbf9953 100644
--- a/clang/lib/CodeGen/CGObjCRuntime.cpp
+++ b/clang/lib/CodeGen/CGObjCRuntime.cpp
@@ -440,7 +440,9 @@ void CGObjCRuntime::destroyCalleeDestroyedArguments(CodeGenFunction &CGF,
} else {
QualType QT = param->getType();
auto *RT = QT->getAs<RecordType>();
- if (RT && RT->getDecl()->isParamDestroyedInCallee()) {
+ if (RT && RT->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->isParamDestroyedInCallee()) {
RValue RV = I->getRValue(CGF);
QualType::DestructionKind DtorKind = QT.isDestructedType();
switch (DtorKind) {
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 91237cf..3eba33f 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -2915,7 +2915,7 @@ createKmpTaskTRecordDecl(CodeGenModule &CGM, OpenMPDirectiveKind Kind,
addFieldToRecordDecl(C, UD, KmpInt32Ty);
addFieldToRecordDecl(C, UD, KmpRoutineEntryPointerQTy);
UD->completeDefinition();
- QualType KmpCmplrdataTy = C.getRecordType(UD);
+ CanQualType KmpCmplrdataTy = C.getCanonicalTagType(UD);
RecordDecl *RD = C.buildImplicitRecord("kmp_task_t");
RD->startDefinition();
addFieldToRecordDecl(C, RD, C.VoidPtrTy);
@@ -2950,7 +2950,7 @@ createKmpTaskTWithPrivatesRecordDecl(CodeGenModule &CGM, QualType KmpTaskTQTy,
RD->startDefinition();
addFieldToRecordDecl(C, RD, KmpTaskTQTy);
if (const RecordDecl *PrivateRD = createPrivatesRecordDecl(CGM, Privates))
- addFieldToRecordDecl(C, RD, C.getRecordType(PrivateRD));
+ addFieldToRecordDecl(C, RD, C.getCanonicalTagType(PrivateRD));
RD->completeDefinition();
return RD;
}
@@ -3582,7 +3582,7 @@ static void getKmpAffinityType(ASTContext &C, QualType &KmpTaskAffinityInfoTy) {
addFieldToRecordDecl(C, KmpAffinityInfoRD, C.getSizeType());
addFieldToRecordDecl(C, KmpAffinityInfoRD, FlagsTy);
KmpAffinityInfoRD->completeDefinition();
- KmpTaskAffinityInfoTy = C.getRecordType(KmpAffinityInfoRD);
+ KmpTaskAffinityInfoTy = C.getCanonicalTagType(KmpAffinityInfoRD);
}
}
@@ -3640,7 +3640,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
// Build type kmp_task_t (if not built yet).
if (isOpenMPTaskLoopDirective(D.getDirectiveKind())) {
if (SavedKmpTaskloopTQTy.isNull()) {
- SavedKmpTaskloopTQTy = C.getRecordType(createKmpTaskTRecordDecl(
+ SavedKmpTaskloopTQTy = C.getCanonicalTagType(createKmpTaskTRecordDecl(
CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
}
KmpTaskTQTy = SavedKmpTaskloopTQTy;
@@ -3650,7 +3650,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
isOpenMPTargetDataManagementDirective(D.getDirectiveKind())) &&
"Expected taskloop, task or target directive");
if (SavedKmpTaskTQTy.isNull()) {
- SavedKmpTaskTQTy = C.getRecordType(createKmpTaskTRecordDecl(
+ SavedKmpTaskTQTy = C.getCanonicalTagType(createKmpTaskTRecordDecl(
CGM, D.getDirectiveKind(), KmpInt32Ty, KmpRoutineEntryPtrQTy));
}
KmpTaskTQTy = SavedKmpTaskTQTy;
@@ -3659,7 +3659,8 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
// Build particular struct kmp_task_t for the given task.
const RecordDecl *KmpTaskTWithPrivatesQTyRD =
createKmpTaskTWithPrivatesRecordDecl(CGM, KmpTaskTQTy, Privates);
- QualType KmpTaskTWithPrivatesQTy = C.getRecordType(KmpTaskTWithPrivatesQTyRD);
+ CanQualType KmpTaskTWithPrivatesQTy =
+ C.getCanonicalTagType(KmpTaskTWithPrivatesQTyRD);
QualType KmpTaskTWithPrivatesPtrQTy =
C.getPointerType(KmpTaskTWithPrivatesQTy);
llvm::Type *KmpTaskTWithPrivatesPtrTy = CGF.Builder.getPtrTy(0);
@@ -3914,7 +3915,10 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
// Fill the data in the resulting kmp_task_t record.
// Copy shareds if there are any.
Address KmpTaskSharedsPtr = Address::invalid();
- if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
+ if (!SharedsTy->getAsStructureType()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->field_empty()) {
KmpTaskSharedsPtr = Address(
CGF.EmitLoadOfScalar(
CGF.EmitLValueForField(
@@ -3944,8 +3948,11 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
enum { Priority = 0, Destructors = 1 };
// Provide pointer to function with destructors for privates.
auto FI = std::next(KmpTaskTQTyRD->field_begin(), Data1);
- const RecordDecl *KmpCmplrdataUD =
- (*FI)->getType()->getAsUnionType()->getDecl();
+ const RecordDecl *KmpCmplrdataUD = (*FI)
+ ->getType()
+ ->getAsUnionType()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
if (NeedsCleanup) {
llvm::Value *DestructorFn = emitDestructorsFunction(
CGM, Loc, KmpInt32Ty, KmpTaskTWithPrivatesPtrQTy,
@@ -4015,7 +4022,7 @@ static void getDependTypes(ASTContext &C, QualType &KmpDependInfoTy,
addFieldToRecordDecl(C, KmpDependInfoRD, C.getSizeType());
addFieldToRecordDecl(C, KmpDependInfoRD, FlagsTy);
KmpDependInfoRD->completeDefinition();
- KmpDependInfoTy = C.getRecordType(KmpDependInfoRD);
+ KmpDependInfoTy = C.getCanonicalTagType(KmpDependInfoRD);
}
}
@@ -5714,7 +5721,7 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
const FieldDecl *FlagsFD = addFieldToRecordDecl(
C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false));
RD->completeDefinition();
- QualType RDType = C.getRecordType(RD);
+ CanQualType RDType = C.getCanonicalTagType(RD);
unsigned Size = Data.ReductionVars.size();
llvm::APInt ArraySize(/*numBits=*/64, Size);
QualType ArrayRDType =
@@ -10703,7 +10710,7 @@ static unsigned evaluateCDTSize(const FunctionDecl *FD,
unsigned Offset = 0;
if (const auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
if (ParamAttrs[Offset].Kind == Vector)
- CDT = C.getPointerType(C.getRecordType(MD->getParent()));
+ CDT = C.getPointerType(C.getCanonicalTagType(MD->getParent()));
++Offset;
}
if (CDT.isNull()) {
@@ -11285,7 +11292,7 @@ void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
addFieldToRecordDecl(C, RD, Int64Ty);
addFieldToRecordDecl(C, RD, Int64Ty);
RD->completeDefinition();
- KmpDimTy = C.getRecordType(RD);
+ KmpDimTy = C.getCanonicalTagType(RD);
} else {
RD = cast<RecordDecl>(KmpDimTy->getAsTagDecl());
}
@@ -11781,7 +11788,7 @@ Address CGOpenMPRuntime::emitLastprivateConditionalInit(CodeGenFunction &CGF,
VDField = addFieldToRecordDecl(C, RD, VD->getType().getNonReferenceType());
FiredField = addFieldToRecordDecl(C, RD, C.CharTy);
RD->completeDefinition();
- NewType = C.getRecordType(RD);
+ NewType = C.getCanonicalTagType(RD);
Address Addr = CGF.CreateMemTemp(NewType, C.getDeclAlign(VD), VD->getName());
BaseLVal = CGF.MakeAddrLValue(Addr, NewType, AlignmentSource::Decl);
I->getSecond().try_emplace(VD, NewType, VDField, FiredField, BaseLVal);
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
index 04c9192..cff1071 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -769,7 +769,7 @@ void CGOpenMPRuntimeGPU::emitKernelDeinit(CodeGenFunction &CGF,
"_openmp_teams_reduction_type_$_", RecordDecl::TagKind::Union);
StaticRD->startDefinition();
for (const RecordDecl *TeamReductionRec : TeamsReductions) {
- QualType RecTy = C.getRecordType(TeamReductionRec);
+ CanQualType RecTy = C.getCanonicalTagType(TeamReductionRec);
auto *Field = FieldDecl::Create(
C, StaticRD, SourceLocation(), SourceLocation(), nullptr, RecTy,
C.getTrivialTypeSourceInfo(RecTy, SourceLocation()),
@@ -779,7 +779,7 @@ void CGOpenMPRuntimeGPU::emitKernelDeinit(CodeGenFunction &CGF,
StaticRD->addDecl(Field);
}
StaticRD->completeDefinition();
- QualType StaticTy = C.getRecordType(StaticRD);
+ CanQualType StaticTy = C.getCanonicalTagType(StaticRD);
llvm::Type *LLVMReductionsBufferTy =
CGM.getTypes().ConvertTypeForMem(StaticTy);
const auto &DL = CGM.getModule().getDataLayout();
diff --git a/clang/lib/CodeGen/CGPointerAuth.cpp b/clang/lib/CodeGen/CGPointerAuth.cpp
index dcef01a..1a5ffb5 100644
--- a/clang/lib/CodeGen/CGPointerAuth.cpp
+++ b/clang/lib/CodeGen/CGPointerAuth.cpp
@@ -531,7 +531,7 @@ llvm::Constant *CodeGenModule::getMemberFunctionPointer(llvm::Constant *Pointer,
llvm::Constant *CodeGenModule::getMemberFunctionPointer(const FunctionDecl *FD,
llvm::Type *Ty) {
QualType FT = FD->getType();
- FT = getContext().getMemberPointerType(FT, /*Qualifier=*/nullptr,
+ FT = getContext().getMemberPointerType(FT, /*Qualifier=*/std::nullopt,
cast<CXXMethodDecl>(FD)->getParent());
return getMemberFunctionPointer(getRawFunctionPointer(FD, Ty), FT);
}
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index 1a8c6f0..031ef73 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -3279,7 +3279,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
const RecordDecl *RD = S.getCapturedRecordDecl();
- QualType RecordTy = getContext().getRecordType(RD);
+ CanQualType RecordTy = getContext().getCanonicalTagType(RD);
// Initialize the captured struct.
LValue SlotLV =
@@ -3359,7 +3359,7 @@ CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
// Initialize variable-length arrays.
LValue Base = MakeNaturalAlignRawAddrLValue(
- CapturedStmtInfo->getContextValue(), Ctx.getTagDeclType(RD));
+ CapturedStmtInfo->getContextValue(), Ctx.getCanonicalTagType(RD));
for (auto *FD : RD->fields()) {
if (FD->hasCapturedVLAType()) {
auto *ExprArg =
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
index 5822e0f..f6a0ca5 100644
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -5273,7 +5273,8 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
// Emit outlined function for task construct.
const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
- QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
+ CanQualType SharedsTy =
+ getContext().getCanonicalTagType(CS->getCapturedRecordDecl());
auto I = CS->getCapturedDecl()->param_begin();
auto PartId = std::next(I);
auto TaskT = std::next(I, 4);
@@ -5507,7 +5508,8 @@ void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
// Emit outlined function for task construct.
const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
- QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
+ CanQualType SharedsTy =
+ getContext().getCanonicalTagType(CS->getCapturedRecordDecl());
const Expr *IfCond = nullptr;
for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
if (C->getNameModifier() == OMPD_unknown ||
@@ -7890,7 +7892,8 @@ void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
OMPLexicalScope Scope(*this, S, OMPD_taskloop, /*EmitPreInitStmt=*/false);
CapturedStruct = GenerateCapturedStmtArgument(*CS);
}
- QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
+ CanQualType SharedsTy =
+ getContext().getCanonicalTagType(CS->getCapturedRecordDecl());
const Expr *IfCond = nullptr;
for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
if (C->getNameModifier() == OMPD_unknown ||
diff --git a/clang/lib/CodeGen/CGVTables.cpp b/clang/lib/CodeGen/CGVTables.cpp
index 0b6e830..e14e883 100644
--- a/clang/lib/CodeGen/CGVTables.cpp
+++ b/clang/lib/CodeGen/CGVTables.cpp
@@ -971,7 +971,7 @@ llvm::GlobalVariable *CodeGenVTables::GenerateConstructionVTable(
VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
llvm::Constant *RTTI = CGM.GetAddrOfRTTIDescriptor(
- CGM.getContext().getTagDeclType(Base.getBase()));
+ CGM.getContext().getCanonicalTagType(Base.getBase()));
// Create and set the initializer.
ConstantInitBuilder builder(CGM);
@@ -1382,8 +1382,8 @@ void CodeGenModule::EmitVTableTypeMetadata(const CXXRecordDecl *RD,
AP.second.AddressPointIndex,
{}};
llvm::raw_string_ostream Stream(N.TypeName);
- getCXXABI().getMangleContext().mangleCanonicalTypeName(
- QualType(N.Base->getTypeForDecl(), 0), Stream);
+ CanQualType T = getContext().getCanonicalTagType(N.Base);
+ getCXXABI().getMangleContext().mangleCanonicalTypeName(T, Stream);
AddressPoints.push_back(std::move(N));
}
@@ -1404,7 +1404,7 @@ void CodeGenModule::EmitVTableTypeMetadata(const CXXRecordDecl *RD,
continue;
llvm::Metadata *MD = CreateMetadataIdentifierForVirtualMemPtrType(
Context.getMemberPointerType(Comps[I].getFunctionDecl()->getType(),
- /*Qualifier=*/nullptr, AP.Base));
+ /*Qualifier=*/std::nullopt, AP.Base));
VTable->addTypeMetadata((ComponentWidth * I).getQuantity(), MD);
}
}
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
index ab345a5..d077ee5 100644
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -2223,7 +2223,9 @@ CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
// Ignore empty classes in C++.
if (getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
- if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
+ if (cast<CXXRecordDecl>(RT->getOriginalDecl())
+ ->getDefinitionOrSelf()
+ ->isEmpty())
return;
}
}
@@ -2494,10 +2496,6 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
case Type::PredefinedSugar:
llvm_unreachable("type class is never variably-modified!");
- case Type::Elaborated:
- type = cast<ElaboratedType>(ty)->getNamedType();
- break;
-
case Type::Adjusted:
type = cast<AdjustedType>(ty)->getAdjustedType();
break;
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index 6c32c98..84be422 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -701,14 +701,12 @@ public:
bool isRedundantBeforeReturn() override { return true; }
llvm::Value *Addr;
- llvm::Value *Size;
public:
- CallLifetimeEnd(RawAddress addr, llvm::Value *size)
- : Addr(addr.getPointer()), Size(size) {}
+ CallLifetimeEnd(RawAddress addr) : Addr(addr.getPointer()) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
- CGF.EmitLifetimeEnd(Size, Addr);
+ CGF.EmitLifetimeEnd(Addr);
}
};
@@ -2974,7 +2972,7 @@ public:
/// member.
bool hasVolatileMember(QualType T) {
if (const RecordType *RT = T->getAs<RecordType>()) {
- const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
return RD->hasVolatileMember();
}
return false;
@@ -3233,8 +3231,8 @@ public:
void EmitSehTryScopeBegin();
void EmitSehTryScopeEnd();
- llvm::Value *EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr);
- void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);
+ bool EmitLifetimeStart(llvm::Value *Addr);
+ void EmitLifetimeEnd(llvm::Value *Addr);
llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
@@ -3417,8 +3415,8 @@ public:
/// initializer.
bool IsConstantAggregate;
- /// Non-null if we should use lifetime annotations.
- llvm::Value *SizeForLifetimeMarkers;
+ /// True if lifetime markers should be used.
+ bool UseLifetimeMarkers;
/// Address with original alloca instruction. Invalid if the variable was
/// emitted as a global constant.
@@ -3432,20 +3430,14 @@ public:
AutoVarEmission(const VarDecl &variable)
: Variable(&variable), Addr(Address::invalid()), NRVOFlag(nullptr),
IsEscapingByRef(false), IsConstantAggregate(false),
- SizeForLifetimeMarkers(nullptr), AllocaAddr(RawAddress::invalid()) {}
+ UseLifetimeMarkers(false), AllocaAddr(RawAddress::invalid()) {}
bool wasEmittedAsGlobal() const { return !Addr.isValid(); }
public:
static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
- bool useLifetimeMarkers() const {
- return SizeForLifetimeMarkers != nullptr;
- }
- llvm::Value *getSizeForLifetimeMarkers() const {
- assert(useLifetimeMarkers());
- return SizeForLifetimeMarkers;
- }
+ bool useLifetimeMarkers() const { return UseLifetimeMarkers; }
/// Returns the raw, allocated address, which is not necessarily
/// the address of the object itself. It is casted to default
@@ -4560,7 +4552,7 @@ public:
ArrayRef<llvm::Value *> args);
CGCallee BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
- NestedNameSpecifier *Qual, llvm::Type *Ty);
+ NestedNameSpecifier Qual, llvm::Type *Ty);
CGCallee BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
CXXDtorType Type,
@@ -4665,7 +4657,7 @@ public:
llvm::CallBase **CallOrInvoke = nullptr);
RValue EmitCXXMemberOrOperatorMemberCallExpr(
const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
- bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
+ bool HasQualifier, NestedNameSpecifier Qualifier, bool IsArrow,
const Expr *Base, llvm::CallBase **CallOrInvoke);
// Compute the object pointer.
Address EmitCXXMemberDataPointerAddress(
@@ -5220,6 +5212,12 @@ public:
/// operation is a subtraction.
enum { NotSubtraction = false, IsSubtraction = true };
+ /// Emit pointer + index arithmetic.
+ llvm::Value *EmitPointerArithmetic(const BinaryOperator *BO,
+ Expr *pointerOperand, llvm::Value *pointer,
+ Expr *indexOperand, llvm::Value *index,
+ bool isSubtraction);
+
/// Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to
/// detect undefined behavior when the pointer overflow sanitizer is enabled.
/// \p SignedIndices indicates whether any of the GEP indices are signed.
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 834b1c0..2541a44 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -2793,7 +2793,7 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
for (const CXXRecordDecl *Base : getMostBaseClasses(MD->getParent())) {
llvm::Metadata *Id =
CreateMetadataIdentifierForType(Context.getMemberPointerType(
- MD->getType(), /*Qualifier=*/nullptr, Base));
+ MD->getType(), /*Qualifier=*/std::nullopt, Base));
F->addTypeMetadata(0, Id);
}
}
@@ -4152,9 +4152,11 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
// Check if T is a class type with a destructor that's not dllimport.
static bool HasNonDllImportDtor(QualType T) {
if (const auto *RT = T->getBaseElementTypeUnsafe()->getAs<RecordType>())
- if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (auto *RD = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl())) {
+ RD = RD->getDefinitionOrSelf();
if (RD->getDestructor() && !RD->getDestructor()->hasAttr<DLLImportAttr>())
return true;
+ }
return false;
}
@@ -6029,7 +6031,7 @@ static bool isVarDeclStrongDefinition(const ASTContext &Context,
return true;
if (const auto *RT = VarType->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
for (const FieldDecl *FD : RD->fields()) {
if (FD->isBitField())
continue;
@@ -6738,7 +6740,7 @@ QualType CodeGenModule::getObjCFastEnumerationStateType() {
}
D->completeDefinition();
- ObjCFastEnumerationStateType = Context.getTagDeclType(D);
+ ObjCFastEnumerationStateType = Context.getCanonicalTagType(D);
}
return ObjCFastEnumerationStateType;
@@ -7248,7 +7250,8 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
CXXRecordDecl *CRD = cast<CXXRecordDecl>(D);
if (CGDebugInfo *DI = getModuleDebugInfo()) {
if (CRD->hasDefinition())
- DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(D)));
+ DI->EmitAndRetainType(
+ getContext().getCanonicalTagType(cast<RecordDecl>(D)));
if (auto *ES = D->getASTContext().getExternalSource())
if (ES->hasExternalDefinitions(D) == ExternalASTSource::EK_Never)
DI->completeUnusedClass(*CRD);
@@ -7467,20 +7470,23 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::Typedef:
case Decl::TypeAlias: // using foo = bar; [C++11]
if (CGDebugInfo *DI = getModuleDebugInfo())
- DI->EmitAndRetainType(
- getContext().getTypedefType(cast<TypedefNameDecl>(D)));
+ DI->EmitAndRetainType(getContext().getTypedefType(
+ ElaboratedTypeKeyword::None, /*Qualifier=*/std::nullopt,
+ cast<TypedefNameDecl>(D)));
break;
case Decl::Record:
if (CGDebugInfo *DI = getModuleDebugInfo())
if (cast<RecordDecl>(D)->getDefinition())
- DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(D)));
+ DI->EmitAndRetainType(
+ getContext().getCanonicalTagType(cast<RecordDecl>(D)));
break;
case Decl::Enum:
if (CGDebugInfo *DI = getModuleDebugInfo())
if (cast<EnumDecl>(D)->getDefinition())
- DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(D)));
+ DI->EmitAndRetainType(
+ getContext().getCanonicalTagType(cast<EnumDecl>(D)));
break;
case Decl::HLSLBuffer:
@@ -7934,8 +7940,8 @@ bool CodeGenModule::NeedAllVtablesTypeId() const {
void CodeGenModule::AddVTableTypeMetadata(llvm::GlobalVariable *VTable,
CharUnits Offset,
const CXXRecordDecl *RD) {
- llvm::Metadata *MD =
- CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
+ CanQualType T = getContext().getCanonicalTagType(RD);
+ llvm::Metadata *MD = CreateMetadataIdentifierForType(T);
VTable->addTypeMetadata(Offset.getQuantity(), MD);
if (CodeGenOpts.SanitizeCfiCrossDso)
diff --git a/clang/lib/CodeGen/CodeGenTBAA.cpp b/clang/lib/CodeGen/CodeGenTBAA.cpp
index 90eafe2..bd2442f 100644
--- a/clang/lib/CodeGen/CodeGenTBAA.cpp
+++ b/clang/lib/CodeGen/CodeGenTBAA.cpp
@@ -143,7 +143,7 @@ static bool TypeHasMayAlias(QualType QTy) {
/// Check if the given type is a valid base type to be used in access tags.
static bool isValidBaseType(QualType QTy) {
if (const RecordType *TTy = QTy->getAs<RecordType>()) {
- const RecordDecl *RD = TTy->getDecl()->getDefinition();
+ const RecordDecl *RD = TTy->getOriginalDecl()->getDefinition();
// Incomplete types are not valid base access types.
if (!RD)
return false;
@@ -311,7 +311,7 @@ llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type *Ty) {
// This also covers anonymous structs and unions, which have a different
// compatibility rule, but it doesn't matter because you can never have a
// pointer to an anonymous struct or union.
- if (!RT->getDecl()->getDeclName())
+ if (!RT->getOriginalDecl()->getDeclName())
return getAnyPtr(PtrDepth);
// For non-builtin types use the mangled name of the canonical type.
@@ -333,14 +333,15 @@ llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type *Ty) {
// Enum types are distinct types. In C++ they have "underlying types",
// however they aren't related for TBAA.
if (const EnumType *ETy = dyn_cast<EnumType>(Ty)) {
+ const EnumDecl *ED = ETy->getOriginalDecl()->getDefinitionOrSelf();
if (!Features.CPlusPlus)
- return getTypeInfo(ETy->getDecl()->getIntegerType());
+ return getTypeInfo(ED->getIntegerType());
// In C++ mode, types have linkage, so we can rely on the ODR and
// on their mangled names, if they're external.
// TODO: Is there a way to get a program-wide unique name for a
// decl with local linkage or no linkage?
- if (!ETy->getDecl()->isExternallyVisible())
+ if (!ED->isExternallyVisible())
return getChar();
SmallString<256> OutName;
@@ -433,7 +434,7 @@ CodeGenTBAA::CollectFields(uint64_t BaseOffset,
llvm::MDBuilder::TBAAStructField(BaseOffset, Size, TBAATag));
return true;
}
- const RecordDecl *RD = TTy->getDecl()->getDefinition();
+ const RecordDecl *RD = TTy->getOriginalDecl()->getDefinition();
if (RD->hasFlexibleArrayMember())
return false;
@@ -514,7 +515,7 @@ CodeGenTBAA::getTBAAStructInfo(QualType QTy) {
llvm::MDNode *CodeGenTBAA::getBaseTypeInfoHelper(const Type *Ty) {
if (auto *TTy = dyn_cast<RecordType>(Ty)) {
- const RecordDecl *RD = TTy->getDecl()->getDefinition();
+ const RecordDecl *RD = TTy->getOriginalDecl()->getDefinition();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
using TBAAStructField = llvm::MDBuilder::TBAAStructField;
SmallVector<TBAAStructField, 4> Fields;
diff --git a/clang/lib/CodeGen/CodeGenTypes.cpp b/clang/lib/CodeGen/CodeGenTypes.cpp
index c98503e..f2a0a64 100644
--- a/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -229,12 +229,13 @@ bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
+ CanQualType T = CGM.getContext().getCanonicalTagType(TD);
// If this is an enum being completed, then we flush all non-struct types from
// the cache. This allows function types and other things that may be derived
// from the enum to be recomputed.
if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
// Only flush the cache if we've actually already converted this type.
- if (TypeCache.count(ED->getTypeForDecl())) {
+ if (TypeCache.count(T->getTypePtr())) {
// Okay, we formed some types based on this. We speculated that the enum
// would be lowered to i32, so we only need to flush the cache if this
// didn't happen.
@@ -255,7 +256,7 @@ void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
// Only complete it if we converted it already. If we haven't converted it
// yet, we'll just do it lazily.
- if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
+ if (RecordDeclTypes.count(T.getTypePtr()))
ConvertRecordDeclType(RD);
// If necessary, provide the full definition of a type only used with a
@@ -265,7 +266,7 @@ void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
}
void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
- QualType T = Context.getRecordType(RD);
+ CanQualType T = Context.getCanonicalTagType(RD);
T = Context.getCanonicalType(T);
const Type *Ty = T.getTypePtr();
@@ -311,11 +312,11 @@ llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
// Force conversion of all the relevant record types, to make sure
// we re-convert the FunctionType when appropriate.
if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>())
- ConvertRecordDeclType(RT->getDecl());
+ ConvertRecordDeclType(RT->getOriginalDecl()->getDefinitionOrSelf());
if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
- ConvertRecordDeclType(RT->getDecl());
+ ConvertRecordDeclType(RT->getOriginalDecl()->getDefinitionOrSelf());
SkippedLayout = true;
@@ -373,7 +374,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
// RecordTypes are cached and processed specially.
if (const RecordType *RT = dyn_cast<RecordType>(Ty))
- return ConvertRecordDeclType(RT->getDecl());
+ return ConvertRecordDeclType(RT->getOriginalDecl()->getDefinitionOrSelf());
llvm::Type *CachedType = nullptr;
auto TCI = TypeCache.find(Ty);
@@ -699,7 +700,8 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
break;
case Type::Enum: {
- const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
+ const EnumDecl *ED =
+ cast<EnumType>(Ty)->getOriginalDecl()->getDefinitionOrSelf();
if (ED->isCompleteDefinition() || ED->isFixed())
return ConvertType(ED->getIntegerType());
// Return a placeholder 'i32' type. This can be changed later when the
@@ -725,8 +727,10 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
case Type::MemberPointer: {
auto *MPTy = cast<MemberPointerType>(Ty);
if (!getCXXABI().isMemberPointerConvertible(MPTy)) {
- auto *C = MPTy->getMostRecentCXXRecordDecl()->getTypeForDecl();
- auto Insertion = RecordsWithOpaqueMemberPointers.try_emplace(C);
+ CanQualType T = CGM.getContext().getCanonicalTagType(
+ MPTy->getMostRecentCXXRecordDecl());
+ auto Insertion =
+ RecordsWithOpaqueMemberPointers.try_emplace(T.getTypePtr());
if (Insertion.second)
Insertion.first->second = llvm::StructType::create(getLLVMContext());
ResultType = Insertion.first->second;
@@ -789,7 +793,7 @@ bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) {
llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
// TagDecl's are not necessarily unique, instead use the (clang)
// type connected to the decl.
- const Type *Key = Context.getTagDeclType(RD).getTypePtr();
+ const Type *Key = Context.getCanonicalTagType(RD).getTypePtr();
llvm::StructType *&Entry = RecordDeclTypes[Key];
@@ -810,7 +814,10 @@ llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
for (const auto &I : CRD->bases()) {
if (I.isVirtual()) continue;
- ConvertRecordDeclType(I.getType()->castAs<RecordType>()->getDecl());
+ ConvertRecordDeclType(I.getType()
+ ->castAs<RecordType>()
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf());
}
}
@@ -830,7 +837,7 @@ llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
- const Type *Key = Context.getTagDeclType(RD).getTypePtr();
+ const Type *Key = Context.getCanonicalTagType(RD).getTypePtr();
auto I = CGRecordLayouts.find(Key);
if (I != CGRecordLayouts.end())
@@ -869,7 +876,7 @@ bool CodeGenTypes::isZeroInitializable(QualType T) {
// Records are non-zero-initializable if they contain any
// non-zero-initializable subobjects.
if (const RecordType *RT = T->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
return isZeroInitializable(RD);
}
diff --git a/clang/lib/CodeGen/CoverageMappingGen.cpp b/clang/lib/CodeGen/CoverageMappingGen.cpp
index 38aaceb..05fb137 100644
--- a/clang/lib/CodeGen/CoverageMappingGen.cpp
+++ b/clang/lib/CodeGen/CoverageMappingGen.cpp
@@ -2269,6 +2269,11 @@ struct CounterCoverageMappingBuilder
// Track LHS True/False Decision.
const auto DecisionLHS = MCDCBuilder.pop();
+ if (auto Gap =
+ findGapAreaBetween(getEnd(E->getLHS()), getStart(E->getRHS()))) {
+ fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(), getRegionCounter(E));
+ }
+
// Counter tracks the right hand side of a logical and operator.
extendRegion(E->getRHS());
propagateCounts(getRegionCounter(E), E->getRHS());
@@ -2330,6 +2335,11 @@ struct CounterCoverageMappingBuilder
// Track LHS True/False Decision.
const auto DecisionLHS = MCDCBuilder.pop();
+ if (auto Gap =
+ findGapAreaBetween(getEnd(E->getLHS()), getStart(E->getRHS()))) {
+ fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(), getRegionCounter(E));
+ }
+
// Counter tracks the right hand side of a logical or operator.
extendRegion(E->getRHS());
propagateCounts(getRegionCounter(E), E->getRHS());
diff --git a/clang/lib/CodeGen/HLSLBufferLayoutBuilder.cpp b/clang/lib/CodeGen/HLSLBufferLayoutBuilder.cpp
index 1ed3389..ac56dda 100644
--- a/clang/lib/CodeGen/HLSLBufferLayoutBuilder.cpp
+++ b/clang/lib/CodeGen/HLSLBufferLayoutBuilder.cpp
@@ -101,7 +101,8 @@ llvm::TargetExtType *HLSLBufferLayoutBuilder::createLayoutType(
const RecordType *RT = RecordTypes.back();
RecordTypes.pop_back();
- for (const auto *FD : RT->getDecl()->fields()) {
+ for (const auto *FD :
+ RT->getOriginalDecl()->getDefinitionOrSelf()->fields()) {
assert((!PackOffsets || Index < PackOffsets->size()) &&
"number of elements in layout struct does not match number of "
"packoffset annotations");
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index aae1481..4ed3775 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -226,6 +226,10 @@ public:
return hasUniqueVTablePointer(DestRecordTy);
}
+ std::optional<ExactDynamicCastInfo>
+ getExactDynamicCastInfo(QualType SrcRecordTy, QualType DestTy,
+ QualType DestRecordTy) override;
+
llvm::Value *emitDynamicCastCall(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy, QualType DestTy,
QualType DestRecordTy,
@@ -234,6 +238,7 @@ public:
llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address ThisAddr,
QualType SrcRecordTy, QualType DestTy,
QualType DestRecordTy,
+ const ExactDynamicCastInfo &CastInfo,
llvm::BasicBlock *CastSuccess,
llvm::BasicBlock *CastFail) override;
@@ -826,7 +831,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
for (const CXXRecordDecl *Base : CGM.getMostBaseClasses(RD)) {
llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(
getContext().getMemberPointerType(MPT->getPointeeType(),
- /*Qualifier=*/nullptr,
+ /*Qualifier=*/std::nullopt,
Base->getCanonicalDecl()));
llvm::Value *TypeId =
llvm::MetadataAsValue::get(CGF.getLLVMContext(), MD);
@@ -1236,7 +1241,7 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD)) {
llvm::Constant *Src = BuildMemberPointer(MD, ThisAdjustment);
QualType SrcType = getContext().getMemberPointerType(
- MD->getType(), /*Qualifier=*/nullptr, MD->getParent());
+ MD->getType(), /*Qualifier=*/std::nullopt, MD->getParent());
return pointerAuthResignMemberFunctionPointer(Src, MPType, SrcType, CGM);
}
@@ -1392,8 +1397,9 @@ void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
// to pass to the deallocation function.
// Grab the vtable pointer as an intptr_t*.
- auto *ClassDecl =
- cast<CXXRecordDecl>(ElementType->castAs<RecordType>()->getDecl());
+ auto *ClassDecl = cast<CXXRecordDecl>(
+ ElementType->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.UnqualPtrTy, ClassDecl);
// Track back to entry -2 and pull out the offset there.
@@ -1479,7 +1485,8 @@ void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
// trivial destructor (or isn't a record), we just pass null.
llvm::Constant *Dtor = nullptr;
if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
- CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
+ CXXRecordDecl *Record =
+ cast<CXXRecordDecl>(RecordTy->getOriginalDecl())->getDefinitionOrSelf();
if (!Record->hasTrivialDestructor()) {
// __cxa_throw is declared to take its destructor as void (*)(void *). We
// must match that if function pointers can be authenticated with a
@@ -1606,7 +1613,8 @@ llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
Address ThisPtr,
llvm::Type *StdTypeInfoPtrTy) {
auto *ClassDecl =
- cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
llvm::Value *Value = CGF.GetVTablePtr(ThisPtr, CGM.GlobalsInt8PtrTy,
ClassDecl);
@@ -1681,10 +1689,11 @@ llvm::Value *ItaniumCXXABI::emitDynamicCastCall(
return Value;
}
-llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
- CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
- QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastSuccess,
- llvm::BasicBlock *CastFail) {
+std::optional<CGCXXABI::ExactDynamicCastInfo>
+ItaniumCXXABI::getExactDynamicCastInfo(QualType SrcRecordTy, QualType DestTy,
+ QualType DestRecordTy) {
+ assert(shouldEmitExactDynamicCast(DestRecordTy));
+
ASTContext &Context = getContext();
// Find all the inheritance paths.
@@ -1722,48 +1731,64 @@ llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
if (!Offset)
Offset = PathOffset;
else if (Offset != PathOffset) {
- // Base appears in at least two different places. Find the most-derived
- // object and see if it's a DestDecl. Note that the most-derived object
- // must be at least as aligned as this base class subobject, and must
- // have a vptr at offset 0.
- ThisAddr = Address(emitDynamicCastToVoid(CGF, ThisAddr, SrcRecordTy),
- CGF.VoidPtrTy, ThisAddr.getAlignment());
- SrcDecl = DestDecl;
- Offset = CharUnits::Zero();
- break;
+ // Base appears in at least two different places.
+ return ExactDynamicCastInfo{/*RequiresCastToPrimaryBase=*/true,
+ CharUnits::Zero()};
}
}
+ if (!Offset)
+ return std::nullopt;
+ return ExactDynamicCastInfo{/*RequiresCastToPrimaryBase=*/false, *Offset};
+}
- if (!Offset) {
- // If there are no public inheritance paths, the cast always fails.
- CGF.EmitBranch(CastFail);
- return llvm::PoisonValue::get(CGF.VoidPtrTy);
- }
+llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
+ CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy,
+ QualType DestTy, QualType DestRecordTy,
+ const ExactDynamicCastInfo &ExactCastInfo, llvm::BasicBlock *CastSuccess,
+ llvm::BasicBlock *CastFail) {
+ const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
+ const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
+
+ llvm::Value *VTable = nullptr;
+ if (ExactCastInfo.RequiresCastToPrimaryBase) {
+ // Base appears in at least two different places. Find the most-derived
+ // object and see if it's a DestDecl. Note that the most-derived object
+ // must be at least as aligned as this base class subobject, and must
+ // have a vptr at offset 0.
+ llvm::Value *PrimaryBase =
+ emitDynamicCastToVoid(CGF, ThisAddr, SrcRecordTy);
+ ThisAddr = Address(PrimaryBase, CGF.VoidPtrTy, ThisAddr.getAlignment());
+ SrcDecl = DestDecl;
+ Address VTablePtrPtr = ThisAddr.withElementType(CGF.VoidPtrPtrTy);
+ VTable = CGF.Builder.CreateLoad(VTablePtrPtr, "vtable");
+ } else
+ VTable = CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, SrcDecl);
// Compare the vptr against the expected vptr for the destination type at
- // this offset. Note that we do not know what type ThisAddr points to in
- // the case where the derived class multiply inherits from the base class
- // so we can't use GetVTablePtr, so we load the vptr directly instead.
- llvm::Instruction *VPtr = CGF.Builder.CreateLoad(
- ThisAddr.withElementType(CGF.VoidPtrPtrTy), "vtable");
- CGM.DecorateInstructionWithTBAA(
- VPtr, CGM.getTBAAVTablePtrAccessInfo(CGF.VoidPtrPtrTy));
- llvm::Value *Success = CGF.Builder.CreateICmpEQ(
- VPtr, getVTableAddressPoint(BaseSubobject(SrcDecl, *Offset), DestDecl));
- llvm::Value *Result = ThisAddr.emitRawPointer(CGF);
- if (!Offset->isZero())
- Result = CGF.Builder.CreateInBoundsGEP(
- CGF.CharTy, Result,
- {llvm::ConstantInt::get(CGF.PtrDiffTy, -Offset->getQuantity())});
+ // this offset.
+ llvm::Constant *ExpectedVTable = getVTableAddressPoint(
+ BaseSubobject(SrcDecl, ExactCastInfo.Offset), DestDecl);
+ llvm::Value *Success = CGF.Builder.CreateICmpEQ(VTable, ExpectedVTable);
+ llvm::Value *AdjustedThisPtr = ThisAddr.emitRawPointer(CGF);
+
+ if (!ExactCastInfo.Offset.isZero()) {
+ CharUnits::QuantityType Offset = ExactCastInfo.Offset.getQuantity();
+ llvm::Constant *OffsetConstant =
+ llvm::ConstantInt::get(CGF.PtrDiffTy, -Offset);
+ AdjustedThisPtr = CGF.Builder.CreateInBoundsGEP(CGF.CharTy, AdjustedThisPtr,
+ OffsetConstant);
+ }
+
CGF.Builder.CreateCondBr(Success, CastSuccess, CastFail);
- return Result;
+ return AdjustedThisPtr;
}
llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
Address ThisAddr,
QualType SrcRecordTy) {
auto *ClassDecl =
- cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(SrcRecordTy->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
llvm::Value *OffsetToTop;
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
// Get the vtable pointer.
@@ -2016,7 +2041,7 @@ void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
llvm::Constant *RTTI =
- CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
+ CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getCanonicalTagType(RD));
// Create and set the initializer.
ConstantInitBuilder builder(CGM);
@@ -3755,7 +3780,8 @@ static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
if (!Context.getLangOpts().RTTI) return false;
if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(RecordTy->getOriginalDecl())->getDefinitionOrSelf();
if (!RD->hasDefinition())
return false;
@@ -3789,7 +3815,9 @@ static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
/// IsIncompleteClassType - Returns whether the given record type is incomplete.
static bool IsIncompleteClassType(const RecordType *RecordTy) {
- return !RecordTy->getDecl()->isCompleteDefinition();
+ return !RecordTy->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->isCompleteDefinition();
}
/// ContainsIncompleteClassType - Returns whether the given type contains an
@@ -3815,9 +3843,7 @@ static bool ContainsIncompleteClassType(QualType Ty) {
if (const MemberPointerType *MemberPointerTy =
dyn_cast<MemberPointerType>(Ty)) {
// Check if the class type is incomplete.
- const auto *ClassType = cast<RecordType>(
- MemberPointerTy->getMostRecentCXXRecordDecl()->getTypeForDecl());
- if (IsIncompleteClassType(ClassType))
+ if (!MemberPointerTy->getMostRecentCXXRecordDecl()->hasDefinition())
return true;
return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
@@ -3846,8 +3872,9 @@ static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
return false;
// Check that the class is dynamic iff the base is.
- auto *BaseDecl =
- cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
+ auto *BaseDecl = cast<CXXRecordDecl>(
+ Base->getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (!BaseDecl->isEmpty() &&
BaseDecl->isDynamicClass() != RD->isDynamicClass())
return false;
@@ -3926,7 +3953,8 @@ void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty,
case Type::Record: {
const CXXRecordDecl *RD =
- cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
+ cast<CXXRecordDecl>(cast<RecordType>(Ty)->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (!RD->hasDefinition() || !RD->getNumBases()) {
VTableName = ClassTypeInfo;
@@ -4048,7 +4076,8 @@ static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
return llvm::GlobalValue::LinkOnceODRLinkage;
if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(Record->getOriginalDecl())->getDefinitionOrSelf();
if (RD->hasAttr<WeakAttr>())
return llvm::GlobalValue::WeakODRLinkage;
if (CGM.getTriple().isWindowsItaniumEnvironment())
@@ -4212,7 +4241,8 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
case Type::Record: {
const CXXRecordDecl *RD =
- cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
+ cast<CXXRecordDecl>(cast<RecordType>(Ty)->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (!RD->hasDefinition() || !RD->getNumBases()) {
// We don't need to emit any fields.
break;
@@ -4259,7 +4289,8 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
if (CGM.getTarget().hasPS4DLLImportExport() &&
GVDLLStorageClass != llvm::GlobalVariable::DLLExportStorageClass) {
if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (RD->hasAttr<DLLExportAttr>() ||
CXXRecordNonInlineHasAttr<DLLExportAttr>(RD))
GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
@@ -4363,8 +4394,9 @@ static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
unsigned Flags = 0;
- auto *BaseDecl =
- cast<CXXRecordDecl>(Base->getType()->castAs<RecordType>()->getDecl());
+ auto *BaseDecl = cast<CXXRecordDecl>(
+ Base->getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (Base->isVirtual()) {
// Mark the virtual base as seen.
@@ -4464,7 +4496,9 @@ void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
auto *BaseDecl =
- cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ Base.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
int64_t OffsetFlags = 0;
@@ -4554,9 +4588,8 @@ ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
// attributes of the type pointed to.
unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
- const auto *ClassType =
- cast<RecordType>(Ty->getMostRecentCXXRecordDecl()->getTypeForDecl());
- if (IsIncompleteClassType(ClassType))
+ const auto *RD = Ty->getMostRecentCXXRecordDecl();
+ if (!RD->hasDefinition())
Flags |= PTI_ContainingClassIncomplete;
llvm::Type *UnsignedIntLTy =
@@ -4574,8 +4607,8 @@ ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
// __context is a pointer to an abi::__class_type_info corresponding to the
// class type containing the member pointed to
// (e.g., the "A" in "int A::*").
- Fields.push_back(
- ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0)));
+ CanQualType T = CGM.getContext().getCanonicalTagType(RD);
+ Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(T));
}
llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
@@ -5155,7 +5188,7 @@ ItaniumCXXABI::getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD) {
.getDecl());
llvm::Constant *thunk = getOrCreateVirtualFunctionPointerThunk(origMD);
QualType funcType = CGM.getContext().getMemberPointerType(
- MD->getType(), /*Qualifier=*/nullptr, MD->getParent());
+ MD->getType(), /*Qualifier=*/std::nullopt, MD->getParent());
return CGM.getMemberFunctionPointer(thunk, funcType);
}
diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 700ffa4..88f0648 100644
--- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -158,9 +158,15 @@ public:
// TODO: Add support for exact dynamic_casts.
return false;
}
+ std::optional<ExactDynamicCastInfo>
+ getExactDynamicCastInfo(QualType SrcRecordTy, QualType DestTy,
+ QualType DestRecordTy) override {
+ llvm_unreachable("unsupported");
+ }
llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address Value,
QualType SrcRecordTy, QualType DestTy,
QualType DestRecordTy,
+ const ExactDynamicCastInfo &CastInfo,
llvm::BasicBlock *CastSuccess,
llvm::BasicBlock *CastFail) override {
llvm_unreachable("unsupported");
@@ -870,7 +876,8 @@ MicrosoftCXXABI::getRecordArgABI(const CXXRecordDecl *RD) const {
// it indirectly. Prior to MSVC version 19.14, passing overaligned
// arguments was not supported and resulted in a compiler error. In 19.14
// and later versions, such arguments are now passed indirectly.
- TypeInfo Info = getContext().getTypeInfo(RD->getTypeForDecl());
+ TypeInfo Info =
+ getContext().getTypeInfo(getContext().getCanonicalTagType(RD));
if (Info.isAlignRequired() && Info.Align > 4)
return RAA_Indirect;
@@ -2918,15 +2925,15 @@ llvm::Constant *MicrosoftCXXABI::EmitMemberPointer(const APValue &MP,
if (!FD)
FD = cast<FieldDecl>(*cast<IndirectFieldDecl>(MPD)->chain_begin());
const CXXRecordDecl *RD = cast<CXXRecordDecl>(FD->getParent());
- RD = RD->getMostRecentNonInjectedDecl();
+ RD = RD->getMostRecentDecl();
C = EmitMemberDataPointer(RD, FieldOffset);
}
if (!MemberPointerPath.empty()) {
const CXXRecordDecl *SrcRD = cast<CXXRecordDecl>(MPD->getDeclContext());
const MemberPointerType *SrcTy =
- Ctx.getMemberPointerType(DstTy->getPointeeType(), /*Qualifier=*/nullptr,
- SrcRD)
+ Ctx.getMemberPointerType(DstTy->getPointeeType(),
+ /*Qualifier=*/std::nullopt, SrcRD)
->castAs<MemberPointerType>();
bool DerivedMember = MP.isMemberPointerToDerivedMember();
@@ -2963,7 +2970,7 @@ MicrosoftCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) {
assert(MD->isInstance() && "Member function must not be static!");
CharUnits NonVirtualBaseAdjustment = CharUnits::Zero();
- const CXXRecordDecl *RD = MD->getParent()->getMostRecentNonInjectedDecl();
+ const CXXRecordDecl *RD = MD->getParent()->getMostRecentDecl();
CodeGenTypes &Types = CGM.getTypes();
unsigned VBTableIndex = 0;
@@ -3684,7 +3691,7 @@ struct MSRTTIBuilder {
MSRTTIBuilder(MicrosoftCXXABI &ABI, const CXXRecordDecl *RD)
: CGM(ABI.CGM), Context(CGM.getContext()),
VMContext(CGM.getLLVMContext()), Module(CGM.getModule()), RD(RD),
- Linkage(getLinkageForRTTI(CGM.getContext().getTagDeclType(RD))),
+ Linkage(getLinkageForRTTI(CGM.getContext().getCanonicalTagType(RD))),
ABI(ABI) {}
llvm::GlobalVariable *getBaseClassDescriptor(const MSRTTIClass &Classes);
@@ -3858,7 +3865,7 @@ MSRTTIBuilder::getBaseClassDescriptor(const MSRTTIClass &Class) {
// Initialize the BaseClassDescriptor.
llvm::Constant *Fields[] = {
ABI.getImageRelativeConstant(
- ABI.getAddrOfRTTIDescriptor(Context.getTypeDeclType(Class.RD))),
+ ABI.getAddrOfRTTIDescriptor(Context.getCanonicalTagType(Class.RD))),
llvm::ConstantInt::get(CGM.IntTy, Class.NumBases),
llvm::ConstantInt::get(CGM.IntTy, Class.OffsetInVBase),
llvm::ConstantInt::get(CGM.IntTy, VBPtrOffset),
@@ -3905,7 +3912,7 @@ MSRTTIBuilder::getCompleteObjectLocator(const VPtrInfo &Info) {
llvm::ConstantInt::get(CGM.IntTy, OffsetToTop),
llvm::ConstantInt::get(CGM.IntTy, VFPtrOffset),
ABI.getImageRelativeConstant(
- CGM.GetAddrOfRTTIDescriptor(Context.getTypeDeclType(RD))),
+ CGM.GetAddrOfRTTIDescriptor(Context.getCanonicalTagType(RD))),
ABI.getImageRelativeConstant(getClassHierarchyDescriptor()),
ABI.getImageRelativeConstant(COL),
};
@@ -4076,7 +4083,7 @@ MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeMSCtorClosure(CD, CT);
llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(FnInfo);
const CXXRecordDecl *RD = CD->getParent();
- QualType RecordTy = getContext().getRecordType(RD);
+ CanQualType RecordTy = getContext().getCanonicalTagType(RD);
llvm::Function *ThunkFn = llvm::Function::Create(
ThunkTy, getLinkageForRTTI(RecordTy), ThunkName.str(), &CGM.getModule());
ThunkFn->setCallingConv(static_cast<llvm::CallingConv::ID>(
@@ -4312,7 +4319,7 @@ llvm::GlobalVariable *MicrosoftCXXABI::getCatchableTypeArray(QualType T) {
// Turn our record back into a pointer if the exception object is a
// pointer.
- QualType RTTITy = QualType(Class.RD->getTypeForDecl(), 0);
+ CanQualType RTTITy = Context.getCanonicalTagType(Class.RD);
if (IsPointer)
RTTITy = Context.getPointerType(RTTITy);
CatchableTypes.insert(getCatchableType(RTTITy, Class.OffsetInVBase,
@@ -4463,8 +4470,8 @@ void MicrosoftCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
std::pair<llvm::Value *, const CXXRecordDecl *>
MicrosoftCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This,
const CXXRecordDecl *RD) {
- std::tie(This, std::ignore, RD) =
- performBaseAdjustment(CGF, This, QualType(RD->getTypeForDecl(), 0));
+ CanQualType T = CGF.getContext().getCanonicalTagType(RD);
+ std::tie(This, std::ignore, RD) = performBaseAdjustment(CGF, This, T);
return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD};
}
diff --git a/clang/lib/CodeGen/SwiftCallingConv.cpp b/clang/lib/CodeGen/SwiftCallingConv.cpp
index 10f9f20b..de58e0d 100644
--- a/clang/lib/CodeGen/SwiftCallingConv.cpp
+++ b/clang/lib/CodeGen/SwiftCallingConv.cpp
@@ -66,9 +66,9 @@ void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
// Record types.
if (auto recType = type->getAs<RecordType>()) {
- addTypedData(recType->getDecl(), begin);
+ addTypedData(recType->getOriginalDecl(), begin);
- // Array types.
+ // Array types.
} else if (type->isArrayType()) {
// Incomplete array types (flexible array members?) don't provide
// data to lay out, and the other cases shouldn't be possible.
@@ -814,7 +814,7 @@ static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
bool forReturn) {
unsigned IndirectAS = CGM.getDataLayout().getAllocaAddrSpace();
if (auto recordType = dyn_cast<RecordType>(type)) {
- auto record = recordType->getDecl();
+ auto record = recordType->getOriginalDecl();
auto &layout = CGM.getContext().getASTRecordLayout(record);
if (mustPassRecordIndirectly(CGM, record))
@@ -822,7 +822,8 @@ static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
/*AddrSpace=*/IndirectAS, /*byval=*/false);
SwiftAggLowering lowering(CGM);
- lowering.addTypedData(recordType->getDecl(), CharUnits::Zero(), layout);
+ lowering.addTypedData(recordType->getOriginalDecl(), CharUnits::Zero(),
+ layout);
lowering.finish();
return classifyExpandedType(lowering, forReturn, layout.getAlignment(),
diff --git a/clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp b/clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp
index 33a8d8f..1a1889a 100644
--- a/clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp
@@ -246,35 +246,26 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
llvm::FunctionType *LLVMFuncTy =
cast<llvm::FunctionType>(ConvertType(QualType(FuncTy, 0)));
+ bool VarArg = LLVMFuncTy->isVarArg();
unsigned NParams = LLVMFuncTy->getNumParams();
std::vector<Value *> Args;
- Args.reserve(NParams + 3);
+ Args.reserve(NParams + 3 + VarArg);
// The only real argument is the FuncRef
Args.push_back(FuncRef);
// Add the type information
- auto addType = [this, &Args](llvm::Type *T) {
- if (T->isVoidTy()) {
- // Do nothing
- } else if (T->isFloatingPointTy()) {
- Args.push_back(ConstantFP::get(T, 0));
- } else if (T->isIntegerTy()) {
- Args.push_back(ConstantInt::get(T, 0));
- } else if (T->isPointerTy()) {
- Args.push_back(ConstantPointerNull::get(llvm::PointerType::get(
- getLLVMContext(), T->getPointerAddressSpace())));
- } else {
- // TODO: Handle reference types. For now, we reject them in Sema.
- llvm_unreachable("Unhandled type");
- }
- };
-
- addType(LLVMFuncTy->getReturnType());
+ llvm::Type *RetType = LLVMFuncTy->getReturnType();
+ if (!RetType->isVoidTy()) {
+ Args.push_back(PoisonValue::get(RetType));
+ }
// The token type indicates the boundary between return types and param
// types.
Args.push_back(PoisonValue::get(llvm::Type::getTokenTy(getLLVMContext())));
for (unsigned i = 0; i < NParams; i++) {
- addType(LLVMFuncTy->getParamType(i));
+ Args.push_back(PoisonValue::get(LLVMFuncTy->getParamType(i)));
+ }
+ if (VarArg) {
+ Args.push_back(PoisonValue::get(Builder.getPtrTy()));
}
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_ref_test_func);
return Builder.CreateCall(Callee, Args);
diff --git a/clang/lib/CodeGen/TargetBuiltins/X86.cpp b/clang/lib/CodeGen/TargetBuiltins/X86.cpp
index e23d19d..b508709 100644
--- a/clang/lib/CodeGen/TargetBuiltins/X86.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/X86.cpp
@@ -1051,18 +1051,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vfmsubsd3_mask3:
return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2,
/*NegAcc*/ true);
- case X86::BI__builtin_ia32_vfmaddph:
- case X86::BI__builtin_ia32_vfmaddps:
- case X86::BI__builtin_ia32_vfmaddpd:
- case X86::BI__builtin_ia32_vfmaddph256:
- case X86::BI__builtin_ia32_vfmaddps256:
- case X86::BI__builtin_ia32_vfmaddpd256:
case X86::BI__builtin_ia32_vfmaddph512_mask:
case X86::BI__builtin_ia32_vfmaddph512_maskz:
case X86::BI__builtin_ia32_vfmaddph512_mask3:
- case X86::BI__builtin_ia32_vfmaddbf16128:
- case X86::BI__builtin_ia32_vfmaddbf16256:
- case X86::BI__builtin_ia32_vfmaddbf16512:
case X86::BI__builtin_ia32_vfmaddps512_mask:
case X86::BI__builtin_ia32_vfmaddps512_maskz:
case X86::BI__builtin_ia32_vfmaddps512_mask3:
diff --git a/clang/lib/CodeGen/Targets/AArch64.cpp b/clang/lib/CodeGen/Targets/AArch64.cpp
index b82c469..289f8a9 100644
--- a/clang/lib/CodeGen/Targets/AArch64.cpp
+++ b/clang/lib/CodeGen/Targets/AArch64.cpp
@@ -375,7 +375,7 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadicFn,
if (!passAsAggregateType(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (const auto *EIT = Ty->getAs<BitIntType>())
if (EIT->getNumBits() > 128)
@@ -496,7 +496,7 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadicFn,
const RecordType *RT = Ty->getAs<RecordType>();
if (!RT)
return false;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
for (const auto &I : CXXRD->bases())
if (!Self(Self, I.getType()))
@@ -548,7 +548,8 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
if (!passAsAggregateType(RetTy)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ RetTy =
+ EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() > 128)
@@ -744,7 +745,7 @@ bool AArch64ABIInfo::passAsPureScalableType(
return false;
// Pure scalable types are never unions and never contain unions.
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->isUnion())
return false;
diff --git a/clang/lib/CodeGen/Targets/AMDGPU.cpp b/clang/lib/CodeGen/Targets/AMDGPU.cpp
index 47a552a..41bccbb 100644
--- a/clang/lib/CodeGen/Targets/AMDGPU.cpp
+++ b/clang/lib/CodeGen/Targets/AMDGPU.cpp
@@ -96,7 +96,7 @@ unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
}
if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
assert(!RD->hasFlexibleArrayMember());
for (const FieldDecl *Field : RD->fields()) {
@@ -153,7 +153,7 @@ ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
if (const RecordType *RT = RetTy->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->hasFlexibleArrayMember())
return DefaultABIInfo::classifyReturnType(RetTy);
}
@@ -246,7 +246,7 @@ ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty, bool Variadic,
return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->hasFlexibleArrayMember())
return DefaultABIInfo::classifyArgumentType(Ty);
}
diff --git a/clang/lib/CodeGen/Targets/ARC.cpp b/clang/lib/CodeGen/Targets/ARC.cpp
index c8db7e8..ace524e 100644
--- a/clang/lib/CodeGen/Targets/ARC.cpp
+++ b/clang/lib/CodeGen/Targets/ARC.cpp
@@ -106,13 +106,14 @@ ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32;
if (isAggregateTypeForABI(Ty)) {
// Structures with flexible arrays are always indirect.
- if (RT && RT->getDecl()->hasFlexibleArrayMember())
+ if (RT &&
+ RT->getOriginalDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember())
return getIndirectByValue(Ty);
// Ignore empty structs/unions.
diff --git a/clang/lib/CodeGen/Targets/ARM.cpp b/clang/lib/CodeGen/Targets/ARM.cpp
index 68f9e01..532ba4c 100644
--- a/clang/lib/CodeGen/Targets/ARM.cpp
+++ b/clang/lib/CodeGen/Targets/ARM.cpp
@@ -383,7 +383,7 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
}
if (const auto *EIT = Ty->getAs<BitIntType>())
@@ -516,7 +516,7 @@ static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
if (!RT) return false;
// Ignore records with flexible arrays.
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->hasFlexibleArrayMember())
return false;
@@ -593,7 +593,8 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
if (!isAggregateTypeForABI(RetTy)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ RetTy =
+ EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() > 64)
@@ -718,7 +719,7 @@ bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
return false;
return containsAnyFP16Vectors(AT->getElementType());
} else if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
diff --git a/clang/lib/CodeGen/Targets/BPF.cpp b/clang/lib/CodeGen/Targets/BPF.cpp
index 880a8910..87d50e6 100644
--- a/clang/lib/CodeGen/Targets/BPF.cpp
+++ b/clang/lib/CodeGen/Targets/BPF.cpp
@@ -48,7 +48,7 @@ public:
}
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
ASTContext &Context = getContext();
if (const auto *EIT = Ty->getAs<BitIntType>())
@@ -70,7 +70,8 @@ public:
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ RetTy =
+ EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
ASTContext &Context = getContext();
if (const auto *EIT = RetTy->getAs<BitIntType>())
diff --git a/clang/lib/CodeGen/Targets/CSKY.cpp b/clang/lib/CodeGen/Targets/CSKY.cpp
index ef26d48..7e5a16f 100644
--- a/clang/lib/CodeGen/Targets/CSKY.cpp
+++ b/clang/lib/CodeGen/Targets/CSKY.cpp
@@ -116,7 +116,7 @@ ABIArgInfo CSKYABIInfo::classifyArgumentType(QualType Ty, int &ArgGPRsLeft,
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
// All integral types are promoted to XLen width, unless passed on the
// stack.
diff --git a/clang/lib/CodeGen/Targets/Hexagon.cpp b/clang/lib/CodeGen/Targets/Hexagon.cpp
index 2976657..0c42342 100644
--- a/clang/lib/CodeGen/Targets/Hexagon.cpp
+++ b/clang/lib/CodeGen/Targets/Hexagon.cpp
@@ -98,7 +98,7 @@ ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty,
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
uint64_t Size = getContext().getTypeSize(Ty);
if (Size <= 64)
@@ -161,7 +161,8 @@ ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
if (!isAggregateTypeForABI(RetTy)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ RetTy =
+ EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (Size > 64 && RetTy->isBitIntType())
return getNaturalAlignIndirect(
diff --git a/clang/lib/CodeGen/Targets/Lanai.cpp b/clang/lib/CodeGen/Targets/Lanai.cpp
index 6f75bd5..08cb360 100644
--- a/clang/lib/CodeGen/Targets/Lanai.cpp
+++ b/clang/lib/CodeGen/Targets/Lanai.cpp
@@ -102,7 +102,8 @@ ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
if (isAggregateTypeForABI(Ty)) {
// Structures with flexible arrays are always indirect.
- if (RT && RT->getDecl()->hasFlexibleArrayMember())
+ if (RT &&
+ RT->getOriginalDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember())
return getIndirectResult(Ty, /*ByVal=*/true, State);
// Ignore empty structs/unions.
@@ -125,7 +126,7 @@ ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
// Treat an enum type as its underlying type.
if (const auto *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
bool InReg = shouldUseInReg(Ty, State);
diff --git a/clang/lib/CodeGen/Targets/LoongArch.cpp b/clang/lib/CodeGen/Targets/LoongArch.cpp
index 7640f37..af863e6 100644
--- a/clang/lib/CodeGen/Targets/LoongArch.cpp
+++ b/clang/lib/CodeGen/Targets/LoongArch.cpp
@@ -150,7 +150,7 @@ bool LoongArchABIInfo::detectFARsEligibleStructHelper(
// Non-zero-length arrays of empty records make the struct ineligible to be
// passed via FARs in C++.
if (const auto *RTy = EltTy->getAs<RecordType>()) {
- if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getDecl()) &&
+ if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getOriginalDecl()) &&
isEmptyRecord(getContext(), EltTy, true, true))
return false;
}
@@ -169,7 +169,7 @@ bool LoongArchABIInfo::detectFARsEligibleStructHelper(
// copy constructor are not eligible for the FP calling convention.
if (getRecordArgABI(Ty, CGT.getCXXABI()))
return false;
- const RecordDecl *RD = RTy->getDecl();
+ const RecordDecl *RD = RTy->getOriginalDecl()->getDefinitionOrSelf();
if (isEmptyRecord(getContext(), Ty, true, true) &&
(!RD->isUnion() || !isa<CXXRecordDecl>(RD)))
return true;
@@ -181,7 +181,9 @@ bool LoongArchABIInfo::detectFARsEligibleStructHelper(
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
for (const CXXBaseSpecifier &B : CXXRD->bases()) {
const auto *BDecl =
- cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ B.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
if (!detectFARsEligibleStructHelper(
B.getType(), CurOff + Layout.getBaseClassOffset(BDecl),
Field1Ty, Field1Off, Field2Ty, Field2Off))
@@ -369,7 +371,7 @@ ABIArgInfo LoongArchABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
// All integral types are promoted to GRLen width.
if (Size < GRLen && Ty->isIntegralOrEnumerationType())
diff --git a/clang/lib/CodeGen/Targets/Mips.cpp b/clang/lib/CodeGen/Targets/Mips.cpp
index c025f73..e12a34c 100644
--- a/clang/lib/CodeGen/Targets/Mips.cpp
+++ b/clang/lib/CodeGen/Targets/Mips.cpp
@@ -161,7 +161,7 @@ llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
return llvm::StructType::get(getVMContext(), ArgList);
}
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
@@ -242,7 +242,7 @@ MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
// Make sure we pass indirectly things that are too large.
if (const auto *EIT = Ty->getAs<BitIntType>())
@@ -265,7 +265,7 @@ MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
SmallVector<llvm::Type*, 8> RTList;
if (RT && RT->isStructureOrClassType()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
unsigned FieldCnt = Layout.getFieldCount();
@@ -333,7 +333,7 @@ ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ RetTy = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
// Make sure we pass indirectly things that are too large.
if (const auto *EIT = RetTy->getAs<BitIntType>())
diff --git a/clang/lib/CodeGen/Targets/NVPTX.cpp b/clang/lib/CodeGen/Targets/NVPTX.cpp
index 82bdfe2..e874617 100644
--- a/clang/lib/CodeGen/Targets/NVPTX.cpp
+++ b/clang/lib/CodeGen/Targets/NVPTX.cpp
@@ -133,7 +133,7 @@ bool NVPTXABIInfo::isUnsupportedType(QualType T) const {
const auto *RT = T->getAs<RecordType>();
if (!RT)
return false;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
// If this is a C++ record, check the bases first.
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
@@ -174,7 +174,7 @@ ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ RetTy = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
: ABIArgInfo::getDirect());
@@ -183,7 +183,7 @@ ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
// Return aggregates type as indirect by value
if (isAggregateTypeForABI(Ty)) {
diff --git a/clang/lib/CodeGen/Targets/PPC.cpp b/clang/lib/CodeGen/Targets/PPC.cpp
index 4df4c9f..38e7639 100644
--- a/clang/lib/CodeGen/Targets/PPC.cpp
+++ b/clang/lib/CodeGen/Targets/PPC.cpp
@@ -154,7 +154,7 @@ public:
bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
// Promotable integer types are required to be promoted by the ABI.
if (getContext().isPromotableIntegerType(Ty))
@@ -295,7 +295,9 @@ void AIXTargetCodeGenInfo::setTargetAttributes(
unsigned Alignment = Context.toBits(Context.getDeclAlign(D)) / 8;
const auto *Ty = VarD->getType().getTypePtr();
const RecordDecl *RDecl =
- Ty->isRecordType() ? Ty->getAs<RecordType>()->getDecl() : nullptr;
+ Ty->isRecordType()
+ ? Ty->getAs<RecordType>()->getOriginalDecl()->getDefinitionOrSelf()
+ : nullptr;
bool EmitDiagnostic = UserSpecifiedTOC && GV->hasExternalLinkage();
auto reportUnsupportedWarning = [&](bool ShouldEmitWarning, StringRef Msg) {
@@ -707,7 +709,7 @@ bool
PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
// Promotable integer types are required to be promoted by the ABI.
if (isPromotableIntegerTypeForABI(Ty))
diff --git a/clang/lib/CodeGen/Targets/RISCV.cpp b/clang/lib/CodeGen/Targets/RISCV.cpp
index a7f9298..e42bd6c 100644
--- a/clang/lib/CodeGen/Targets/RISCV.cpp
+++ b/clang/lib/CodeGen/Targets/RISCV.cpp
@@ -228,7 +228,7 @@ bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
// Non-zero-length arrays of empty records make the struct ineligible for
// the FP calling convention in C++.
if (const auto *RTy = EltTy->getAs<RecordType>()) {
- if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getDecl()) &&
+ if (ArraySize != 0 && isa<CXXRecordDecl>(RTy->getOriginalDecl()) &&
isEmptyRecord(getContext(), EltTy, true, true))
return false;
}
@@ -250,7 +250,7 @@ bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
return false;
if (isEmptyRecord(getContext(), Ty, true, true))
return true;
- const RecordDecl *RD = RTy->getDecl();
+ const RecordDecl *RD = RTy->getOriginalDecl()->getDefinitionOrSelf();
// Unions aren't eligible unless they're empty (which is caught above).
if (RD->isUnion())
return false;
@@ -259,7 +259,9 @@ bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
for (const CXXBaseSpecifier &B : CXXRD->bases()) {
const auto *BDecl =
- cast<CXXRecordDecl>(B.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ B.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
CharUnits BaseOff = Layout.getBaseClassOffset(BDecl);
bool Ret = detectFPCCEligibleStructHelper(B.getType(), CurOff + BaseOff,
Field1Ty, Field1Off, Field2Ty,
@@ -673,7 +675,7 @@ ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
// All integral types are promoted to XLen width
if (Size < XLen && Ty->isIntegralOrEnumerationType()) {
diff --git a/clang/lib/CodeGen/Targets/SPIR.cpp b/clang/lib/CodeGen/Targets/SPIR.cpp
index d952c6e..237aea7 100644
--- a/clang/lib/CodeGen/Targets/SPIR.cpp
+++ b/clang/lib/CodeGen/Targets/SPIR.cpp
@@ -119,7 +119,7 @@ ABIArgInfo SPIRVABIInfo::classifyReturnType(QualType RetTy) const {
return DefaultABIInfo::classifyReturnType(RetTy);
if (const RecordType *RT = RetTy->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->hasFlexibleArrayMember())
return DefaultABIInfo::classifyReturnType(RetTy);
}
@@ -187,7 +187,7 @@ ABIArgInfo SPIRVABIInfo::classifyArgumentType(QualType Ty) const {
RAA == CGCXXABI::RAA_DirectInMemory);
if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->hasFlexibleArrayMember())
return DefaultABIInfo::classifyArgumentType(Ty);
}
@@ -432,7 +432,7 @@ static llvm::Type *getInlineSpirvType(CodeGenModule &CGM,
case SpirvOperandKind::TypeId: {
QualType TypeOperand = Operand.getResultType();
if (auto *RT = TypeOperand->getAs<RecordType>()) {
- auto *RD = RT->getDecl();
+ auto *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
assert(RD->isCompleteDefinition() &&
"Type completion should have been required in Sema");
diff --git a/clang/lib/CodeGen/Targets/Sparc.cpp b/clang/lib/CodeGen/Targets/Sparc.cpp
index 9642196..1547bed 100644
--- a/clang/lib/CodeGen/Targets/Sparc.cpp
+++ b/clang/lib/CodeGen/Targets/Sparc.cpp
@@ -238,7 +238,7 @@ SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
// Integer types smaller than a register are extended.
if (Size < 64 && Ty->isIntegerType())
diff --git a/clang/lib/CodeGen/Targets/SystemZ.cpp b/clang/lib/CodeGen/Targets/SystemZ.cpp
index 6ea6c7a..38cc4d3 100644
--- a/clang/lib/CodeGen/Targets/SystemZ.cpp
+++ b/clang/lib/CodeGen/Targets/SystemZ.cpp
@@ -146,7 +146,7 @@ public:
bool SystemZABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
// Promotable integer types are required to be promoted by the ABI.
if (ABIInfo::isPromotableIntegerTypeForABI(Ty))
@@ -211,7 +211,7 @@ QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
const RecordType *RT = Ty->getAs<RecordType>();
if (RT && RT->isStructureOrClassType()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
QualType Found;
// If this is a C++ record, check the bases first.
@@ -455,7 +455,7 @@ ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
// Structures with flexible arrays have variable length, so really
// fail the size test above.
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
if (RD->hasFlexibleArrayMember())
return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
/*ByVal=*/false);
@@ -526,7 +526,7 @@ bool SystemZTargetCodeGenInfo::isVectorTypeBased(const Type *Ty,
return true;
if (const auto *RecordTy = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RecordTy->getDecl();
+ const RecordDecl *RD = RecordTy->getOriginalDecl()->getDefinitionOrSelf();
if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
if (CXXRD->hasDefinition())
for (const auto &I : CXXRD->bases())
diff --git a/clang/lib/CodeGen/Targets/WebAssembly.cpp b/clang/lib/CodeGen/Targets/WebAssembly.cpp
index 9217c78..ac8dcd2 100644
--- a/clang/lib/CodeGen/Targets/WebAssembly.cpp
+++ b/clang/lib/CodeGen/Targets/WebAssembly.cpp
@@ -118,7 +118,8 @@ ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
const RecordType *RT = Ty->getAs<RecordType>();
assert(RT);
bool HasBitField = false;
- for (auto *Field : RT->getDecl()->fields()) {
+ for (auto *Field :
+ RT->getOriginalDecl()->getDefinitionOrSelf()->fields()) {
if (Field->isBitField()) {
HasBitField = true;
break;
diff --git a/clang/lib/CodeGen/Targets/X86.cpp b/clang/lib/CodeGen/Targets/X86.cpp
index abb9148..d3431aa 100644
--- a/clang/lib/CodeGen/Targets/X86.cpp
+++ b/clang/lib/CodeGen/Targets/X86.cpp
@@ -359,7 +359,8 @@ bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
// Structure types are passed in register if all fields would be
// passed in a register.
- for (const auto *FD : RT->getDecl()->fields()) {
+ for (const auto *FD :
+ RT->getOriginalDecl()->getDefinitionOrSelf()->fields()) {
// Empty fields are ignored.
if (isEmptyField(Context, FD, true))
continue;
@@ -429,9 +430,9 @@ bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
const RecordType *RT = Ty->getAs<RecordType>();
if (!RT)
return false;
- const RecordDecl *RD = RT->getDecl();
uint64_t Size = 0;
- if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
if (!IsWin32StructABI) {
// On non-Windows, we have to conservatively match our old bitcode
// prototypes in order to be ABI-compatible at the bitcode level.
@@ -509,7 +510,9 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
if (isAggregateTypeForABI(RetTy)) {
if (const RecordType *RT = RetTy->getAs<RecordType>()) {
// Structures with flexible arrays are always indirect.
- if (RT->getDecl()->hasFlexibleArrayMember())
+ if (RT->getOriginalDecl()
+ ->getDefinitionOrSelf()
+ ->hasFlexibleArrayMember())
return getIndirectReturnResult(RetTy, State);
}
@@ -554,7 +557,7 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ RetTy = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (const auto *EIT = RetTy->getAs<BitIntType>())
if (EIT->getNumBits() > 64)
@@ -796,7 +799,8 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State,
if (isAggregateTypeForABI(Ty)) {
// Structures with flexible arrays are always indirect.
// FIXME: This should not be byval!
- if (RT && RT->getDecl()->hasFlexibleArrayMember())
+ if (RT &&
+ RT->getOriginalDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember())
return getIndirectResult(Ty, true, State);
// Ignore empty structs/unions on non-Windows.
@@ -831,7 +835,7 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State,
unsigned AlignInBits = 0;
if (RT) {
const ASTRecordLayout &Layout =
- getContext().getASTRecordLayout(RT->getDecl());
+ getContext().getASTRecordLayout(RT->getOriginalDecl());
AlignInBits = getContext().toBits(Layout.getRequiredAlignment());
} else if (TI.isAlignRequired()) {
AlignInBits = TI.Align;
@@ -883,7 +887,7 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty, CCState &State,
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
bool InReg = shouldPrimitiveUseInReg(Ty, State);
@@ -1847,7 +1851,8 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
if (const EnumType *ET = Ty->getAs<EnumType>()) {
// Classify the underlying integer type.
- classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
+ classify(ET->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType(),
+ OffsetBase, Lo, Hi, isNamedArg);
return;
}
@@ -2053,7 +2058,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
if (getRecordArgABI(RT, getCXXABI()))
return;
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
// Assume variable sized types are passed in memory.
if (RD->hasFlexibleArrayMember())
@@ -2070,7 +2075,9 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
assert(!I.isVirtual() && !I.getType()->isDependentType() &&
"Unexpected base class!");
const auto *Base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ I.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
// Classify this field.
//
@@ -2184,7 +2191,7 @@ ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
if (!isAggregateTypeForABI(Ty)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (Ty->isBitIntType())
return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace());
@@ -2226,7 +2233,7 @@ ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
!Ty->isBitIntType()) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
: ABIArgInfo::getDirect());
@@ -2347,7 +2354,7 @@ static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
}
if (const RecordType *RT = Ty->getAs<RecordType>()) {
- const RecordDecl *RD = RT->getDecl();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinitionOrSelf();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
// If this is a C++ record, check the bases first.
@@ -2356,7 +2363,9 @@ static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
assert(!I.isVirtual() && !I.getType()->isDependentType() &&
"Unexpected base class!");
const auto *Base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(
+ I.getType()->castAs<RecordType>()->getOriginalDecl())
+ ->getDefinitionOrSelf();
// If the base is after the span we care about, ignore it.
unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
@@ -2637,7 +2646,8 @@ ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy) const {
if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
- RetTy = EnumTy->getDecl()->getIntegerType();
+ RetTy =
+ EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (RetTy->isIntegralOrEnumerationType() &&
isPromotableIntegerTypeForABI(RetTy))
@@ -2787,7 +2797,7 @@ X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned freeIntRegs,
if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
// Treat an enum type as its underlying type.
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
if (Ty->isIntegralOrEnumerationType() &&
isPromotableIntegerTypeForABI(Ty))
@@ -2866,14 +2876,15 @@ ABIArgInfo
X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
unsigned &NeededSSE,
unsigned &MaxVectorWidth) const {
- auto RT = Ty->getAs<RecordType>();
- assert(RT && "classifyRegCallStructType only valid with struct types");
+ auto *RD = cast<RecordType>(Ty.getCanonicalType())
+ ->getOriginalDecl()
+ ->getDefinitionOrSelf();
- if (RT->getDecl()->hasFlexibleArrayMember())
+ if (RD->hasFlexibleArrayMember())
return getIndirectReturnResult(Ty);
// Sum up bases
- if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (auto CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
if (CXXRD->isDynamicClass()) {
NeededInt = NeededSSE = 0;
return getIndirectReturnResult(Ty);
@@ -2889,7 +2900,7 @@ X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
}
// Sum up members
- for (const auto *FD : RT->getDecl()->fields()) {
+ for (const auto *FD : RD->fields()) {
QualType MTy = FD->getType();
if (MTy->isRecordType() && !MTy->isUnionType()) {
if (classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,
@@ -3313,7 +3324,7 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
return ABIArgInfo::getIgnore();
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
- Ty = EnumTy->getDecl()->getIntegerType();
+ Ty = EnumTy->getOriginalDecl()->getDefinitionOrSelf()->getIntegerType();
TypeInfo Info = getContext().getTypeInfo(Ty);
uint64_t Width = Info.Width;
@@ -3327,7 +3338,7 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
RAA == CGCXXABI::RAA_DirectInMemory);
}
- if (RT->getDecl()->hasFlexibleArrayMember())
+ if (RT->getOriginalDecl()->getDefinitionOrSelf()->hasFlexibleArrayMember())
return getNaturalAlignIndirect(Ty, getDataLayout().getAllocaAddrSpace(),
/*ByVal=*/false);
}
diff --git a/clang/lib/CodeGen/Targets/XCore.cpp b/clang/lib/CodeGen/Targets/XCore.cpp
index b7824bd..aa6947b 100644
--- a/clang/lib/CodeGen/Targets/XCore.cpp
+++ b/clang/lib/CodeGen/Targets/XCore.cpp
@@ -379,7 +379,7 @@ static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
// We collect all encoded fields and order as necessary.
bool IsRecursive = false;
- const RecordDecl *RD = RT->getDecl()->getDefinition();
+ const RecordDecl *RD = RT->getOriginalDecl()->getDefinition();
if (RD && !RD->field_empty()) {
// An incomplete TypeString stub is placed in the cache for this RecordType
// so that recursive calls to this RecordType will use it whilst building a
@@ -428,7 +428,7 @@ static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
Enc += "){";
// We collect all encoded enumerations and order them alphanumerically.
- if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
+ if (const EnumDecl *ED = ET->getOriginalDecl()->getDefinition()) {
SmallVector<FieldEncoding, 16> FE;
for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
++I) {