Diffstat (limited to 'clang/lib')
60 files changed, 1185 insertions, 788 deletions
diff --git a/clang/lib/AST/ByteCode/Descriptor.cpp b/clang/lib/AST/ByteCode/Descriptor.cpp
index 7403e90..629c1ff 100644
--- a/clang/lib/AST/ByteCode/Descriptor.cpp
+++ b/clang/lib/AST/ByteCode/Descriptor.cpp
@@ -153,28 +153,6 @@ static void dtorArrayDesc(Block *B, std::byte *Ptr, const Descriptor *D) {
   }
 }
 
-static void moveArrayDesc(Block *B, std::byte *Src, std::byte *Dst,
-                          const Descriptor *D) {
-  const unsigned NumElems = D->getNumElems();
-  const unsigned ElemSize =
-      D->ElemDesc->getAllocSize() + sizeof(InlineDescriptor);
-
-  unsigned ElemOffset = 0;
-  for (unsigned I = 0; I < NumElems; ++I, ElemOffset += ElemSize) {
-    auto *SrcPtr = Src + ElemOffset;
-    auto *DstPtr = Dst + ElemOffset;
-
-    auto *SrcDesc = reinterpret_cast<InlineDescriptor *>(SrcPtr);
-    auto *SrcElemLoc = reinterpret_cast<std::byte *>(SrcDesc + 1);
-    auto *DstDesc = reinterpret_cast<InlineDescriptor *>(DstPtr);
-    auto *DstElemLoc = reinterpret_cast<std::byte *>(DstDesc + 1);
-
-    *DstDesc = *SrcDesc;
-    if (auto Fn = D->ElemDesc->MoveFn)
-      Fn(B, SrcElemLoc, DstElemLoc, D->ElemDesc);
-  }
-}
-
 static void initField(Block *B, std::byte *Ptr, bool IsConst, bool IsMutable,
                       bool IsVolatile, bool IsActive, bool IsUnionField,
                       bool InUnion, const Descriptor *D, unsigned FieldOffset) {
@@ -268,45 +246,6 @@ static void dtorRecord(Block *B, std::byte *Ptr, const Descriptor *D) {
     destroyBase(B, Ptr, F.Desc, F.Offset);
 }
 
-static void moveRecord(Block *B, std::byte *Src, std::byte *Dst,
-                       const Descriptor *D) {
-  assert(D);
-  assert(D->ElemRecord);
-
-  // FIXME: Code duplication.
-  for (const auto &F : D->ElemRecord->fields()) {
-    auto FieldOffset = F.Offset;
-    const auto *SrcDesc =
-        reinterpret_cast<const InlineDescriptor *>(Src + FieldOffset) - 1;
-    auto *DestDesc =
-        reinterpret_cast<InlineDescriptor *>(Dst + FieldOffset) - 1;
-    std::memcpy(DestDesc, SrcDesc, sizeof(InlineDescriptor));
-
-    if (auto Fn = F.Desc->MoveFn)
-      Fn(B, Src + FieldOffset, Dst + FieldOffset, F.Desc);
-  }
-
-  for (const auto &Base : D->ElemRecord->bases()) {
-    auto BaseOffset = Base.Offset;
-    const auto *SrcDesc =
-        reinterpret_cast<const InlineDescriptor *>(Src + BaseOffset) - 1;
-    auto *DestDesc = reinterpret_cast<InlineDescriptor *>(Dst + BaseOffset) - 1;
-    std::memcpy(DestDesc, SrcDesc, sizeof(InlineDescriptor));
-
-    if (auto Fn = Base.Desc->MoveFn)
-      Fn(B, Src + BaseOffset, Dst + BaseOffset, Base.Desc);
-  }
-
-  for (const auto &VBase : D->ElemRecord->virtual_bases()) {
-    auto VBaseOffset = VBase.Offset;
-    const auto *SrcDesc =
-        reinterpret_cast<const InlineDescriptor *>(Src + VBaseOffset) - 1;
-    auto *DestDesc =
-        reinterpret_cast<InlineDescriptor *>(Dst + VBaseOffset) - 1;
-    std::memcpy(DestDesc, SrcDesc, sizeof(InlineDescriptor));
-  }
-}
-
 static BlockCtorFn getCtorPrim(PrimType Type) {
   // Floating types are special. They are primitives, but need their
   // constructor called.
@@ -337,18 +276,6 @@ static BlockDtorFn getDtorPrim(PrimType Type) {
   COMPOSITE_TYPE_SWITCH(Type, return dtorTy<T>, return nullptr);
 }
 
-static BlockMoveFn getMovePrim(PrimType Type) {
-  if (Type == PT_Float)
-    return moveTy<PrimConv<PT_Float>::T>;
-  if (Type == PT_IntAP)
-    return moveTy<PrimConv<PT_IntAP>::T>;
-  if (Type == PT_IntAPS)
-    return moveTy<PrimConv<PT_IntAPS>::T>;
-  if (Type == PT_MemberPtr)
-    return moveTy<PrimConv<PT_MemberPtr>::T>;
-  COMPOSITE_TYPE_SWITCH(Type, return moveTy<T>, return nullptr);
-}
-
 static BlockCtorFn getCtorArrayPrim(PrimType Type) {
   TYPE_SWITCH(Type, return ctorArrayTy<T>);
   llvm_unreachable("unknown Expr");
@@ -359,11 +286,6 @@ static BlockDtorFn getDtorArrayPrim(PrimType Type) {
   llvm_unreachable("unknown Expr");
 }
 
-static BlockMoveFn getMoveArrayPrim(PrimType Type) {
-  TYPE_SWITCH(Type, return moveArrayTy<T>);
-  llvm_unreachable("unknown Expr");
-}
-
 /// Primitives.
 Descriptor::Descriptor(const DeclTy &D, const Type *SourceTy, PrimType Type,
                        MetadataSize MD, bool IsConst, bool IsTemporary,
@@ -372,7 +294,7 @@ Descriptor::Descriptor(const DeclTy &D, const Type *SourceTy, PrimType Type,
       MDSize(MD.value_or(0)), AllocSize(align(Size + MDSize)), PrimT(Type),
       IsConst(IsConst), IsMutable(IsMutable), IsTemporary(IsTemporary),
       IsVolatile(IsVolatile), CtorFn(getCtorPrim(Type)),
-      DtorFn(getDtorPrim(Type)), MoveFn(getMovePrim(Type)) {
+      DtorFn(getDtorPrim(Type)) {
   assert(AllocSize >= Size);
   assert(Source && "Missing source");
 }
@@ -386,7 +308,7 @@ Descriptor::Descriptor(const DeclTy &D, PrimType Type, MetadataSize MD,
       AllocSize(align(MDSize) + align(Size) + sizeof(InitMapPtr)), PrimT(Type),
       IsConst(IsConst), IsMutable(IsMutable), IsTemporary(IsTemporary),
       IsArray(true), CtorFn(getCtorArrayPrim(Type)),
-      DtorFn(getDtorArrayPrim(Type)), MoveFn(getMoveArrayPrim(Type)) {
+      DtorFn(getDtorArrayPrim(Type)) {
   assert(Source && "Missing source");
   assert(NumElems <= (MaxArrayElemBytes / ElemSize));
 }
@@ -399,7 +321,7 @@ Descriptor::Descriptor(const DeclTy &D, PrimType Type, MetadataSize MD,
       AllocSize(MDSize + sizeof(InitMapPtr) + alignof(void *)), PrimT(Type),
       IsConst(IsConst), IsMutable(false), IsTemporary(IsTemporary),
       IsArray(true), CtorFn(getCtorArrayPrim(Type)),
-      DtorFn(getDtorArrayPrim(Type)), MoveFn(getMoveArrayPrim(Type)) {
+      DtorFn(getDtorArrayPrim(Type)) {
   assert(Source && "Missing source");
 }
@@ -414,7 +336,7 @@ Descriptor::Descriptor(const DeclTy &D, const Type *SourceTy,
       AllocSize(std::max<size_t>(alignof(void *), Size) + MDSize),
       ElemDesc(Elem), IsConst(IsConst), IsMutable(IsMutable),
       IsTemporary(IsTemporary), IsArray(true), CtorFn(ctorArrayDesc),
-      DtorFn(dtorArrayDesc), MoveFn(moveArrayDesc) {
+      DtorFn(dtorArrayDesc) {
   assert(Source && "Missing source");
 }
@@ -425,7 +347,7 @@ Descriptor::Descriptor(const DeclTy &D, const Descriptor *Elem, MetadataSize MD,
       Size(UnknownSizeMark), MDSize(MD.value_or(0)),
       AllocSize(MDSize + alignof(void *)), ElemDesc(Elem), IsConst(true),
       IsMutable(false), IsTemporary(IsTemporary), IsArray(true),
-      CtorFn(ctorArrayDesc), DtorFn(dtorArrayDesc), MoveFn(moveArrayDesc) {
+      CtorFn(ctorArrayDesc), DtorFn(dtorArrayDesc) {
   assert(Source && "Missing source");
 }
@@ -437,7 +359,7 @@ Descriptor::Descriptor(const DeclTy &D, const Record *R, MetadataSize MD,
       Size(ElemSize), MDSize(MD.value_or(0)), AllocSize(Size + MDSize),
       ElemRecord(R), IsConst(IsConst), IsMutable(IsMutable),
       IsTemporary(IsTemporary), IsVolatile(IsVolatile), CtorFn(ctorRecord),
-      DtorFn(dtorRecord), MoveFn(moveRecord) {
+      DtorFn(dtorRecord) {
   assert(Source && "Missing source");
 }
diff --git a/clang/lib/AST/ByteCode/Descriptor.h b/clang/lib/AST/ByteCode/Descriptor.h
index 4c925f6..cd34e11 100644
--- a/clang/lib/AST/ByteCode/Descriptor.h
+++ b/clang/lib/AST/ByteCode/Descriptor.h
@@ -41,14 +41,6 @@ using BlockCtorFn = void (*)(Block *Storage, std::byte *FieldPtr, bool IsConst,
 using BlockDtorFn = void (*)(Block *Storage, std::byte *FieldPtr,
                              const Descriptor *FieldDesc);
 
-/// Invoked when a block with pointers referencing it goes out of scope. Such
-/// blocks are persisted: the move function copies all inline descriptors and
-/// non-trivial fields, as existing pointers might need to reference those
-/// descriptors. Data is not copied since it cannot be legally read.
-using BlockMoveFn = void (*)(Block *Storage, std::byte *SrcFieldPtr,
-                             std::byte *DstFieldPtr,
-                             const Descriptor *FieldDesc);
-
 enum class GlobalInitState {
   Initialized,
   NoInitializer,
@@ -181,7 +173,6 @@ public:
   /// Storage management methods.
   const BlockCtorFn CtorFn = nullptr;
   const BlockDtorFn DtorFn = nullptr;
-  const BlockMoveFn MoveFn = nullptr;
 
   /// Allocates a descriptor for a primitive.
   Descriptor(const DeclTy &D, const Type *SourceTy, PrimType Type,
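A note on the deletions above: BlockMoveFn existed to persist a block's inline descriptors when the block dies while pointers still reference it. That per-field walk becomes redundant once the whole allocation, descriptors included, is copied in one shot, which is what the InterpState::deallocate() hunk further down in this diff now does with a single memcpy(). Below is a minimal standalone sketch of that idea; FakeBlock and persistDeadBlock are illustrative stand-ins, not the interpreter's real types.

    #include <cstddef>
    #include <cstring>
    #include <vector>

    // Stand-in for a block: inline descriptors and data live in one
    // contiguous allocation.
    struct FakeBlock {
      std::vector<std::byte> Bytes;
      std::byte *rawData() { return Bytes.data(); }
      size_t getSize() const { return Bytes.size(); }
    };

    // Persist a dying block so outstanding pointers can still reach its
    // descriptors: one wholesale copy replaces the recursive MoveFn walk
    // over fields, bases, and virtual bases.
    FakeBlock persistDeadBlock(FakeBlock &Dying) {
      FakeBlock Dead;
      Dead.Bytes.resize(Dying.getSize());
      std::memcpy(Dead.rawData(), Dying.rawData(), Dying.getSize());
      return Dead;
    }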
diff --git a/clang/lib/AST/ByteCode/DynamicAllocator.cpp b/clang/lib/AST/ByteCode/DynamicAllocator.cpp
index 9b8b664..bbef941 100644
--- a/clang/lib/AST/ByteCode/DynamicAllocator.cpp
+++ b/clang/lib/AST/ByteCode/DynamicAllocator.cpp
@@ -128,7 +128,7 @@ bool DynamicAllocator::deallocate(const Expr *Source,
     return false;
 
   auto &Site = It->second;
-  assert(Site.size() > 0);
+  assert(!Site.empty());
 
   // Find the Block to delete.
   auto AllocIt = llvm::find_if(Site.Allocations, [&](const Allocation &A) {
@@ -144,7 +144,7 @@ bool DynamicAllocator::deallocate(const Expr *Source,
   S.deallocate(B);
   Site.Allocations.erase(AllocIt);
 
-  if (Site.size() == 0)
+  if (Site.empty())
     AllocationSites.erase(It);
 
   return true;
diff --git a/clang/lib/AST/ByteCode/DynamicAllocator.h b/clang/lib/AST/ByteCode/DynamicAllocator.h
index cff09bf..cba5e34 100644
--- a/clang/lib/AST/ByteCode/DynamicAllocator.h
+++ b/clang/lib/AST/ByteCode/DynamicAllocator.h
@@ -55,6 +55,7 @@ private:
     }
 
     size_t size() const { return Allocations.size(); }
+    bool empty() const { return Allocations.empty(); }
   };
 
 public:
@@ -65,8 +66,6 @@ public:
 
   void cleanup();
 
-  unsigned getNumAllocations() const { return AllocationSites.size(); }
-
   /// Allocate ONE element of the given descriptor.
   Block *allocate(const Descriptor *D, unsigned EvalID, Form AllocForm);
   /// Allocate \p NumElements primitive elements of the given type.
@@ -96,6 +95,8 @@ public:
     return llvm::make_range(AllocationSites.begin(), AllocationSites.end());
   }
 
+  bool hasAllocations() const { return !AllocationSites.empty(); }
+
 private:
   llvm::DenseMap<const Expr *, AllocationSite> AllocationSites;
 
diff --git a/clang/lib/AST/ByteCode/EvalEmitter.cpp b/clang/lib/AST/ByteCode/EvalEmitter.cpp
index 976b7c0..9ed61c7 100644
--- a/clang/lib/AST/ByteCode/EvalEmitter.cpp
+++ b/clang/lib/AST/ByteCode/EvalEmitter.cpp
@@ -292,7 +292,7 @@ bool EvalEmitter::emitGetLocal(uint32_t I, const SourceInfo &Info) {
 
   Block *B = getLocal(I);
 
-  if (!CheckLocalLoad(S, OpPC, Pointer(B)))
+  if (!CheckLocalLoad(S, OpPC, B))
     return false;
 
   S.Stk.push<T>(*reinterpret_cast<T *>(B->data()));
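The allocator hunks above replace size comparisons with emptiness queries and drop the getNumAllocations() counter in favor of hasAllocations(). The underlying bookkeeping is erase-when-empty: a site disappears from the map as soon as its last allocation is freed, so leak detection reduces to a map-emptiness test. A simplified sketch with stand-in types (Site and Allocator here are hypothetical, not the real classes):

    #include <map>
    #include <vector>

    struct Site {
      std::vector<int> Allocations; // handles of live allocations
      bool empty() const { return Allocations.empty(); }
    };

    struct Allocator {
      std::map<const void *, Site> Sites; // keyed by allocating expression

      bool deallocate(const void *Source, int Handle) {
        auto It = Sites.find(Source);
        if (It == Sites.end())
          return false;
        std::erase(It->second.Allocations, Handle); // C++20
        if (It->second.empty())
          Sites.erase(It); // last allocation gone: forget the site
        return true;
      }

      // Replaces "getNumAllocations() == 0" style queries.
      bool hasAllocations() const { return !Sites.empty(); }
    };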
diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp
index eb4e480..bc14bd3d 100644
--- a/clang/lib/AST/ByteCode/Interp.cpp
+++ b/clang/lib/AST/ByteCode/Interp.cpp
@@ -211,25 +211,26 @@ static void diagnoseNonConstVariable(InterpState &S, CodePtr OpPC,
   S.Note(VD->getLocation(), diag::note_declared_at);
 }
 
-static bool CheckTemporary(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+static bool CheckTemporary(InterpState &S, CodePtr OpPC, const Block *B,
                            AccessKinds AK) {
-  if (auto ID = Ptr.getDeclID()) {
-    if (!Ptr.isStaticTemporary())
+  if (B->getDeclID()) {
+    if (!(B->isStatic() && B->isTemporary()))
       return true;
 
     const auto *MTE = dyn_cast_if_present<MaterializeTemporaryExpr>(
-        Ptr.getDeclDesc()->asExpr());
+        B->getDescriptor()->asExpr());
     if (!MTE)
       return true;
 
     // FIXME(perf): Since we do this check on every Load from a static
     // temporary, it might make sense to cache the value of the
     // isUsableInConstantExpressions call.
-    if (!MTE->isUsableInConstantExpressions(S.getASTContext()) &&
-        Ptr.block()->getEvalID() != S.Ctx.getEvalID()) {
+    if (B->getEvalID() != S.Ctx.getEvalID() &&
+        !MTE->isUsableInConstantExpressions(S.getASTContext())) {
       const SourceInfo &E = S.Current->getSource(OpPC);
       S.FFDiag(E, diag::note_constexpr_access_static_temporary, 1) << AK;
-      S.Note(Ptr.getDeclLoc(), diag::note_constexpr_temporary_here);
+      S.Note(B->getDescriptor()->getLocation(),
+             diag::note_constexpr_temporary_here);
       return false;
     }
   }
@@ -658,17 +659,19 @@ static bool CheckVolatile(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
   return false;
 }
 
-bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
-                      AccessKinds AK) {
+bool DiagnoseUninitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+                           AccessKinds AK) {
   assert(Ptr.isLive());
+  assert(!Ptr.isInitialized());
+  return DiagnoseUninitialized(S, OpPC, Ptr.isExtern(), Ptr.getDeclDesc(), AK);
+}
 
-  if (Ptr.isInitialized())
-    return true;
-
-  if (Ptr.isExtern() && S.checkingPotentialConstantExpression())
+bool DiagnoseUninitialized(InterpState &S, CodePtr OpPC, bool Extern,
+                           const Descriptor *Desc, AccessKinds AK) {
+  if (Extern && S.checkingPotentialConstantExpression())
     return false;
 
-  if (const auto *VD = Ptr.getDeclDesc()->asVarDecl();
+  if (const auto *VD = Desc->asVarDecl();
       VD && (VD->isConstexpr() || VD->hasGlobalStorage())) {
 
     if (VD == S.EvaluatingDecl &&
@@ -703,9 +706,9 @@ bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
   return false;
 }
 
-static bool CheckLifetime(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+static bool CheckLifetime(InterpState &S, CodePtr OpPC, Lifetime LT,
                           AccessKinds AK) {
-  if (Ptr.getLifetime() == Lifetime::Started)
+  if (LT == Lifetime::Started)
     return true;
 
   if (!S.checkingPotentialConstantExpression()) {
@@ -715,11 +718,11 @@ static bool CheckLifetime(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
   return false;
 }
 
-static bool CheckWeak(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
-  if (!Ptr.isWeak())
+static bool CheckWeak(InterpState &S, CodePtr OpPC, const Block *B) {
+  if (!B->isWeak())
     return true;
 
-  const auto *VD = Ptr.getDeclDesc()->asVarDecl();
+  const auto *VD = B->getDescriptor()->asVarDecl();
   assert(VD);
   S.FFDiag(S.Current->getLocation(OpPC), diag::note_constexpr_var_init_weak)
       << VD;
@@ -732,32 +735,56 @@ static bool CheckWeak(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
 // ones removed that are impossible on primitive global values.
 // For example, since those can't be members of structs, they also can't
 // be mutable.
-bool CheckGlobalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
-  if (!CheckExtern(S, OpPC, Ptr))
-    return false;
-  if (!CheckConstant(S, OpPC, Ptr))
+bool CheckGlobalLoad(InterpState &S, CodePtr OpPC, const Block *B) {
+  const auto &Desc =
+      *reinterpret_cast<const GlobalInlineDescriptor *>(B->rawData());
+  if (!CheckExtern(S, OpPC, Pointer(const_cast<Block *>(B))))
     return false;
-  if (!CheckDummy(S, OpPC, Ptr, AK_Read))
+  if (!CheckConstant(S, OpPC, B->getDescriptor()))
    return false;
-  if (!CheckInitialized(S, OpPC, Ptr, AK_Read))
+  if (!CheckDummy(S, OpPC, B, AK_Read))
    return false;
-  if (!CheckTemporary(S, OpPC, Ptr, AK_Read))
+  if (Desc.InitState != GlobalInitState::Initialized)
+    return DiagnoseUninitialized(S, OpPC, B->isExtern(), B->getDescriptor(),
+                                 AK_Read);
+  if (!CheckTemporary(S, OpPC, B, AK_Read))
     return false;
-  if (!CheckWeak(S, OpPC, Ptr))
+  if (!CheckWeak(S, OpPC, B))
     return false;
-  if (!CheckVolatile(S, OpPC, Ptr, AK_Read))
+  if (B->getDescriptor()->IsVolatile) {
+    if (!S.getLangOpts().CPlusPlus)
+      return Invalid(S, OpPC);
+
+    const ValueDecl *D = B->getDescriptor()->asValueDecl();
+    S.FFDiag(S.Current->getLocation(OpPC),
+             diag::note_constexpr_access_volatile_obj, 1)
+        << AK_Read << 1 << D;
+    S.Note(D->getLocation(), diag::note_constexpr_volatile_here) << 1;
     return false;
+  }
 
   return true;
 }
 
 // Similarly, for local loads.
-bool CheckLocalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
-  if (!CheckLifetime(S, OpPC, Ptr, AK_Read))
-    return false;
-  if (!CheckInitialized(S, OpPC, Ptr, AK_Read))
-    return false;
-  if (!CheckVolatile(S, OpPC, Ptr, AK_Read))
+bool CheckLocalLoad(InterpState &S, CodePtr OpPC, const Block *B) {
+  assert(!B->isExtern());
+  const auto &Desc = *reinterpret_cast<const InlineDescriptor *>(B->rawData());
+  if (!CheckLifetime(S, OpPC, Desc.LifeState, AK_Read))
+    return false;
+  if (!Desc.IsInitialized)
+    return DiagnoseUninitialized(S, OpPC, /*Extern=*/false, B->getDescriptor(),
+                                 AK_Read);
+  if (B->getDescriptor()->IsVolatile) {
+    if (!S.getLangOpts().CPlusPlus)
+      return Invalid(S, OpPC);
+
+    const ValueDecl *D = B->getDescriptor()->asValueDecl();
+    S.FFDiag(S.Current->getLocation(OpPC),
+             diag::note_constexpr_access_volatile_obj, 1)
+        << AK_Read << 1 << D;
+    S.Note(D->getLocation(), diag::note_constexpr_volatile_here) << 1;
     return false;
+  }
 
   return true;
 }
 
@@ -769,19 +796,19 @@ bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
     return false;
   if (!CheckConstant(S, OpPC, Ptr))
     return false;
-  if (!CheckDummy(S, OpPC, Ptr, AK))
+  if (Ptr.isBlockPointer() && !CheckDummy(S, OpPC, Ptr.block(), AK))
     return false;
   if (!CheckRange(S, OpPC, Ptr, AK))
     return false;
   if (!CheckActive(S, OpPC, Ptr, AK))
     return false;
-  if (!CheckLifetime(S, OpPC, Ptr, AK))
+  if (!CheckLifetime(S, OpPC, Ptr.getLifetime(), AK))
     return false;
-  if (!CheckInitialized(S, OpPC, Ptr, AK))
+  if (!Ptr.isInitialized())
+    return DiagnoseUninitialized(S, OpPC, Ptr, AK);
+  if (Ptr.isBlockPointer() && !CheckTemporary(S, OpPC, Ptr.block(), AK))
     return false;
-  if (!CheckTemporary(S, OpPC, Ptr, AK))
-    return false;
-  if (!CheckWeak(S, OpPC, Ptr))
+  if (Ptr.isBlockPointer() && !CheckWeak(S, OpPC, Ptr.block()))
     return false;
   if (!CheckMutable(S, OpPC, Ptr))
     return false;
@@ -798,7 +825,7 @@ bool CheckFinalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
   if (!CheckConstant(S, OpPC, Ptr))
     return false;
 
-  if (!CheckDummy(S, OpPC, Ptr, AK_Read))
+  if (Ptr.isBlockPointer() && !CheckDummy(S, OpPC, Ptr.block(), AK_Read))
     return false;
   if (!CheckExtern(S, OpPC, Ptr))
     return false;
@@ -806,13 +833,13 @@ bool CheckFinalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
     return false;
   if (!CheckActive(S, OpPC, Ptr, AK_Read))
     return false;
-  if (!CheckLifetime(S, OpPC, Ptr, AK_Read))
-    return false;
-  if (!CheckInitialized(S, OpPC, Ptr, AK_Read))
+  if (!CheckLifetime(S, OpPC, Ptr.getLifetime(), AK_Read))
     return false;
-  if (!CheckTemporary(S, OpPC, Ptr, AK_Read))
+  if (!Ptr.isInitialized())
+    return DiagnoseUninitialized(S, OpPC, Ptr, AK_Read);
+  if (Ptr.isBlockPointer() && !CheckTemporary(S, OpPC, Ptr.block(), AK_Read))
     return false;
-  if (!CheckWeak(S, OpPC, Ptr))
+  if (Ptr.isBlockPointer() && !CheckWeak(S, OpPC, Ptr.block()))
     return false;
   if (!CheckMutable(S, OpPC, Ptr))
     return false;
@@ -822,9 +849,9 @@ bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
   if (!CheckLive(S, OpPC, Ptr, AK_Assign))
     return false;
-  if (!CheckDummy(S, OpPC, Ptr, AK_Assign))
+  if (Ptr.isBlockPointer() && !CheckDummy(S, OpPC, Ptr.block(), AK_Assign))
     return false;
-  if (!CheckLifetime(S, OpPC, Ptr, AK_Assign))
+  if (!CheckLifetime(S, OpPC, Ptr.getLifetime(), AK_Assign))
     return false;
   if (!CheckExtern(S, OpPC, Ptr))
     return false;
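The CheckGlobalLoad and CheckLocalLoad rewrites above are the heart of this change: the direct-load paths now take the Block itself and read the (Global)InlineDescriptor straight out of the block's raw bytes, rather than constructing a Pointer first. A standalone sketch of the layout assumption, with illustrative types only:

    #include <cstddef>

    // Stand-in for the inline descriptor stored at the start of a local
    // block's payload (the field layout here is illustrative).
    struct FakeInlineDescriptor {
      unsigned Offset;
      bool IsInitialized;
    };

    struct FakeBlock {
      alignas(8) std::byte Data[64] = {};
      const std::byte *rawData() const { return Data; }
    };

    // The initialization check a local load performs: a direct read of the
    // descriptor bits, with no Pointer temporary involved.
    bool isLocalInitialized(const FakeBlock &B) {
      const auto &Desc =
          *reinterpret_cast<const FakeInlineDescriptor *>(B.rawData());
      return Desc.IsInitialized;
    }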
@@ -1098,12 +1125,11 @@ bool CheckDeclRef(InterpState &S, CodePtr OpPC, const DeclRefExpr *DR) {
   return diagnoseUnknownDecl(S, OpPC, D);
 }
 
-bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
-                AccessKinds AK) {
-  if (!Ptr.isDummy())
+bool CheckDummy(InterpState &S, CodePtr OpPC, const Block *B, AccessKinds AK) {
+  const Descriptor *Desc = B->getDescriptor();
+  if (!Desc->isDummy())
     return true;
 
-  const Descriptor *Desc = Ptr.getDeclDesc();
   const ValueDecl *D = Desc->asValueDecl();
   if (!D)
     return false;
@@ -1426,7 +1452,7 @@ static bool checkConstructor(InterpState &S, CodePtr OpPC, const Function *Func,
 bool CheckDestructor(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
   if (!CheckLive(S, OpPC, Ptr, AK_Destroy))
     return false;
-  if (!CheckTemporary(S, OpPC, Ptr, AK_Destroy))
+  if (!CheckTemporary(S, OpPC, Ptr.block(), AK_Destroy))
     return false;
   if (!CheckRange(S, OpPC, Ptr, AK_Destroy))
     return false;
@@ -1620,8 +1646,17 @@ bool CallVirt(InterpState &S, CodePtr OpPC, const Function *Func,
   const auto *StaticDecl = cast<CXXRecordDecl>(Func->getParentDecl());
   const auto *InitialFunction = cast<CXXMethodDecl>(Callee);
-  const CXXMethodDecl *Overrider = S.getContext().getOverridingFunction(
-      DynamicDecl, StaticDecl, InitialFunction);
+  const CXXMethodDecl *Overrider;
+
+  if (StaticDecl != DynamicDecl) {
+    if (!DynamicDecl->isDerivedFrom(StaticDecl))
+      return false;
+    Overrider = S.getContext().getOverridingFunction(DynamicDecl, StaticDecl,
+                                                     InitialFunction);
+
+  } else {
+    Overrider = InitialFunction;
+  }
 
   if (Overrider != InitialFunction) {
     // DR1872: An instantiated virtual constexpr function can't be called in a
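The CallVirt hunk above guards the overrider lookup: when the dynamic and static classes coincide, the initial function is trivially the overrider, and when the dynamic class does not derive from the static one the virtual call is invalid and fails outright. A toy model of that decision (single inheritance, hypothetical types, not clang's CXXRecordDecl):

    // Toy single-inheritance class model.
    struct Cls {
      const Cls *Base = nullptr;
      bool isDerivedFrom(const Cls *Other) const {
        for (const Cls *C = Base; C; C = C->Base)
          if (C == Other)
            return true;
        return false;
      }
    };

    // Mirrors the new control flow: returns false for an invalid dispatch,
    // and reports whether an overrider walk is needed at all.
    bool pickOverrider(const Cls *Dynamic, const Cls *Static, bool &NeedWalk) {
      if (Dynamic != Static) {
        if (!Dynamic->isDerivedFrom(Static))
          return false;    // unrelated classes: fail the virtual call
        NeedWalk = true;   // search Dynamic's hierarchy for the overrider
      } else {
        NeedWalk = false;  // same class: the initial function wins
      }
      return true;
    }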
@@ -1749,7 +1784,7 @@ static void startLifetimeRecurse(const Pointer &Ptr) {
 bool StartLifetime(InterpState &S, CodePtr OpPC) {
   const auto &Ptr = S.Stk.peek<Pointer>();
-  if (!CheckDummy(S, OpPC, Ptr, AK_Destroy))
+  if (Ptr.isBlockPointer() && !CheckDummy(S, OpPC, Ptr.block(), AK_Destroy))
     return false;
   startLifetimeRecurse(Ptr.narrow());
   return true;
@@ -1780,7 +1815,7 @@ static void endLifetimeRecurse(const Pointer &Ptr) {
 /// Ends the lifetime of the peek'd pointer.
 bool EndLifetime(InterpState &S, CodePtr OpPC) {
   const auto &Ptr = S.Stk.peek<Pointer>();
-  if (!CheckDummy(S, OpPC, Ptr, AK_Destroy))
+  if (Ptr.isBlockPointer() && !CheckDummy(S, OpPC, Ptr.block(), AK_Destroy))
     return false;
   endLifetimeRecurse(Ptr.narrow());
   return true;
@@ -1789,7 +1824,7 @@ bool EndLifetime(InterpState &S, CodePtr OpPC) {
 /// Ends the lifetime of the pop'd pointer.
 bool EndLifetimePop(InterpState &S, CodePtr OpPC) {
   const auto &Ptr = S.Stk.pop<Pointer>();
-  if (!CheckDummy(S, OpPC, Ptr, AK_Destroy))
+  if (Ptr.isBlockPointer() && !CheckDummy(S, OpPC, Ptr.block(), AK_Destroy))
     return false;
   endLifetimeRecurse(Ptr.narrow());
   return true;
@@ -1804,16 +1839,16 @@ bool CheckNewTypeMismatch(InterpState &S, CodePtr OpPC, const Expr *E,
 
   // Similar to CheckStore(), but with the additional CheckTemporary() call and
   // the AccessKinds are different.
-  if (!CheckTemporary(S, OpPC, Ptr, AK_Construct))
+  if (!CheckTemporary(S, OpPC, Ptr.block(), AK_Construct))
     return false;
   if (!CheckLive(S, OpPC, Ptr, AK_Construct))
     return false;
-  if (!CheckDummy(S, OpPC, Ptr, AK_Construct))
+  if (!CheckDummy(S, OpPC, Ptr.block(), AK_Construct))
     return false;
 
   // CheckLifetime for this and all base pointers.
   for (Pointer P = Ptr;;) {
-    if (!CheckLifetime(S, OpPC, P, AK_Construct))
+    if (!CheckLifetime(S, OpPC, P.getLifetime(), AK_Construct))
       return false;
 
     if (P.isRoot())
diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h
index 8a28106..0d3f492 100644
--- a/clang/lib/AST/ByteCode/Interp.h
+++ b/clang/lib/AST/ByteCode/Interp.h
@@ -51,8 +51,7 @@ bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
                AccessKinds AK);
 /// Checks if a pointer is a dummy pointer.
-bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
-                AccessKinds AK);
+bool CheckDummy(InterpState &S, CodePtr OpPC, const Block *B, AccessKinds AK);
 /// Checks if a pointer is null.
 bool CheckNull(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
@@ -89,11 +88,14 @@ bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
                AccessKinds AK = AK_Read);
 bool CheckFinalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
 
-bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
-                      AccessKinds AK);
+bool DiagnoseUninitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr,
+                           AccessKinds AK);
+bool DiagnoseUninitialized(InterpState &S, CodePtr OpPC, bool Extern,
+                           const Descriptor *Desc, AccessKinds AK);
+
 /// Checks a direct load of a primitive value from a global or local variable.
-bool CheckGlobalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
-bool CheckLocalLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
+bool CheckGlobalLoad(InterpState &S, CodePtr OpPC, const Block *B);
+bool CheckLocalLoad(InterpState &S, CodePtr OpPC, const Block *B);
 
 /// Checks if a value can be stored in a block.
 bool CheckStore(InterpState &S, CodePtr OpPC, const Pointer &Ptr);
@@ -1351,10 +1353,10 @@ inline bool ConstFloat(InterpState &S, CodePtr OpPC, const Floating &F) {
 
 template <PrimType Name, class T = typename PrimConv<Name>::T>
 bool GetLocal(InterpState &S, CodePtr OpPC, uint32_t I) {
-  const Pointer &Ptr = S.Current->getLocalPointer(I);
-  if (!CheckLocalLoad(S, OpPC, Ptr))
+  const Block *B = S.Current->getLocalBlock(I);
+  if (!CheckLocalLoad(S, OpPC, B))
     return false;
-  S.Stk.push<T>(Ptr.deref<T>());
+  S.Stk.push<T>(B->deref<T>());
   return true;
 }
 
@@ -1465,22 +1467,26 @@ bool SetThisField(InterpState &S, CodePtr OpPC, uint32_t I) {
 
 template <PrimType Name, class T = typename PrimConv<Name>::T>
 bool GetGlobal(InterpState &S, CodePtr OpPC, uint32_t I) {
-  const Pointer &Ptr = S.P.getPtrGlobal(I);
+  const Block *B = S.P.getGlobal(I);
 
-  if (!CheckGlobalLoad(S, OpPC, Ptr))
+  if (!CheckGlobalLoad(S, OpPC, B))
     return false;
 
-  S.Stk.push<T>(Ptr.deref<T>());
+  S.Stk.push<T>(B->deref<T>());
   return true;
 }
 
 /// Same as GetGlobal, but without the checks.
 template <PrimType Name, class T = typename PrimConv<Name>::T>
 bool GetGlobalUnchecked(InterpState &S, CodePtr OpPC, uint32_t I) {
-  const Pointer &Ptr = S.P.getPtrGlobal(I);
-  if (!CheckInitialized(S, OpPC, Ptr, AK_Read))
-    return false;
-  S.Stk.push<T>(Ptr.deref<T>());
+  const Block *B = S.P.getGlobal(I);
+  const auto &Desc =
+      *reinterpret_cast<const GlobalInlineDescriptor *>(B->rawData());
+  if (Desc.InitState != GlobalInitState::Initialized)
+    return DiagnoseUninitialized(S, OpPC, B->isExtern(), B->getDescriptor(),
+                                 AK_Read);
+
+  S.Stk.push<T>(B->deref<T>());
   return true;
 }
 
@@ -2351,8 +2357,8 @@ static inline bool IncDecPtrHelper(InterpState &S, CodePtr OpPC,
 static inline bool IncPtr(InterpState &S, CodePtr OpPC) {
   const Pointer &Ptr = S.Stk.pop<Pointer>();
 
-  if (!CheckInitialized(S, OpPC, Ptr, AK_Increment))
-    return false;
+  if (!Ptr.isInitialized())
+    return DiagnoseUninitialized(S, OpPC, Ptr, AK_Increment);
 
   return IncDecPtrHelper<ArithOp::Add>(S, OpPC, Ptr);
 }
@@ -2360,8 +2366,8 @@ static inline bool IncPtr(InterpState &S, CodePtr OpPC) {
 static inline bool DecPtr(InterpState &S, CodePtr OpPC) {
   const Pointer &Ptr = S.Stk.pop<Pointer>();
 
-  if (!CheckInitialized(S, OpPC, Ptr, AK_Decrement))
-    return false;
+  if (!Ptr.isInitialized())
+    return DiagnoseUninitialized(S, OpPC, Ptr, AK_Decrement);
 
   return IncDecPtrHelper<ArithOp::Sub>(S, OpPC, Ptr);
 }
@@ -3195,6 +3201,9 @@ inline bool GetMemberPtr(InterpState &S, CodePtr OpPC, const ValueDecl *D) {
 inline bool GetMemberPtrBase(InterpState &S, CodePtr OpPC) {
   const auto &MP = S.Stk.pop<MemberPointer>();
 
+  if (!MP.isBaseCastPossible())
+    return false;
+
   S.Stk.push<Pointer>(MP.getBase());
   return true;
 }
diff --git a/clang/lib/AST/ByteCode/InterpBlock.h b/clang/lib/AST/ByteCode/InterpBlock.h
index 5162223..07194d6 100644
--- a/clang/lib/AST/ByteCode/InterpBlock.h
+++ b/clang/lib/AST/ByteCode/InterpBlock.h
@@ -103,6 +103,10 @@ public:
     return reinterpret_cast<const std::byte *>(this) + sizeof(Block);
   }
 
+  template <typename T> T deref() const {
+    return *reinterpret_cast<const T *>(data());
+  }
+
   /// Invokes the constructor.
   void invokeCtor() {
     assert(!IsInitialized);
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index f908d02..c835bd4 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -276,7 +276,7 @@ static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
   if (!CheckLive(S, OpPC, StrPtr, AK_Read))
     return false;
 
-  if (!CheckDummy(S, OpPC, StrPtr, AK_Read))
+  if (!CheckDummy(S, OpPC, StrPtr.block(), AK_Read))
     return false;
 
   assert(StrPtr.getFieldDesc()->isPrimitiveArray());
@@ -2232,7 +2232,7 @@ static bool interp__builtin_is_within_lifetime(InterpState &S, CodePtr OpPC,
       return false;
     if (!CheckMutable(S, OpPC, Ptr))
       return false;
-    if (!CheckDummy(S, OpPC, Ptr, AK_Read))
+    if (!CheckDummy(S, OpPC, Ptr.block(), AK_Read))
       return false;
   }
 
diff --git a/clang/lib/AST/ByteCode/InterpFrame.cpp b/clang/lib/AST/ByteCode/InterpFrame.cpp
index 9342192..f2eac86 100644
--- a/clang/lib/AST/ByteCode/InterpFrame.cpp
+++ b/clang/lib/AST/ByteCode/InterpFrame.cpp
@@ -231,6 +231,10 @@ Pointer InterpFrame::getLocalPointer(unsigned Offset) const {
   return Pointer(localBlock(Offset));
 }
 
+Block *InterpFrame::getLocalBlock(unsigned Offset) const {
+  return localBlock(Offset);
+}
+
 Pointer InterpFrame::getParamPointer(unsigned Off) {
   // Return the block if it was created previously.
   if (auto Pt = Params.find(Off); Pt != Params.end())
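Block::deref<T>() above is what lets GetLocal and GetGlobal push a primitive without ever materializing a Pointer: it is a typed read of the block's payload. A minimal usage sketch against a stand-in block type:

    #include <cstddef>

    struct FakeBlock { // illustrative stand-in for the interpreter's Block
      alignas(8) std::byte Payload[16] = {};
      const std::byte *data() const { return Payload; }
      template <typename T> T deref() const {
        return *reinterpret_cast<const T *>(data());
      }
    };

    // What the opcode now does conceptually:
    int readLocalInt(const FakeBlock &B) {
      return B.deref<int>(); // previously went through a Pointer temporary
    }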
diff --git a/clang/lib/AST/ByteCode/InterpFrame.h b/clang/lib/AST/ByteCode/InterpFrame.h
index cfebe93..4be5391 100644
--- a/clang/lib/AST/ByteCode/InterpFrame.h
+++ b/clang/lib/AST/ByteCode/InterpFrame.h
@@ -86,6 +86,7 @@ public:
 
   /// Returns a pointer to a local variables.
   Pointer getLocalPointer(unsigned Offset) const;
+  Block *getLocalBlock(unsigned Offset) const;
 
   /// Returns the value of an argument.
   template <typename T> const T &getParam(unsigned Offset) const {
diff --git a/clang/lib/AST/ByteCode/InterpState.cpp b/clang/lib/AST/ByteCode/InterpState.cpp
index a06b125..49c9b54 100644
--- a/clang/lib/AST/ByteCode/InterpState.cpp
+++ b/clang/lib/AST/ByteCode/InterpState.cpp
@@ -76,9 +76,6 @@ bool InterpState::reportOverflow(const Expr *E, const llvm::APSInt &Value) {
 
 void InterpState::deallocate(Block *B) {
   assert(B);
-  const Descriptor *Desc = B->getDescriptor();
-  assert(Desc);
-
   // The block might have a pointer saved in a field in its data
   // that points to the block itself. We call the dtor first,
   // which will destroy all the data but leave InlineDescriptors
@@ -95,7 +92,7 @@ void InterpState::deallocate(Block *B) {
     auto *D = new (Memory) DeadBlock(DeadBlocks, B);
     // Since the block doesn't hold any actual data anymore, we can just
     // memcpy() everything over.
-    std::memcpy(D->rawData(), B->rawData(), Desc->getAllocSize());
+    std::memcpy(D->rawData(), B->rawData(), B->getSize());
     D->B.IsInitialized = B->IsInitialized;
 
     // We moved the contents over to the DeadBlock.
@@ -104,15 +101,14 @@ void InterpState::deallocate(Block *B) {
 }
 
 bool InterpState::maybeDiagnoseDanglingAllocations() {
-  bool NoAllocationsLeft = (Alloc.getNumAllocations() == 0);
+  bool NoAllocationsLeft = !Alloc.hasAllocations();
 
   if (!checkingPotentialConstantExpression()) {
-    for (const auto &It : Alloc.allocation_sites()) {
-      assert(It.second.size() > 0);
+    for (const auto &[Source, Site] : Alloc.allocation_sites()) {
+      assert(!Site.empty());
 
-      const Expr *Source = It.first;
       CCEDiag(Source->getExprLoc(), diag::note_constexpr_memory_leak)
-          << (It.second.size() - 1) << Source->getSourceRange();
+          << (Site.size() - 1) << Source->getSourceRange();
     }
   }
   // Keep evaluating before C++20, since the CXXNewExpr wasn't valid there
diff --git a/clang/lib/AST/ByteCode/MemberPointer.h b/clang/lib/AST/ByteCode/MemberPointer.h
index b17ce25..8dd75ca 100644
--- a/clang/lib/AST/ByteCode/MemberPointer.h
+++ b/clang/lib/AST/ByteCode/MemberPointer.h
@@ -51,6 +51,12 @@ public:
 
   FunctionPointer toFunctionPointer(const Context &Ctx) const;
 
+  bool isBaseCastPossible() const {
+    if (PtrOffset < 0)
+      return true;
+    return static_cast<uint64_t>(PtrOffset) <= Base.getByteOffset();
+  }
+
   Pointer getBase() const {
     if (PtrOffset < 0)
       return Base.atField(-PtrOffset);
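MemberPointer::isBaseCastPossible() above encodes a simple bounds rule: a negative PtrOffset selects a field (always inside the object), while a non-negative offset walks back toward a base subobject and must not step past the start of the object, i.e. it may not exceed the base pointer's own byte offset. The arithmetic restated as a compile-time-checkable toy (MP is a hypothetical stand-in):

    #include <cstdint>

    struct MP { // toy stand-in for MemberPointer
      int64_t PtrOffset;       // < 0: cast to field; >= 0: cast to base
      uint64_t BaseByteOffset; // how far Base sits from the object start

      constexpr bool isBaseCastPossible() const {
        if (PtrOffset < 0)
          return true;
        return static_cast<uint64_t>(PtrOffset) <= BaseByteOffset;
      }
    };

    static_assert(MP{-8, 0}.isBaseCastPossible());  // field cast: fine
    static_assert(MP{8, 16}.isBaseCastPossible());  // stays inside the object
    static_assert(!MP{16, 8}.isBaseCastPossible()); // would escape the object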
diff --git a/clang/lib/AST/OpenACCClause.cpp b/clang/lib/AST/OpenACCClause.cpp
index f7a98bd..fe20004 100644
--- a/clang/lib/AST/OpenACCClause.cpp
+++ b/clang/lib/AST/OpenACCClause.cpp
@@ -329,10 +329,11 @@ OpenACCPrivateClause::Create(const ASTContext &C, SourceLocation BeginLoc,
 
 OpenACCFirstPrivateClause *OpenACCFirstPrivateClause::Create(
     const ASTContext &C, SourceLocation BeginLoc, SourceLocation LParenLoc,
-    ArrayRef<Expr *> VarList, ArrayRef<VarDecl *> InitRecipes,
+    ArrayRef<Expr *> VarList, ArrayRef<OpenACCFirstPrivateRecipe> InitRecipes,
     SourceLocation EndLoc) {
-  void *Mem =
-      C.Allocate(OpenACCFirstPrivateClause::totalSizeToAlloc<Expr *, VarDecl *>(
+  void *Mem = C.Allocate(
+      OpenACCFirstPrivateClause::totalSizeToAlloc<Expr *,
+                                                  OpenACCFirstPrivateRecipe>(
           VarList.size(), InitRecipes.size()));
   return new (Mem)
       OpenACCFirstPrivateClause(BeginLoc, LParenLoc, VarList, InitRecipes, EndLoc);
diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp
index 4c36f24..0297f9c 100644
--- a/clang/lib/AST/StmtProfile.cpp
+++ b/clang/lib/AST/StmtProfile.cpp
@@ -2645,8 +2645,10 @@ void OpenACCClauseProfiler::VisitFirstPrivateClause(
     const OpenACCFirstPrivateClause &Clause) {
   VisitClauseWithVarList(Clause);
 
-  for (auto *VD : Clause.getInitRecipes())
-    Profiler.VisitDecl(VD);
+  for (auto &Recipe : Clause.getInitRecipes()) {
+    Profiler.VisitDecl(Recipe.RecipeDecl);
+    Profiler.VisitDecl(Recipe.InitFromTemporary);
+  }
 }
 
 void OpenACCClauseProfiler::VisitAttachClause(
diff --git a/clang/lib/AST/Type.cpp b/clang/lib/AST/Type.cpp
index 141edc8..03d7413 100644
--- a/clang/lib/AST/Type.cpp
+++ b/clang/lib/AST/Type.cpp
@@ -5246,6 +5246,15 @@ bool Type::isHLSLResourceRecord() const {
   return HLSLAttributedResourceType::findHandleTypeOnResource(this) != nullptr;
 }
 
+bool Type::isHLSLResourceRecordArray() const {
+  const Type *Ty = getUnqualifiedDesugaredType();
+  if (!Ty->isArrayType())
+    return false;
+  while (isa<ConstantArrayType>(Ty))
+    Ty = Ty->getArrayElementTypeNoTypeQual();
+  return Ty->isHLSLResourceRecord();
+}
+
 bool Type::isHLSLIntangibleType() const {
   const Type *Ty = getUnqualifiedDesugaredType();
 
diff --git a/clang/lib/Analysis/UnsafeBufferUsage.cpp b/clang/lib/Analysis/UnsafeBufferUsage.cpp
index 40dff7e..f4ead3d 100644
--- a/clang/lib/Analysis/UnsafeBufferUsage.cpp
+++ b/clang/lib/Analysis/UnsafeBufferUsage.cpp
@@ -1986,6 +1986,14 @@ public:
     const auto *FD = dyn_cast<FunctionDecl>(CE->getDirectCallee());
     if (!FD)
       return false;
+
+    bool IsGlobalAndNotInAnyNamespace =
+        FD->isGlobal() && !FD->getEnclosingNamespaceContext()->isNamespace();
+
+    // A libc function must either be in the std:: namespace or a global
+    // function that is not in any namespace:
+    if (!FD->isInStdNamespace() && !IsGlobalAndNotInAnyNamespace)
+      return false;
+
     auto isSingleStringLiteralArg = false;
     if (CE->getNumArgs() == 1) {
       isSingleStringLiteralArg =
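The UnsafeBufferUsage predicate above tightens what counts as a libc callee: either a declaration in std::, or a global function not nested in any namespace, so something like mylib::printf no longer matches. The decision reduced to a pure function over stand-in decl properties (FakeDecl is hypothetical, not clang's FunctionDecl):

    // FakeDecl abstracts the three properties the check consults.
    struct FakeDecl {
      bool InStdNamespace;      // FD->isInStdNamespace()
      bool IsGlobal;            // FD->isGlobal()
      bool EnclosedInNamespace; // enclosing context is a namespace
    };

    bool isLibcCallee(const FakeDecl &FD) {
      bool GlobalAndNotInAnyNamespace =
          FD.IsGlobal && !FD.EnclosedInNamespace;
      return FD.InStdNamespace || GlobalAndNotInAnyNamespace;
    }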
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
index be21ce9..b8663eb 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -16,6 +16,7 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "CIRGenCleanup.h"
 #include "CIRGenFunction.h"
 #include "clang/CIR/MissingFeatures.h"
@@ -33,6 +34,52 @@ using namespace clang::CIRGen;
 
 void EHScopeStack::Cleanup::anchor() {}
 
+/// Push an entry of the given size onto this protected-scope stack.
+char *EHScopeStack::allocate(size_t size) {
+  size = llvm::alignTo(size, ScopeStackAlignment);
+  if (!startOfBuffer) {
+    unsigned capacity = llvm::PowerOf2Ceil(std::max(size, 1024ul));
+    startOfBuffer = std::make_unique<char[]>(capacity);
+    startOfData = endOfBuffer = startOfBuffer.get() + capacity;
+  } else if (static_cast<size_t>(startOfData - startOfBuffer.get()) < size) {
+    unsigned currentCapacity = endOfBuffer - startOfBuffer.get();
+    unsigned usedCapacity =
+        currentCapacity - (startOfData - startOfBuffer.get());
+    unsigned requiredCapacity = usedCapacity + size;
+    // We know from the 'else if' condition that requiredCapacity is greater
+    // than currentCapacity.
+    unsigned newCapacity = llvm::PowerOf2Ceil(requiredCapacity);
+
+    std::unique_ptr<char[]> newStartOfBuffer =
+        std::make_unique<char[]>(newCapacity);
+    char *newEndOfBuffer = newStartOfBuffer.get() + newCapacity;
+    char *newStartOfData = newEndOfBuffer - usedCapacity;
+    memcpy(newStartOfData, startOfData, usedCapacity);
+    startOfBuffer.swap(newStartOfBuffer);
+    endOfBuffer = newEndOfBuffer;
+    startOfData = newStartOfData;
+  }
+
+  assert(startOfBuffer.get() + size <= startOfData);
+  startOfData -= size;
+  return startOfData;
+}
+
+void EHScopeStack::deallocate(size_t size) {
+  startOfData += llvm::alignTo(size, ScopeStackAlignment);
+}
+
+void *EHScopeStack::pushCleanup(CleanupKind kind, size_t size) {
+  char *buffer = allocate(size);
+
+  // When the full implementation is upstreamed, this will allocate
+  // extra memory for and construct a wrapper object that is used to
+  // manage the cleanup generation.
+  assert(!cir::MissingFeatures::ehCleanupScope());
+
+  return buffer;
+}
+
 static mlir::Block *getCurCleanupBlock(CIRGenFunction &cgf) {
   mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
   mlir::Block *cleanup =
@@ -44,26 +91,34 @@ static mlir::Block *getCurCleanupBlock(CIRGenFunction &cgf) {
 /// current insertion point is threaded through the cleanup, as are
 /// any branch fixups on the cleanup.
 void CIRGenFunction::popCleanupBlock() {
-  assert(!ehStack.cleanupStack.empty() && "cleanup stack is empty!");
+  assert(!ehStack.empty() && "cleanup stack is empty!");
+
+  // The memory for the cleanup continues to be owned by the EHScopeStack
+  // allocator, so we just destroy the object rather than attempting to
+  // free it.
+  EHScopeStack::Cleanup &cleanup = *ehStack.begin();
+
+  // The eventual implementation here will use the EHCleanupScope helper class.
+  assert(!cir::MissingFeatures::ehCleanupScope());
+
   mlir::OpBuilder::InsertionGuard guard(builder);
-  std::unique_ptr<EHScopeStack::Cleanup> cleanup =
-      ehStack.cleanupStack.pop_back_val();
 
   assert(!cir::MissingFeatures::ehCleanupFlags());
 
   mlir::Block *cleanupEntry = getCurCleanupBlock(*this);
   builder.setInsertionPointToEnd(cleanupEntry);
-  cleanup->emit(*this);
+  cleanup.emit(*this);
+
+  ehStack.deallocate(cleanup.getSize());
 }
 
 /// Pops cleanup blocks until the given savepoint is reached.
-void CIRGenFunction::popCleanupBlocks(size_t oldCleanupStackDepth) {
+void CIRGenFunction::popCleanupBlocks(
+    EHScopeStack::stable_iterator oldCleanupStackDepth) {
   assert(!cir::MissingFeatures::ehstackBranches());
 
-  assert(ehStack.getStackDepth() >= oldCleanupStackDepth);
-
   // Pop cleanup blocks until we reach the base stack depth for the
   // current scope.
-  while (ehStack.getStackDepth() > oldCleanupStackDepth) {
+  while (ehStack.stable_begin() != oldCleanupStackDepth) {
     popCleanupBlock();
   }
 }
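EHScopeStack::allocate() above implements a downward-growing bump allocator: startOfData begins at endOfBuffer and descends as cleanups are pushed, and on overflow the used tail is copied so it ends at the same distance from the new endOfBuffer. That invariant is what keeps stable iterators, offsets of the form endOfBuffer - startOfData, valid across reallocation. A self-contained model of the push/mark mechanics, with toy names and the growth path elided:

    #include <cassert>
    #include <cstddef>
    #include <memory>

    struct DownStack { // toy model, not the CIRGen class
      std::unique_ptr<char[]> buf;
      char *end = nullptr;  // one past the buffer
      char *data = nullptr; // first live byte; data == end means empty

      char *allocate(size_t size) {
        if (!buf) {
          size_t cap = 1024;
          buf = std::make_unique<char[]>(cap);
          data = end = buf.get() + cap;
        }
        // (growth elided: copy the used tail so it keeps its distance
        //  from the new `end`, preserving all outstanding offsets)
        data -= size;
        return data;
      }

      std::ptrdiff_t stableBegin() const { return end - data; }
    };

    int main() {
      DownStack s;
      std::ptrdiff_t mark = s.stableBegin(); // 0: the stack bottom
      s.allocate(64);
      assert(s.stableBegin() == 64);         // offset grows with each push
      assert(mark == 0);                     // old mark still means "bottom"
    }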
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.h b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
new file mode 100644
index 0000000..7361c8c
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.h
@@ -0,0 +1,43 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes support the generation of CIR for cleanups, initially based
+// on LLVM IR cleanup handling, but ought to change as CIR evolves.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_LIB_CIR_CODEGEN_CIRGENCLEANUP_H
+#define CLANG_LIB_CIR_CODEGEN_CIRGENCLEANUP_H
+
+#include "EHScopeStack.h"
+
+namespace clang::CIRGen {
+
+/// A non-stable pointer into the scope stack.
+class EHScopeStack::iterator {
+  char *ptr = nullptr;
+
+  friend class EHScopeStack;
+  explicit iterator(char *ptr) : ptr(ptr) {}
+
+public:
+  iterator() = default;
+
+  EHScopeStack::Cleanup *get() const {
+    return reinterpret_cast<EHScopeStack::Cleanup *>(ptr);
+  }
+
+  EHScopeStack::Cleanup &operator*() const { return *get(); }
+};
+
+inline EHScopeStack::iterator EHScopeStack::begin() const {
+  return iterator(startOfData);
+}
+
+} // namespace clang::CIRGen
+#endif // CLANG_LIB_CIR_CODEGEN_CIRGENCLEANUP_H
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 78d375c..715d101 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -667,6 +667,12 @@ struct DestroyObject final : EHScopeStack::Cleanup {
   void emit(CIRGenFunction &cgf) override {
     cgf.emitDestroy(addr, type, destroyer);
   }
+
+  // This is a placeholder until EHCleanupScope is implemented.
+  size_t getSize() const override {
+    assert(!cir::MissingFeatures::ehCleanupScope());
+    return sizeof(DestroyObject);
+  }
 };
 } // namespace
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index 3aa170e..cba06a1 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -116,6 +116,15 @@ public:
 
   mlir::Value emitPromotedComplexOperand(const Expr *e, QualType promotionTy);
 
+  LValue emitCompoundAssignLValue(
+      const CompoundAssignOperator *e,
+      mlir::Value (ComplexExprEmitter::*func)(const BinOpInfo &),
+      RValue &value);
+
+  mlir::Value emitCompoundAssign(
+      const CompoundAssignOperator *e,
+      mlir::Value (ComplexExprEmitter::*func)(const BinOpInfo &));
+
   mlir::Value emitBinAdd(const BinOpInfo &op);
   mlir::Value emitBinSub(const BinOpInfo &op);
   mlir::Value emitBinMul(const BinOpInfo &op);
@@ -153,6 +162,19 @@ public:
   HANDLEBINOP(Sub)
   HANDLEBINOP(Mul)
 #undef HANDLEBINOP
+
+  // Compound assignments.
+  mlir::Value VisitBinAddAssign(const CompoundAssignOperator *e) {
+    return emitCompoundAssign(e, &ComplexExprEmitter::emitBinAdd);
+  }
+
+  mlir::Value VisitBinSubAssign(const CompoundAssignOperator *e) {
+    return emitCompoundAssign(e, &ComplexExprEmitter::emitBinSub);
+  }
+
+  mlir::Value VisitBinMulAssign(const CompoundAssignOperator *e) {
+    return emitCompoundAssign(e, &ComplexExprEmitter::emitBinMul);
+  }
 };
 } // namespace
 
@@ -166,6 +188,12 @@ static const ComplexType *getComplexType(QualType type) {
 }
 #endif // NDEBUG
 
+static mlir::Value createComplexFromReal(CIRGenBuilderTy &builder,
+                                         mlir::Location loc, mlir::Value real) {
+  mlir::Value imag = builder.getNullValue(real.getType(), loc);
+  return builder.createComplexCreate(loc, real, imag);
+}
+
 LValue ComplexExprEmitter::emitBinAssignLValue(const BinaryOperator *e,
                                                mlir::Value &value) {
   assert(cgf.getContext().hasSameUnqualifiedType(e->getLHS()->getType(),
@@ -602,7 +630,7 @@ mlir::Value ComplexExprEmitter::emitPromoted(const Expr *e,
   mlir::Value result = Visit(const_cast<Expr *>(e));
 
   if (!promotionTy.isNull())
-    cgf.cgm.errorNYI("emitPromoted emitPromotedValue");
+    return cgf.emitPromotedValue(result, promotionTy);
 
   return result;
 }
@@ -630,6 +658,104 @@ ComplexExprEmitter::emitBinOps(const BinaryOperator *e, QualType promotionTy) {
   return binOpInfo;
 }
 
+LValue ComplexExprEmitter::emitCompoundAssignLValue(
+    const CompoundAssignOperator *e,
+    mlir::Value (ComplexExprEmitter::*func)(const BinOpInfo &), RValue &value) {
+  QualType lhsTy = e->getLHS()->getType();
+  QualType rhsTy = e->getRHS()->getType();
+  SourceLocation exprLoc = e->getExprLoc();
+  mlir::Location loc = cgf.getLoc(exprLoc);
+
+  if (lhsTy->getAs<AtomicType>()) {
+    cgf.cgm.errorNYI("emitCompoundAssignLValue AtomicType");
+    return {};
+  }
+
+  BinOpInfo opInfo{loc};
+  opInfo.fpFeatures = e->getFPFeaturesInEffect(cgf.getLangOpts());
+
+  assert(!cir::MissingFeatures::cgFPOptionsRAII());
+
+  // Load the RHS and LHS operands.
+  // __block variables need to have the rhs evaluated first, plus this should
+  // improve codegen a little.
+  QualType promotionTypeCR = getPromotionType(e->getComputationResultType());
+  opInfo.ty = promotionTypeCR.isNull() ? e->getComputationResultType()
+                                       : promotionTypeCR;
+
+  QualType complexElementTy =
+      opInfo.ty->castAs<ComplexType>()->getElementType();
+  QualType promotionTypeRHS = getPromotionType(rhsTy);
+
+  // The RHS should have been converted to the computation type.
+  if (e->getRHS()->getType()->isRealFloatingType()) {
+    if (!promotionTypeRHS.isNull()) {
+      opInfo.rhs = createComplexFromReal(
+          cgf.getBuilder(), loc,
+          cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS));
+    } else {
+      assert(cgf.getContext().hasSameUnqualifiedType(complexElementTy, rhsTy));
+      opInfo.rhs = createComplexFromReal(cgf.getBuilder(), loc,
+                                         cgf.emitScalarExpr(e->getRHS()));
+    }
+  } else {
+    if (!promotionTypeRHS.isNull()) {
+      opInfo.rhs = cgf.emitPromotedComplexExpr(e->getRHS(), promotionTypeRHS);
+    } else {
+      assert(cgf.getContext().hasSameUnqualifiedType(opInfo.ty, rhsTy));
+      opInfo.rhs = Visit(e->getRHS());
+    }
+  }
+
+  LValue lhs = cgf.emitLValue(e->getLHS());
+
+  // Load from the l-value and convert it.
+  QualType promotionTypeLHS = getPromotionType(e->getComputationLHSType());
+  if (lhsTy->isAnyComplexType()) {
+    mlir::Value lhsValue = emitLoadOfLValue(lhs, exprLoc);
+    QualType destTy = promotionTypeLHS.isNull() ? opInfo.ty : promotionTypeLHS;
+    opInfo.lhs = emitComplexToComplexCast(lhsValue, lhsTy, destTy, exprLoc);
+  } else {
+    cgf.cgm.errorNYI("emitCompoundAssignLValue emitLoadOfScalar");
+    return {};
+  }
+
+  // Expand the binary operator.
+  mlir::Value result = (this->*func)(opInfo);
+
+  // Truncate the result and store it into the LHS lvalue.
+  if (lhsTy->isAnyComplexType()) {
+    mlir::Value resultValue =
+        emitComplexToComplexCast(result, opInfo.ty, lhsTy, exprLoc);
+    emitStoreOfComplex(loc, resultValue, lhs, /*isInit*/ false);
+    value = RValue::getComplex(resultValue);
+  } else {
+    mlir::Value resultValue =
+        cgf.emitComplexToScalarConversion(result, opInfo.ty, lhsTy, exprLoc);
+    cgf.emitStoreOfScalar(resultValue, lhs, /*isInit*/ false);
+    value = RValue::get(resultValue);
+  }
+
+  return lhs;
+}
+
+mlir::Value ComplexExprEmitter::emitCompoundAssign(
+    const CompoundAssignOperator *e,
+    mlir::Value (ComplexExprEmitter::*func)(const BinOpInfo &)) {
+  RValue val;
+  LValue lv = emitCompoundAssignLValue(e, func, val);
+
+  // The result of an assignment in C is the assigned r-value.
+  if (!cgf.getLangOpts().CPlusPlus)
+    return val.getComplexValue();
+
+  // If the lvalue is non-volatile, return the computed value of the assignment.
+  if (!lv.isVolatileQualified())
+    return val.getComplexValue();
+
+  return emitLoadOfLValue(lv, e->getExprLoc());
+}
+
 mlir::Value ComplexExprEmitter::emitBinAdd(const BinOpInfo &op) {
   assert(!cir::MissingFeatures::fastMathFlags());
   assert(!cir::MissingFeatures::cgFPOptionsRAII());
@@ -654,7 +780,7 @@ getComplexRangeAttr(LangOptions::ComplexRangeKind range) {
   case LangOptions::CX_Basic:
     return cir::ComplexRangeKind::Basic;
   case LangOptions::CX_None:
-    // The default value for ComplexRangeKind is Full is no option is selected
+    // The default value for ComplexRangeKind is Full if no option is selected
     return cir::ComplexRangeKind::Full;
   }
 }
@@ -685,6 +811,31 @@ mlir::Value CIRGenFunction::emitComplexExpr(const Expr *e) {
   return ComplexExprEmitter(*this).Visit(const_cast<Expr *>(e));
 }
 
+using CompoundFunc =
+    mlir::Value (ComplexExprEmitter::*)(const ComplexExprEmitter::BinOpInfo &);
+
+static CompoundFunc getComplexOp(BinaryOperatorKind op) {
+  switch (op) {
+  case BO_MulAssign:
+    return &ComplexExprEmitter::emitBinMul;
+  case BO_DivAssign:
+    llvm_unreachable("getComplexOp: BO_DivAssign");
+  case BO_SubAssign:
+    return &ComplexExprEmitter::emitBinSub;
+  case BO_AddAssign:
+    return &ComplexExprEmitter::emitBinAdd;
+  default:
+    llvm_unreachable("unexpected complex compound assignment");
+  }
+}
+
+LValue CIRGenFunction::emitComplexCompoundAssignmentLValue(
+    const CompoundAssignOperator *e) {
+  CompoundFunc op = getComplexOp(e->getOpcode());
+  RValue val;
+  return ComplexExprEmitter(*this).emitCompoundAssignLValue(e, op, val);
+}
+
 mlir::Value CIRGenFunction::emitComplexPrePostIncDec(const UnaryOperator *e,
                                                      LValue lv,
                                                      cir::UnaryOpKind op,
@@ -729,3 +880,11 @@ mlir::Value CIRGenFunction::emitPromotedComplexExpr(const Expr *e,
                                                     QualType promotionType) {
   return ComplexExprEmitter(*this).emitPromoted(e, promotionType);
 }
+
+mlir::Value CIRGenFunction::emitPromotedValue(mlir::Value result,
+                                              QualType promotionType) {
+  assert(!mlir::cast<cir::ComplexType>(result.getType()).isIntegerComplex() &&
+         "integral complex will never be promoted");
+  return builder.createCast(cir::CastKind::float_complex, result,
+                            convertType(promotionType));
+}
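The compound-assignment plumbing added above is driven by pointers to member functions: getComplexOp() maps the AST opcode to one of emitBinAdd/Sub/Mul, and emitCompoundAssign() threads that member through the shared load-op-store path (returning the assigned value in C, or reloading through a volatile lvalue in C++). The dispatch pattern in isolation, with toy types:

    #include <complex>

    struct Emitter { // toy stand-in for ComplexExprEmitter
      using C = std::complex<float>;
      using BinFn = C (Emitter::*)(C, C);

      C add(C l, C r) { return l + r; }
      C sub(C l, C r) { return l - r; }

      // Shared driver: load the lvalue, apply the selected op, store back.
      C compoundAssign(C &lhs, C rhs, BinFn fn) {
        lhs = (this->*fn)(lhs, rhs);
        return lhs;
      }
    };

    enum Opcode { AddAssign, SubAssign };

    Emitter::BinFn pick(Opcode op) { // analogous to getComplexOp()
      return op == AddAssign ? &Emitter::add : &Emitter::sub;
    }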
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 32c1c1a..3e06513 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -1955,6 +1955,29 @@ mlir::Value CIRGenFunction::emitScalarConversion(mlir::Value src,
       .emitScalarConversion(src, srcTy, dstTy, loc);
 }
 
+mlir::Value CIRGenFunction::emitComplexToScalarConversion(mlir::Value src,
+                                                          QualType srcTy,
+                                                          QualType dstTy,
+                                                          SourceLocation loc) {
+  assert(srcTy->isAnyComplexType() && hasScalarEvaluationKind(dstTy) &&
+         "Invalid complex -> scalar conversion");
+
+  QualType complexElemTy = srcTy->castAs<ComplexType>()->getElementType();
+  if (dstTy->isBooleanType()) {
+    auto kind = complexElemTy->isFloatingType()
+                    ? cir::CastKind::float_complex_to_bool
+                    : cir::CastKind::int_complex_to_bool;
+    return builder.createCast(getLoc(loc), kind, src, convertType(dstTy));
+  }
+
+  auto kind = complexElemTy->isFloatingType()
+                  ? cir::CastKind::float_complex_to_real
+                  : cir::CastKind::int_complex_to_real;
+  mlir::Value real =
+      builder.createCast(getLoc(loc), kind, src, convertType(complexElemTy));
+  return emitScalarConversion(real, complexElemTy, dstTy, loc);
+}
+
 mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *e) {
   // Perform vector logical not on comparison with zero vector.
   if (e->getType()->isVectorType() &&
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index eb05c93..dedd01c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -28,8 +28,6 @@ CIRGenFunction::CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder,
                                bool suppressNewContext)
     : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {
   ehStack.setCGF(this);
-  currentCleanupStackDepth = 0;
-  assert(ehStack.getStackDepth() == 0);
 }
 
 CIRGenFunction::~CIRGenFunction() {}
@@ -409,6 +407,8 @@ void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
   const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
   curFuncDecl = d->getNonClosureContext();
 
+  prologueCleanupDepth = ehStack.stable_begin();
+
   mlir::Block *entryBB = &fn.getBlocks().front();
   builder.setInsertionPointToStart(entryBB);
 
@@ -475,11 +475,11 @@ void CIRGenFunction::finishFunction(SourceLocation endLoc) {
   // important to do this before we enter the return block or return
   // edges will be *really* confused.
   // TODO(cir): Use prologueCleanupDepth here.
-  bool hasCleanups = ehStack.getStackDepth() != currentCleanupStackDepth;
+  bool hasCleanups = ehStack.stable_begin() != prologueCleanupDepth;
   if (hasCleanups) {
     assert(!cir::MissingFeatures::generateDebugInfo());
     // FIXME(cir): should we clearInsertionPoint? breaks many testcases
-    popCleanupBlocks(currentCleanupStackDepth);
+    popCleanupBlocks(prologueCleanupDepth);
   }
 }
 
@@ -785,9 +785,8 @@ LValue CIRGenFunction::emitLValue(const Expr *e) {
     }
     if (!ty->isAnyComplexType())
       return emitCompoundAssignmentLValue(cast<CompoundAssignOperator>(e));
-    cgm.errorNYI(e->getSourceRange(),
-                 "CompoundAssignOperator with ComplexType");
-    return LValue();
+
+    return emitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(e));
   }
   case Expr::CallExprClass:
   case Expr::CXXMemberCallExprClass:
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 3d92545..bdbc77c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -601,9 +601,13 @@ public:
                        FunctionArgList args, clang::SourceLocation loc,
                        clang::SourceLocation startLoc);
 
+  /// The cleanup depth enclosing all the cleanups associated with the
+  /// parameters.
+  EHScopeStack::stable_iterator prologueCleanupDepth;
+
   /// Takes the old cleanup stack size and emits the cleanup blocks
   /// that have been added.
-  void popCleanupBlocks(size_t oldCleanupStackDepth);
+  void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth);
   void popCleanupBlock();
 
   /// Push a cleanup to be run at the end of the current full-expression. Safe
@@ -622,7 +626,7 @@ public:
   /// Enters a new scope for capturing cleanups, all of which
   /// will be executed once the scope is exited.
   class RunCleanupsScope {
-    size_t cleanupStackDepth, oldCleanupStackDepth;
+    EHScopeStack::stable_iterator cleanupStackDepth, oldCleanupStackDepth;
 
   protected:
     bool performCleanup;
@@ -638,7 +642,7 @@ public:
     /// Enter a new cleanup scope.
     explicit RunCleanupsScope(CIRGenFunction &cgf)
         : performCleanup(true), cgf(cgf) {
-      cleanupStackDepth = cgf.ehStack.getStackDepth();
+      cleanupStackDepth = cgf.ehStack.stable_begin();
       oldCleanupStackDepth = cgf.currentCleanupStackDepth;
       cgf.currentCleanupStackDepth = cleanupStackDepth;
     }
@@ -663,7 +667,7 @@ public:
   };
 
   // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
-  size_t currentCleanupStackDepth;
+  EHScopeStack::stable_iterator currentCleanupStackDepth = ehStack.stable_end();
 
 public:
   /// Represents a scope, including function bodies, compound statements, and
@@ -944,6 +948,11 @@ public:
   /// sanitizer is enabled, a runtime check is also emitted.
   mlir::Value emitCheckedArgForAssume(const Expr *e);
 
+  /// Emit a conversion from the specified complex type to the specified
+  /// destination type, where the destination type is an LLVM scalar type.
+  mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy,
+                                            QualType dstTy, SourceLocation loc);
+
   LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e);
   LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e);
 
@@ -1047,6 +1056,8 @@ public:
 
   mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType);
 
+  mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType);
+
   /// Emit the computation of the specified expression of scalar type.
   mlir::Value emitScalarExpr(const clang::Expr *e);
 
@@ -1076,6 +1087,7 @@ public:
                                        cir::UnaryOpKind op, bool isPre);
 
   LValue emitComplexAssignmentLValue(const BinaryOperator *e);
+  LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e);
 
   void emitCompoundStmt(const clang::CompoundStmt &s);
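RunCleanupsScope above now snapshots ehStack.stable_begin() on entry and pops back to that snapshot on exit, instead of comparing raw vector sizes. The save/restore discipline in miniature (a toy stack whose stable mark is a plain count; the real mark is a byte offset from the buffer end):

    #include <cassert>
    #include <vector>

    struct ToyStack {
      std::vector<int> cleanups;
      size_t stableBegin() const { return cleanups.size(); }
      void popTo(size_t mark) {
        while (cleanups.size() > mark)
          cleanups.pop_back(); // emit + discard one cleanup
      }
    };

    struct ToyScope { // RAII, like RunCleanupsScope
      ToyStack &s;
      size_t mark;
      explicit ToyScope(ToyStack &s) : s(s), mark(s.stableBegin()) {}
      ~ToyScope() { s.popTo(mark); }
    };

    int main() {
      ToyStack s;
      {
        ToyScope scope(s);
        s.cleanups.push_back(1); // registered inside the scope
      }                          // scope exit pops back to the mark
      assert(s.cleanups.empty());
    }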
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 425250d..ff6d293 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -1365,6 +1365,21 @@ void CIRGenModule::emitTopLevelDecl(Decl *decl) {
     assert(!cir::MissingFeatures::generateDebugInfo());
     assert(!cir::MissingFeatures::cxxRecordStaticMembers());
     break;
+
+  case Decl::FileScopeAsm:
+    // File-scope asm is ignored during device-side CUDA compilation.
+    if (langOpts.CUDA && langOpts.CUDAIsDevice)
+      break;
+    // File-scope asm is ignored during device-side OpenMP compilation.
+    if (langOpts.OpenMPIsTargetDevice)
+      break;
+    // File-scope asm is ignored during device-side SYCL compilation.
+    if (langOpts.SYCLIsDevice)
+      break;
+    auto *file_asm = cast<FileScopeAsmDecl>(decl);
+    std::string line = file_asm->getAsmString();
+    globalScopeAsm.push_back(builder.getStringAttr(line));
+    break;
   }
 }
 
@@ -1978,6 +1993,9 @@ void CIRGenModule::release() {
   emitDeferred();
   applyReplacements();
 
+  theModule->setAttr(cir::CIRDialect::getModuleLevelAsmAttrName(),
+                     builder.getArrayAttr(globalScopeAsm));
+
   // There's a lot of code that is not implemented yet.
   assert(!cir::MissingFeatures::cgmRelease());
 }
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index 5d07d38..163a0fc 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -90,6 +90,8 @@ private:
   /// for FunctionDecls's.
   CIRGenFunction *curCGF = nullptr;
 
+  llvm::SmallVector<mlir::Attribute> globalScopeAsm;
+
 public:
   mlir::ModuleOp getModule() const { return theModule; }
   CIRGenBuilderTy &getBuilder() { return builder; }
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index 50642e7..332babd 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -412,7 +412,7 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
     auto *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
     // This should emit a branch through the cleanup block if one exists.
     builder.create<cir::BrOp>(loc, retBlock);
-    if (ehStack.getStackDepth() != currentCleanupStackDepth)
+    if (ehStack.stable_begin() != currentCleanupStackDepth)
      cgm.errorNYI(s.getSourceRange(), "return with cleanup stack");
     builder.createBlock(builder.getBlock()->getParent());
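The CIRGenModule hunks above collect each file-scope asm string into globalScopeAsm and attach the array to the module in release(); the LowerToLLVM hunk near the end of this diff then forwards it to the LLVM dialect's module-level asm attribute. For reference, this is the kind of C++ input that exercises the path; each string becomes one array entry, and all of them are dropped for CUDA/OpenMP/SYCL device compilations. The asm body is illustrative and assumes an ELF target with GNU-style assembly:

    // Each file-scope asm declaration contributes one module-level asm line.
    __asm__(".globl saved_flag");
    __asm__("saved_flag: .long 42");

    int main() { return 0; }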
   virtual void emit(CIRGenFunction &cgf) = 0;
-  };
-
-  // Classic codegen has a finely tuned custom allocator and a complex stack
-  // management scheme. We'll probably eventually want to find a way to share
-  // that implementation. For now, we will use a very simplified implementation
-  // to get cleanups working.
-  llvm::SmallVector<std::unique_ptr<Cleanup>, 8> cleanupStack;
+
+    // This is a placeholder until EHScope is implemented.
+    virtual size_t getSize() const = 0;
+  };
 
 private:
+  // The implementation for this class is in CIRGenCleanup.h and
+  // CIRGenCleanup.cpp; the definition is here because it's used as a
+  // member of CIRGenFunction.
+
+  /// The start of the scope-stack buffer, i.e. the allocated pointer
+  /// for the buffer. All of these pointers are either simultaneously
+  /// null or simultaneously valid.
+  std::unique_ptr<char[]> startOfBuffer;
+
+  /// The end of the buffer.
+  char *endOfBuffer = nullptr;
+
+  /// The first valid entry in the buffer.
+  char *startOfData = nullptr;
+
   /// The CGF this stack belongs to.
   CIRGenFunction *cgf = nullptr;
 
+  // This class uses a custom allocator for maximum efficiency because cleanups
+  // are allocated and freed very frequently. It's basically a bump pointer
+  // allocator, but we can't use LLVM's BumpPtrAllocator because we use offsets
+  // into the buffer as stable iterators.
+  char *allocate(size_t size);
+  void deallocate(size_t size);
+
+  void *pushCleanup(CleanupKind kind, size_t dataSize);
+
 public:
   EHScopeStack() = default;
   ~EHScopeStack() = default;
 
   /// Push a lazily-created cleanup on the stack.
   template <class T, class... As> void pushCleanup(CleanupKind kind, As... a) {
-    cleanupStack.push_back(std::make_unique<T>(a...));
+    static_assert(alignof(T) <= ScopeStackAlignment,
+                  "Cleanup's alignment is too large.");
+    void *buffer = pushCleanup(kind, sizeof(T));
+    [[maybe_unused]] Cleanup *obj = new (buffer) T(a...);
   }
 
   void setCGF(CIRGenFunction *inCGF) { cgf = inCGF; }
 
-  size_t getStackDepth() const { return cleanupStack.size(); }
+  /// Pops a cleanup scope off the stack. This is private to CIRGenCleanup.cpp.
+  void popCleanup();
+
+  /// Determines whether the exception-scopes stack is empty.
+  bool empty() const { return startOfData == endOfBuffer; }
+
+  /// An unstable reference to a scope-stack depth. Invalidated by
+  /// pushes but not pops.
+  class iterator;
+
+  /// Returns an iterator pointing to the innermost EH scope.
+  iterator begin() const;
+
+  /// Create a stable reference to the top of the EH stack. The
+  /// returned reference is valid until that scope is popped off the
+  /// stack.
+  stable_iterator stable_begin() const {
+    return stable_iterator(endOfBuffer - startOfData);
+  }
+
+  /// Create a stable reference to the bottom of the EH stack.
+  static stable_iterator stable_end() { return stable_iterator(0); }
 };
 
 } // namespace clang::CIRGen
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index d3fcac1..53ab04e 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -1444,6 +1444,27 @@ cir::GetGlobalOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
 }
 
 //===----------------------------------------------------------------------===//
+// VTableAddrPointOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult
+cir::VTableAddrPointOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
+  StringRef name = getName();
+
+  // Verify that the underlying pointer type of the result matches the type
+  // of the referenced cir.global or cir.func op.
+  auto op = symbolTable.lookupNearestSymbolFrom<GlobalOp>(*this, getNameAttr());
+  if (!op)
+    return emitOpError("'")
+           << name << "' does not reference a valid cir.global";
+  std::optional<mlir::Attribute> init = op.getInitialValue();
+  if (!init)
+    return success();
+  assert(!cir::MissingFeatures::vtableInitializer());
+  return success();
+}
+
+//===----------------------------------------------------------------------===//
 // FuncOp
 //===----------------------------------------------------------------------===//
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 7e1c9fb..43a1b51 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -2143,6 +2143,11 @@ void ConvertCIRToLLVMPass::processCIRAttrs(mlir::ModuleOp module) {
           module->getAttr(cir::CIRDialect::getTripleAttrName()))
     module->setAttr(mlir::LLVM::LLVMDialect::getTargetTripleAttrName(),
                     tripleAttr);
+
+  if (mlir::Attribute asmAttr =
+          module->getAttr(cir::CIRDialect::getModuleLevelAsmAttrName()))
+    module->setAttr(mlir::LLVM::LLVMDialect::getModuleLevelAsmAttrName(),
+                    asmAttr);
 }
 
 void ConvertCIRToLLVMPass::runOnOperation() {
diff --git a/clang/lib/CodeGen/CGCXXABI.h b/clang/lib/CodeGen/CGCXXABI.h
index 96fe046..2dd320d 100644
--- a/clang/lib/CodeGen/CGCXXABI.h
+++ b/clang/lib/CodeGen/CGCXXABI.h
@@ -294,14 +294,22 @@ public:
                                           Address Value,
                                           QualType SrcRecordTy) = 0;
 
+  struct ExactDynamicCastInfo {
+    bool RequiresCastToPrimaryBase;
+    CharUnits Offset;
+  };
+
+  virtual std::optional<ExactDynamicCastInfo>
+  getExactDynamicCastInfo(QualType SrcRecordTy, QualType DestTy,
+                          QualType DestRecordTy) = 0;
+
   /// Emit a dynamic_cast from SrcRecordTy to DestRecordTy. The cast fails if
   /// the dynamic type of Value is not exactly DestRecordTy.
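The new split separates deciding whether an exact dynamic_cast is possible (getExactDynamicCastInfo) from emitting it. The three possible outcomes correspond to simple source-level situations; the hierarchy below is hypothetical and purely for exposition:

    struct B { virtual ~B(); };
    struct C : B {};
    struct D final : B, C {};      // public B at two different offsets
    struct E final : private B {}; // no public path from B at all

    // Exactly one public path C -> D: the ABI returns a fixed offset and
    // the cast lowers to one vptr compare plus a constant pointer
    // adjustment, roughly:
    //   ok = (*(void **)c == expected-vtable-address-point);
    //   return ok ? (D *)((char *)c - Offset) : nullptr;
    D *onePath(C *c) { return dynamic_cast<D *>(c); }

    // B occurs twice in D, so no single vptr value can be expected:
    // RequiresCastToPrimaryBase is set, and codegen first converts to the
    // most-derived object (emitDynamicCastToVoid), then compares the vptr
    // found at offset zero.
    D *twoPaths(B *b) { return dynamic_cast<D *>(b); }

    // No public inheritance path: getExactDynamicCastInfo returns nullopt
    // and EmitDynamicCast now folds the whole cast to null up front.
    E *never(B *b) { return dynamic_cast<E *>(b); }

Note that both destination classes are final: shouldEmitExactDynamicCast only opts in when the destination's vtable pointer is unique.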
- virtual llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address Value, - QualType SrcRecordTy, - QualType DestTy, - QualType DestRecordTy, - llvm::BasicBlock *CastSuccess, - llvm::BasicBlock *CastFail) = 0; + virtual llvm::Value *emitExactDynamicCast( + CodeGenFunction &CGF, Address Value, QualType SrcRecordTy, + QualType DestTy, QualType DestRecordTy, + const ExactDynamicCastInfo &CastInfo, llvm::BasicBlock *CastSuccess, + llvm::BasicBlock *CastFail) = 0; virtual bool EmitBadCastCall(CodeGenFunction &CGF) = 0; diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp index 5a3d4e4..ed35a05 100644 --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -3789,33 +3789,50 @@ void CodeGenFunction::EmitCheck( Branch->setMetadata(llvm::LLVMContext::MD_prof, Node); EmitBlock(Handlers); + // Clear arguments for the MinimalRuntime handler. + if (CGM.getCodeGenOpts().SanitizeMinimalRuntime) { + switch (CheckHandler) { + case SanitizerHandler::TypeMismatch: + // Pass value pointer only. It adds minimal overhead. + StaticArgs = {}; + assert(DynamicArgs.size() == 1); + break; + default: + // No arguments for other checks. + StaticArgs = {}; + DynamicArgs = {}; + break; + } + } + // Handler functions take an i8* pointing to the (handler-specific) static // information block, followed by a sequence of intptr_t arguments // representing operand values. SmallVector<llvm::Value *, 4> Args; SmallVector<llvm::Type *, 4> ArgTypes; - if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) { - Args.reserve(DynamicArgs.size() + 1); - ArgTypes.reserve(DynamicArgs.size() + 1); - - // Emit handler arguments and create handler function type. - if (!StaticArgs.empty()) { - llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs); - auto *InfoPtr = new llvm::GlobalVariable( - CGM.getModule(), Info->getType(), false, - llvm::GlobalVariable::PrivateLinkage, Info, "", nullptr, - llvm::GlobalVariable::NotThreadLocal, - CGM.getDataLayout().getDefaultGlobalsAddressSpace()); - InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); - CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr); - Args.push_back(InfoPtr); - ArgTypes.push_back(Args.back()->getType()); - } - for (llvm::Value *DynamicArg : DynamicArgs) { - Args.push_back(EmitCheckValue(DynamicArg)); - ArgTypes.push_back(IntPtrTy); - } + Args.reserve(DynamicArgs.size() + 1); + ArgTypes.reserve(DynamicArgs.size() + 1); + + // Emit handler arguments and create handler function type. + if (!StaticArgs.empty()) { + llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs); + auto *InfoPtr = new llvm::GlobalVariable( + CGM.getModule(), Info->getType(), + // Non-constant global is used in a handler to deduplicate reports. + // TODO: change deduplication logic and make it constant. 
+      /*isConstant=*/false, llvm::GlobalVariable::PrivateLinkage, Info, "",
+      nullptr, llvm::GlobalVariable::NotThreadLocal,
+      CGM.getDataLayout().getDefaultGlobalsAddressSpace());
+  InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
+  Args.push_back(InfoPtr);
+  ArgTypes.push_back(Args.back()->getType());
+  }
+
+  for (llvm::Value *DynamicArg : DynamicArgs) {
+    Args.push_back(EmitCheckValue(DynamicArg));
+    ArgTypes.push_back(IntPtrTy);
   }
 
   llvm::FunctionType *FnType =
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index fef1baf..49d5d8a 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -2295,6 +2295,18 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
       CGM.getCXXABI().shouldEmitExactDynamicCast(DestRecordTy) &&
       !getLangOpts().PointerAuthCalls;
 
+  std::optional<CGCXXABI::ExactDynamicCastInfo> ExactCastInfo;
+  if (IsExact) {
+    ExactCastInfo = CGM.getCXXABI().getExactDynamicCastInfo(SrcRecordTy, DestTy,
+                                                            DestRecordTy);
+    if (!ExactCastInfo) {
+      llvm::Value *NullValue = EmitDynamicCastToNull(*this, DestTy);
+      if (!Builder.GetInsertBlock())
+        EmitBlock(createBasicBlock("dynamic_cast.unreachable"));
+      return NullValue;
+    }
+  }
+
   // C++ [expr.dynamic.cast]p4:
   //   If the value of v is a null pointer value in the pointer case, the result
   //   is the null pointer value of type T.
@@ -2322,7 +2334,8 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
     // If the destination type is effectively final, this pointer points to the
     // right type if and only if its vptr has the right value.
     Value = CGM.getCXXABI().emitExactDynamicCast(
-        *this, ThisAddr, SrcRecordTy, DestTy, DestRecordTy, CastEnd, CastNull);
+        *this, ThisAddr, SrcRecordTy, DestTy, DestRecordTy, *ExactCastInfo,
+        CastEnd, CastNull);
   } else {
     assert(DestRecordTy->isRecordType() &&
            "destination type must be a record type!");
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.cpp b/clang/lib/CodeGen/CGHLSLRuntime.cpp
index f64ac20..918cb3e 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.cpp
+++ b/clang/lib/CodeGen/CGHLSLRuntime.cpp
@@ -103,13 +103,6 @@ llvm::Triple::ArchType CGHLSLRuntime::getArch() {
   return CGM.getTarget().getTriple().getArch();
 }
 
-// Returns true if the type is an HLSL resource class or an array of them
-static bool isResourceRecordTypeOrArrayOf(const clang::Type *Ty) {
-  while (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
-    Ty = CAT->getArrayElementTypeNoTypeQual();
-  return Ty->isHLSLResourceRecord();
-}
-
 // Emits constant global variables for buffer constant declarations
 // and creates metadata linking the constant globals with the buffer global.
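For the CGExpr.cpp hunk above, the net effect on -fsanitize-minimal-runtime is easiest to see as the shape of the emitted handler calls. The handler names below follow compiler-rt's ubsan naming; their minimal-runtime signatures are stated here as an assumption for illustration, not taken from this patch:

    // Full runtime: a pointer to handler-specific static data plus the
    // operand values are passed, e.g. for a null/alignment check:
    //   __ubsan_handle_type_mismatch_v1(&static_data, value_ptr);
    //
    // Minimal runtime after this change: static args are cleared for every
    // check, and only the TypeMismatch handler keeps its single dynamic
    // argument (the checked pointer):
    //   __ubsan_handle_type_mismatch_v1_minimal(value_ptr);
    //   __ubsan_handle_add_overflow_minimal(); // all other checks: no args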
void CGHLSLRuntime::emitBufferGlobalsAndMetadata(const HLSLBufferDecl *BufDecl, @@ -146,7 +139,7 @@ void CGHLSLRuntime::emitBufferGlobalsAndMetadata(const HLSLBufferDecl *BufDecl, if (VDTy.getAddressSpace() != LangAS::hlsl_constant) { if (VD->getStorageClass() == SC_Static || VDTy.getAddressSpace() == LangAS::hlsl_groupshared || - isResourceRecordTypeOrArrayOf(VDTy.getTypePtr())) { + VDTy->isHLSLResourceRecord() || VDTy->isHLSLResourceRecordArray()) { // Emit static and groupshared variables and resource classes inside // cbuffer as regular globals CGM.EmitGlobal(VD); diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp index aae1481..5ffc1ed 100644 --- a/clang/lib/CodeGen/ItaniumCXXABI.cpp +++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp @@ -226,6 +226,10 @@ public: return hasUniqueVTablePointer(DestRecordTy); } + std::optional<ExactDynamicCastInfo> + getExactDynamicCastInfo(QualType SrcRecordTy, QualType DestTy, + QualType DestRecordTy) override; + llvm::Value *emitDynamicCastCall(CodeGenFunction &CGF, Address Value, QualType SrcRecordTy, QualType DestTy, QualType DestRecordTy, @@ -234,6 +238,7 @@ public: llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy, QualType DestTy, QualType DestRecordTy, + const ExactDynamicCastInfo &CastInfo, llvm::BasicBlock *CastSuccess, llvm::BasicBlock *CastFail) override; @@ -1681,10 +1686,11 @@ llvm::Value *ItaniumCXXABI::emitDynamicCastCall( return Value; } -llvm::Value *ItaniumCXXABI::emitExactDynamicCast( - CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy, - QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastSuccess, - llvm::BasicBlock *CastFail) { +std::optional<CGCXXABI::ExactDynamicCastInfo> +ItaniumCXXABI::getExactDynamicCastInfo(QualType SrcRecordTy, QualType DestTy, + QualType DestRecordTy) { + assert(shouldEmitExactDynamicCast(DestRecordTy)); + ASTContext &Context = getContext(); // Find all the inheritance paths. @@ -1722,41 +1728,56 @@ llvm::Value *ItaniumCXXABI::emitExactDynamicCast( if (!Offset) Offset = PathOffset; else if (Offset != PathOffset) { - // Base appears in at least two different places. Find the most-derived - // object and see if it's a DestDecl. Note that the most-derived object - // must be at least as aligned as this base class subobject, and must - // have a vptr at offset 0. - ThisAddr = Address(emitDynamicCastToVoid(CGF, ThisAddr, SrcRecordTy), - CGF.VoidPtrTy, ThisAddr.getAlignment()); - SrcDecl = DestDecl; - Offset = CharUnits::Zero(); - break; + // Base appears in at least two different places. + return ExactDynamicCastInfo{/*RequiresCastToPrimaryBase=*/true, + CharUnits::Zero()}; } } + if (!Offset) + return std::nullopt; + return ExactDynamicCastInfo{/*RequiresCastToPrimaryBase=*/false, *Offset}; +} - if (!Offset) { - // If there are no public inheritance paths, the cast always fails. - CGF.EmitBranch(CastFail); - return llvm::PoisonValue::get(CGF.VoidPtrTy); - } +llvm::Value *ItaniumCXXABI::emitExactDynamicCast( + CodeGenFunction &CGF, Address ThisAddr, QualType SrcRecordTy, + QualType DestTy, QualType DestRecordTy, + const ExactDynamicCastInfo &ExactCastInfo, llvm::BasicBlock *CastSuccess, + llvm::BasicBlock *CastFail) { + const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl(); + const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl(); + + llvm::Value *VTable = nullptr; + if (ExactCastInfo.RequiresCastToPrimaryBase) { + // Base appears in at least two different places. 
Find the most-derived + // object and see if it's a DestDecl. Note that the most-derived object + // must be at least as aligned as this base class subobject, and must + // have a vptr at offset 0. + llvm::Value *PrimaryBase = + emitDynamicCastToVoid(CGF, ThisAddr, SrcRecordTy); + ThisAddr = Address(PrimaryBase, CGF.VoidPtrTy, ThisAddr.getAlignment()); + SrcDecl = DestDecl; + Address VTablePtrPtr = ThisAddr.withElementType(CGF.VoidPtrPtrTy); + VTable = CGF.Builder.CreateLoad(VTablePtrPtr, "vtable"); + } else + VTable = CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, SrcDecl); // Compare the vptr against the expected vptr for the destination type at - // this offset. Note that we do not know what type ThisAddr points to in - // the case where the derived class multiply inherits from the base class - // so we can't use GetVTablePtr, so we load the vptr directly instead. - llvm::Instruction *VPtr = CGF.Builder.CreateLoad( - ThisAddr.withElementType(CGF.VoidPtrPtrTy), "vtable"); - CGM.DecorateInstructionWithTBAA( - VPtr, CGM.getTBAAVTablePtrAccessInfo(CGF.VoidPtrPtrTy)); - llvm::Value *Success = CGF.Builder.CreateICmpEQ( - VPtr, getVTableAddressPoint(BaseSubobject(SrcDecl, *Offset), DestDecl)); - llvm::Value *Result = ThisAddr.emitRawPointer(CGF); - if (!Offset->isZero()) - Result = CGF.Builder.CreateInBoundsGEP( - CGF.CharTy, Result, - {llvm::ConstantInt::get(CGF.PtrDiffTy, -Offset->getQuantity())}); + // this offset. + llvm::Constant *ExpectedVTable = getVTableAddressPoint( + BaseSubobject(SrcDecl, ExactCastInfo.Offset), DestDecl); + llvm::Value *Success = CGF.Builder.CreateICmpEQ(VTable, ExpectedVTable); + llvm::Value *AdjustedThisPtr = ThisAddr.emitRawPointer(CGF); + + if (!ExactCastInfo.Offset.isZero()) { + CharUnits::QuantityType Offset = ExactCastInfo.Offset.getQuantity(); + llvm::Constant *OffsetConstant = + llvm::ConstantInt::get(CGF.PtrDiffTy, -Offset); + AdjustedThisPtr = CGF.Builder.CreateInBoundsGEP(CGF.CharTy, AdjustedThisPtr, + OffsetConstant); + } + CGF.Builder.CreateCondBr(Success, CastSuccess, CastFail); - return Result; + return AdjustedThisPtr; } llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF, diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp index 700ffa4..e8d2451 100644 --- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp +++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp @@ -158,9 +158,15 @@ public: // TODO: Add support for exact dynamic_casts. 
     return false;
   }
+  std::optional<ExactDynamicCastInfo>
+  getExactDynamicCastInfo(QualType SrcRecordTy, QualType DestTy,
+                          QualType DestRecordTy) override {
+    llvm_unreachable("unsupported");
+  }
   llvm::Value *emitExactDynamicCast(CodeGenFunction &CGF, Address Value,
                                     QualType SrcRecordTy, QualType DestTy,
                                     QualType DestRecordTy,
+                                    const ExactDynamicCastInfo &CastInfo,
                                     llvm::BasicBlock *CastSuccess,
                                     llvm::BasicBlock *CastFail) override {
     llvm_unreachable("unsupported");
diff --git a/clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp b/clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp
index 33a8d8f..1a1889a 100644
--- a/clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp
@@ -246,35 +246,26 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
     llvm::FunctionType *LLVMFuncTy =
         cast<llvm::FunctionType>(ConvertType(QualType(FuncTy, 0)));
 
+    bool VarArg = LLVMFuncTy->isVarArg();
     unsigned NParams = LLVMFuncTy->getNumParams();
     std::vector<Value *> Args;
-    Args.reserve(NParams + 3);
+    Args.reserve(NParams + 3 + VarArg);
     // The only real argument is the FuncRef
     Args.push_back(FuncRef);
     // Add the type information
-    auto addType = [this, &Args](llvm::Type *T) {
-      if (T->isVoidTy()) {
-        // Do nothing
-      } else if (T->isFloatingPointTy()) {
-        Args.push_back(ConstantFP::get(T, 0));
-      } else if (T->isIntegerTy()) {
-        Args.push_back(ConstantInt::get(T, 0));
-      } else if (T->isPointerTy()) {
-        Args.push_back(ConstantPointerNull::get(llvm::PointerType::get(
-            getLLVMContext(), T->getPointerAddressSpace())));
-      } else {
-        // TODO: Handle reference types. For now, we reject them in Sema.
-        llvm_unreachable("Unhandled type");
-      }
-    };
-
-    addType(LLVMFuncTy->getReturnType());
+    llvm::Type *RetType = LLVMFuncTy->getReturnType();
+    if (!RetType->isVoidTy()) {
+      Args.push_back(PoisonValue::get(RetType));
+    }
     // The token type indicates the boundary between return types and param
     // types.
     Args.push_back(PoisonValue::get(llvm::Type::getTokenTy(getLLVMContext())));
     for (unsigned i = 0; i < NParams; i++) {
-      addType(LLVMFuncTy->getParamType(i));
+      Args.push_back(PoisonValue::get(LLVMFuncTy->getParamType(i)));
+    }
+    if (VarArg) {
+      Args.push_back(PoisonValue::get(Builder.getPtrTy()));
     }
     Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_ref_test_func);
     return Builder.CreateCall(Callee, Args);
diff --git a/clang/lib/Frontend/CompilerInstance.cpp b/clang/lib/Frontend/CompilerInstance.cpp
index d64290f..9f99edad 100644
--- a/clang/lib/Frontend/CompilerInstance.cpp
+++ b/clang/lib/Frontend/CompilerInstance.cpp
@@ -1485,6 +1485,14 @@ static bool compileModuleAndReadASTImpl(CompilerInstance &ImportingInstance,
     return false;
   }
 
+  // The module was built successfully, so we can update its timestamp now.
+ if (ImportingInstance.getPreprocessor() + .getHeaderSearchInfo() + .getHeaderSearchOpts() + .ModulesValidateOncePerBuildSession) { + ImportingInstance.getModuleCache().updateModuleTimestamp(ModuleFileName); + } + return readASTAfterCompileModule(ImportingInstance, ImportLoc, ModuleNameLoc, Module, ModuleFileName, /*OutOfDate=*/nullptr, /*Missing=*/nullptr); diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 9f77e62..ccc3154 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -3936,47 +3936,18 @@ void CompilerInvocationBase::GenerateLangArgs(const LangOptions &Opts, GenerateArg(Consumer, OPT_fsanitize_ignorelist_EQ, F); switch (Opts.getClangABICompat()) { - case LangOptions::ClangABI::Ver3_8: - GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "3.8"); +#define ABI_VER_MAJOR_MINOR(Major, Minor) \ + case LangOptions::ClangABI::Ver##Major##_##Minor: \ + GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, #Major "." #Minor); \ break; - case LangOptions::ClangABI::Ver4: - GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "4.0"); +#define ABI_VER_MAJOR(Major) \ + case LangOptions::ClangABI::Ver##Major: \ + GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, #Major ".0"); \ break; - case LangOptions::ClangABI::Ver6: - GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "6.0"); - break; - case LangOptions::ClangABI::Ver7: - GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "7.0"); - break; - case LangOptions::ClangABI::Ver9: - GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "9.0"); - break; - case LangOptions::ClangABI::Ver11: - GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "11.0"); - break; - case LangOptions::ClangABI::Ver12: - GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "12.0"); - break; - case LangOptions::ClangABI::Ver14: - GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "14.0"); - break; - case LangOptions::ClangABI::Ver15: - GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "15.0"); - break; - case LangOptions::ClangABI::Ver17: - GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "17.0"); - break; - case LangOptions::ClangABI::Ver18: - GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "18.0"); - break; - case LangOptions::ClangABI::Ver19: - GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "19.0"); - break; - case LangOptions::ClangABI::Ver20: - GenerateArg(Consumer, OPT_fclang_abi_compat_EQ, "20.0"); - break; - case LangOptions::ClangABI::Latest: +#define ABI_VER_LATEST(Latest) \ + case LangOptions::ClangABI::Latest: \ break; +#include "clang/Basic/ABIVersions.def" } if (Opts.getSignReturnAddressScope() == @@ -4482,32 +4453,18 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args, !VerParts.second.getAsInteger(10, Minor) : VerParts.first.size() == Ver.size() || VerParts.second == "0")) { // Got a valid version number. 
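Both halves of this CompilerInvocation change (the generator switch above and the parser cascade that follows) are driven by the same X-macro list. A self-contained demonstration of the pattern, with invented DEMO_* names standing in for the ABIVersions.def entries:

    #include <cstdio>
    #include <tuple>

    // Stand-in for the include of clang/Basic/ABIVersions.def: each entry
    // expands to one test in a most-specific-first if/else cascade.
    #define DEMO_VERSIONS \
      DEMO_VER_MAJOR_MINOR(3, 8) \
      DEMO_VER_MAJOR(4) \
      DEMO_VER_MAJOR(20)

    int main() { // C++17 for std::tuple deduction
      int Major = 3, Minor = 7;
    #define DEMO_VER_MAJOR_MINOR(Ma, Mi) \
      if (std::tie(Major, Minor) <= std::tuple(Ma, Mi)) \
        std::printf("compat %d.%d\n", Ma, Mi); \
      else
    #define DEMO_VER_MAJOR(Ma) \
      if (Major <= Ma) \
        std::printf("compat %d.0\n", Ma); \
      else
      DEMO_VERSIONS
      { std::printf("latest\n"); } // final else: the ABI_VER_LATEST case
      return 0; // prints "compat 3.8": 3.7 sorts before the 3.8 cutoff
    }

The payoff of the macro form is that adding a new ABI version becomes a one-line edit to the .def file instead of parallel edits to two hand-written lists.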
- if (Major == 3 && Minor <= 8) - Opts.setClangABICompat(LangOptions::ClangABI::Ver3_8); - else if (Major <= 4) - Opts.setClangABICompat(LangOptions::ClangABI::Ver4); - else if (Major <= 6) - Opts.setClangABICompat(LangOptions::ClangABI::Ver6); - else if (Major <= 7) - Opts.setClangABICompat(LangOptions::ClangABI::Ver7); - else if (Major <= 9) - Opts.setClangABICompat(LangOptions::ClangABI::Ver9); - else if (Major <= 11) - Opts.setClangABICompat(LangOptions::ClangABI::Ver11); - else if (Major <= 12) - Opts.setClangABICompat(LangOptions::ClangABI::Ver12); - else if (Major <= 14) - Opts.setClangABICompat(LangOptions::ClangABI::Ver14); - else if (Major <= 15) - Opts.setClangABICompat(LangOptions::ClangABI::Ver15); - else if (Major <= 17) - Opts.setClangABICompat(LangOptions::ClangABI::Ver17); - else if (Major <= 18) - Opts.setClangABICompat(LangOptions::ClangABI::Ver18); - else if (Major <= 19) - Opts.setClangABICompat(LangOptions::ClangABI::Ver19); - else if (Major <= 20) - Opts.setClangABICompat(LangOptions::ClangABI::Ver20); +#define ABI_VER_MAJOR_MINOR(Major_, Minor_) \ + if (std::tie(Major, Minor) <= std::tuple(Major_, Minor_)) \ + Opts.setClangABICompat(LangOptions::ClangABI::Ver##Major_##_##Minor_); \ + else +#define ABI_VER_MAJOR(Major_) \ + if (Major <= Major_) \ + Opts.setClangABICompat(LangOptions::ClangABI::Ver##Major_); \ + else +#define ABI_VER_LATEST(Latest) \ + { /* Equivalent to latest version - do nothing */ \ + } +#include "clang/Basic/ABIVersions.def" } else if (Ver != "latest") { Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << A->getValue(); diff --git a/clang/lib/Headers/avx2intrin.h b/clang/lib/Headers/avx2intrin.h index dc9fc07..3c3a3d1 100644 --- a/clang/lib/Headers/avx2intrin.h +++ b/clang/lib/Headers/avx2intrin.h @@ -31,6 +31,14 @@ __min_vector_width__(128))) #endif +#if defined(__cplusplus) && (__cplusplus >= 201103L) +#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 constexpr +#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 constexpr +#else +#define __DEFAULT_FN_ATTRS256_CONSTEXPR __DEFAULT_FN_ATTRS256 +#define __DEFAULT_FN_ATTRS128_CONSTEXPR __DEFAULT_FN_ATTRS128 +#endif + /* SSE4 Multiple Packed Sums of Absolute Difference. */ /// Computes sixteen sum of absolute difference (SAD) operations on sets of /// four unsigned 8-bit integers from the 256-bit integer vectors \a X and @@ -460,7 +468,7 @@ _mm256_adds_epu16(__m256i __a, __m256i __b) /// \param __b /// A 256-bit integer vector. /// \returns A 256-bit integer vector containing the result. -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_and_si256(__m256i __a, __m256i __b) { return (__m256i)((__v4du)__a & (__v4du)__b); @@ -478,7 +486,7 @@ _mm256_and_si256(__m256i __a, __m256i __b) /// \param __b /// A 256-bit integer vector. /// \returns A 256-bit integer vector containing the result. -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_andnot_si256(__m256i __a, __m256i __b) { return (__m256i)(~(__v4du)__a & (__v4du)__b); @@ -1822,7 +1830,7 @@ _mm256_mul_epu32(__m256i __a, __m256i __b) /// \param __b /// A 256-bit integer vector. /// \returns A 256-bit integer vector containing the result. 
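From here to the end of the section, the intrinsic-header hunks all make the same mechanical change: bodies that are already plain vector expressions gain a constexpr-enabled attribute macro, guarded on C++11. A sketch of what that buys (assumes Clang with -mavx2; __v4du is the headers' internal lane typedef):

    #include <immintrin.h>

    // With the _CONSTEXPR attribute macros, bitwise intrinsics participate
    // in constant evaluation, so results can be checked at compile time.
    constexpr __v4du lhs = {0xF0, 0x0F, 0xFF, 0x00};
    constexpr __v4du rhs = {0xFF, 0xFF, 0x0F, 0x0F};
    constexpr __m256i both = _mm256_and_si256((__m256i)lhs, (__m256i)rhs);
    static_assert(((__v4du)both)[0] == 0xF0, "folded at compile time");
    static_assert(((__v4du)both)[2] == 0x0F, "folded at compile time");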
-static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_or_si256(__m256i __a, __m256i __b) { return (__m256i)((__v4du)__a | (__v4du)__b); @@ -2974,7 +2982,7 @@ _mm256_unpacklo_epi64(__m256i __a, __m256i __b) /// \param __b /// A 256-bit integer vector. /// \returns A 256-bit integer vector containing the result. -static __inline__ __m256i __DEFAULT_FN_ATTRS256 +static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR _mm256_xor_si256(__m256i __a, __m256i __b) { return (__m256i)((__v4du)__a ^ (__v4du)__b); @@ -5289,5 +5297,7 @@ _mm_srlv_epi64(__m128i __X, __m128i __Y) #undef __DEFAULT_FN_ATTRS256 #undef __DEFAULT_FN_ATTRS128 +#undef __DEFAULT_FN_ATTRS256_CONSTEXPR +#undef __DEFAULT_FN_ATTRS128_CONSTEXPR #endif /* __AVX2INTRIN_H */ diff --git a/clang/lib/Headers/avx512dqintrin.h b/clang/lib/Headers/avx512dqintrin.h index 88b48e3..62325b9 100644 --- a/clang/lib/Headers/avx512dqintrin.h +++ b/clang/lib/Headers/avx512dqintrin.h @@ -20,6 +20,14 @@ __attribute__((__always_inline__, __nodebug__, \ __target__("avx512dq,no-evex512"))) +#if defined(__cplusplus) && (__cplusplus >= 201103L) +#define __DEFAULT_FN_ATTRS512_CONSTEXPR __DEFAULT_FN_ATTRS512 constexpr +#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr +#else +#define __DEFAULT_FN_ATTRS512_CONSTEXPR __DEFAULT_FN_ATTRS512 +#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS +#endif + static __inline __mmask8 __DEFAULT_FN_ATTRS _knot_mask8(__mmask8 __M) { @@ -167,7 +175,7 @@ _mm512_maskz_mullo_epi64(__mmask8 __U, __m512i __A, __m512i __B) { (__v8di)_mm512_setzero_si512()); } -static __inline__ __m512d __DEFAULT_FN_ATTRS512 +static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_xor_pd(__m512d __A, __m512d __B) { return (__m512d)((__v8du)__A ^ (__v8du)__B); } @@ -186,7 +194,7 @@ _mm512_maskz_xor_pd(__mmask8 __U, __m512d __A, __m512d __B) { (__v8df)_mm512_setzero_pd()); } -static __inline__ __m512 __DEFAULT_FN_ATTRS512 +static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_xor_ps (__m512 __A, __m512 __B) { return (__m512)((__v16su)__A ^ (__v16su)__B); } @@ -205,7 +213,7 @@ _mm512_maskz_xor_ps(__mmask16 __U, __m512 __A, __m512 __B) { (__v16sf)_mm512_setzero_ps()); } -static __inline__ __m512d __DEFAULT_FN_ATTRS512 +static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_or_pd(__m512d __A, __m512d __B) { return (__m512d)((__v8du)__A | (__v8du)__B); } @@ -224,7 +232,7 @@ _mm512_maskz_or_pd(__mmask8 __U, __m512d __A, __m512d __B) { (__v8df)_mm512_setzero_pd()); } -static __inline__ __m512 __DEFAULT_FN_ATTRS512 +static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_or_ps(__m512 __A, __m512 __B) { return (__m512)((__v16su)__A | (__v16su)__B); } @@ -243,7 +251,7 @@ _mm512_maskz_or_ps(__mmask16 __U, __m512 __A, __m512 __B) { (__v16sf)_mm512_setzero_ps()); } -static __inline__ __m512d __DEFAULT_FN_ATTRS512 +static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_and_pd(__m512d __A, __m512d __B) { return (__m512d)((__v8du)__A & (__v8du)__B); } @@ -262,7 +270,7 @@ _mm512_maskz_and_pd(__mmask8 __U, __m512d __A, __m512d __B) { (__v8df)_mm512_setzero_pd()); } -static __inline__ __m512 __DEFAULT_FN_ATTRS512 +static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_and_ps(__m512 __A, __m512 __B) { return (__m512)((__v16su)__A & (__v16su)__B); } @@ -281,7 +289,7 @@ _mm512_maskz_and_ps(__mmask16 __U, __m512 __A, __m512 __B) { (__v16sf)_mm512_setzero_ps()); } -static __inline__ __m512d __DEFAULT_FN_ATTRS512 +static __inline__ 
__m512d __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_andnot_pd(__m512d __A, __m512d __B) { return (__m512d)(~(__v8du)__A & (__v8du)__B); } @@ -300,7 +308,7 @@ _mm512_maskz_andnot_pd(__mmask8 __U, __m512d __A, __m512d __B) { (__v8df)_mm512_setzero_pd()); } -static __inline__ __m512 __DEFAULT_FN_ATTRS512 +static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_andnot_ps(__m512 __A, __m512 __B) { return (__m512)(~(__v16su)__A & (__v16su)__B); } @@ -1375,5 +1383,7 @@ _mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) #undef __DEFAULT_FN_ATTRS512 #undef __DEFAULT_FN_ATTRS +#undef __DEFAULT_FN_ATTRS512_CONSTEXPR +#undef __DEFAULT_FN_ATTRS_CONSTEXPR #endif diff --git a/clang/lib/Headers/avx512fintrin.h b/clang/lib/Headers/avx512fintrin.h index 45e7eeb..74343c3 100644 --- a/clang/lib/Headers/avx512fintrin.h +++ b/clang/lib/Headers/avx512fintrin.h @@ -645,7 +645,7 @@ _mm512_zextsi256_si512(__m256i __a) } /* Bitwise operators */ -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_and_epi32(__m512i __a, __m512i __b) { return (__m512i)((__v16su)__a & (__v16su)__b); @@ -666,7 +666,7 @@ _mm512_maskz_and_epi32(__mmask16 __k, __m512i __a, __m512i __b) __k, __a, __b); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_and_epi64(__m512i __a, __m512i __b) { return (__m512i)((__v8du)__a & (__v8du)__b); @@ -687,13 +687,13 @@ _mm512_maskz_and_epi64(__mmask8 __k, __m512i __a, __m512i __b) __k, __a, __b); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_andnot_si512 (__m512i __A, __m512i __B) { return (__m512i)(~(__v8du)__A & (__v8du)__B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_andnot_epi32 (__m512i __A, __m512i __B) { return (__m512i)(~(__v16su)__A & (__v16su)__B); @@ -714,7 +714,7 @@ _mm512_maskz_andnot_epi32(__mmask16 __U, __m512i __A, __m512i __B) __U, __A, __B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_andnot_epi64(__m512i __A, __m512i __B) { return (__m512i)(~(__v8du)__A & (__v8du)__B); @@ -735,7 +735,7 @@ _mm512_maskz_andnot_epi64(__mmask8 __U, __m512i __A, __m512i __B) __U, __A, __B); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_or_epi32(__m512i __a, __m512i __b) { return (__m512i)((__v16su)__a | (__v16su)__b); @@ -755,7 +755,7 @@ _mm512_maskz_or_epi32(__mmask16 __k, __m512i __a, __m512i __b) return (__m512i)_mm512_mask_or_epi32(_mm512_setzero_si512(), __k, __a, __b); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_or_epi64(__m512i __a, __m512i __b) { return (__m512i)((__v8du)__a | (__v8du)__b); @@ -775,7 +775,7 @@ _mm512_maskz_or_epi64(__mmask8 __k, __m512i __a, __m512i __b) return (__m512i)_mm512_mask_or_epi64(_mm512_setzero_si512(), __k, __a, __b); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_xor_epi32(__m512i __a, __m512i __b) { return (__m512i)((__v16su)__a ^ (__v16su)__b); @@ -795,7 +795,7 @@ _mm512_maskz_xor_epi32(__mmask16 __k, __m512i __a, __m512i __b) return (__m512i)_mm512_mask_xor_epi32(_mm512_setzero_si512(), __k, __a, __b); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR 
_mm512_xor_epi64(__m512i __a, __m512i __b) { return (__m512i)((__v8du)__a ^ (__v8du)__b); @@ -815,19 +815,19 @@ _mm512_maskz_xor_epi64(__mmask8 __k, __m512i __a, __m512i __b) return (__m512i)_mm512_mask_xor_epi64(_mm512_setzero_si512(), __k, __a, __b); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_and_si512(__m512i __a, __m512i __b) { return (__m512i)((__v8du)__a & (__v8du)__b); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_or_si512(__m512i __a, __m512i __b) { return (__m512i)((__v8du)__a | (__v8du)__b); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_xor_si512(__m512i __a, __m512i __b) { return (__m512i)((__v8du)__a ^ (__v8du)__b); @@ -835,45 +835,38 @@ _mm512_xor_si512(__m512i __a, __m512i __b) /* Arithmetic */ -static __inline __m512d __DEFAULT_FN_ATTRS512 -_mm512_add_pd(__m512d __a, __m512d __b) -{ +static __inline __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_add_pd(__m512d __a, __m512d __b) { return (__m512d)((__v8df)__a + (__v8df)__b); } -static __inline __m512 __DEFAULT_FN_ATTRS512 -_mm512_add_ps(__m512 __a, __m512 __b) -{ +static __inline __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_add_ps(__m512 __a, __m512 __b) { return (__m512)((__v16sf)__a + (__v16sf)__b); } -static __inline __m512d __DEFAULT_FN_ATTRS512 -_mm512_mul_pd(__m512d __a, __m512d __b) -{ +static __inline __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_mul_pd(__m512d __a, __m512d __b) { return (__m512d)((__v8df)__a * (__v8df)__b); } -static __inline __m512 __DEFAULT_FN_ATTRS512 -_mm512_mul_ps(__m512 __a, __m512 __b) -{ +static __inline __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_mul_ps(__m512 __a, __m512 __b) { return (__m512)((__v16sf)__a * (__v16sf)__b); } -static __inline __m512d __DEFAULT_FN_ATTRS512 -_mm512_sub_pd(__m512d __a, __m512d __b) -{ +static __inline __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_sub_pd(__m512d __a, __m512d __b) { return (__m512d)((__v8df)__a - (__v8df)__b); } -static __inline __m512 __DEFAULT_FN_ATTRS512 -_mm512_sub_ps(__m512 __a, __m512 __b) -{ +static __inline __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_sub_ps(__m512 __a, __m512 __b) { return (__m512)((__v16sf)__a - (__v16sf)__b); } -static __inline__ __m512i __DEFAULT_FN_ATTRS512 -_mm512_add_epi64 (__m512i __A, __m512i __B) -{ +static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_add_epi64(__m512i __A, __m512i __B) { return (__m512i) ((__v8du) __A + (__v8du) __B); } @@ -2315,9 +2308,8 @@ _mm_maskz_div_sd(__mmask8 __U,__m128d __A, __m128d __B) { (__v2df)_mm_setzero_pd(), \ (__mmask8)(U), (int)(R))) -static __inline __m512d __DEFAULT_FN_ATTRS512 -_mm512_div_pd(__m512d __a, __m512d __b) -{ +static __inline __m512d + __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_div_pd(__m512d __a, __m512d __b) { return (__m512d)((__v8df)__a/(__v8df)__b); } @@ -2335,9 +2327,8 @@ _mm512_maskz_div_pd(__mmask8 __U, __m512d __A, __m512d __B) { (__v8df)_mm512_setzero_pd()); } -static __inline __m512 __DEFAULT_FN_ATTRS512 -_mm512_div_ps(__m512 __a, __m512 __b) -{ +static __inline __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_div_ps(__m512 __a, __m512 __b) { return (__m512)((__v16sf)__a/(__v16sf)__b); } @@ -4123,9 +4114,8 @@ _mm512_cvtss_f32(__m512 __a) /* Unpack and Interleave */ -static __inline __m512d __DEFAULT_FN_ATTRS512 -_mm512_unpackhi_pd(__m512d __a, __m512d __b) -{ +static __inline __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR 
+_mm512_unpackhi_pd(__m512d __a, __m512d __b) { return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b, 1, 9, 1+2, 9+2, 1+4, 9+4, 1+6, 9+6); } @@ -4146,9 +4136,8 @@ _mm512_maskz_unpackhi_pd(__mmask8 __U, __m512d __A, __m512d __B) (__v8df)_mm512_setzero_pd()); } -static __inline __m512d __DEFAULT_FN_ATTRS512 -_mm512_unpacklo_pd(__m512d __a, __m512d __b) -{ +static __inline __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_unpacklo_pd(__m512d __a, __m512d __b) { return (__m512d)__builtin_shufflevector((__v8df)__a, (__v8df)__b, 0, 8, 0+2, 8+2, 0+4, 8+4, 0+6, 8+6); } @@ -4169,9 +4158,8 @@ _mm512_maskz_unpacklo_pd (__mmask8 __U, __m512d __A, __m512d __B) (__v8df)_mm512_setzero_pd()); } -static __inline __m512 __DEFAULT_FN_ATTRS512 -_mm512_unpackhi_ps(__m512 __a, __m512 __b) -{ +static __inline __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_unpackhi_ps(__m512 __a, __m512 __b) { return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b, 2, 18, 3, 19, 2+4, 18+4, 3+4, 19+4, @@ -4195,9 +4183,8 @@ _mm512_maskz_unpackhi_ps (__mmask16 __U, __m512 __A, __m512 __B) (__v16sf)_mm512_setzero_ps()); } -static __inline __m512 __DEFAULT_FN_ATTRS512 -_mm512_unpacklo_ps(__m512 __a, __m512 __b) -{ +static __inline __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_unpacklo_ps(__m512 __a, __m512 __b) { return (__m512)__builtin_shufflevector((__v16sf)__a, (__v16sf)__b, 0, 16, 1, 17, 0+4, 16+4, 1+4, 17+4, @@ -5303,7 +5290,7 @@ _mm512_mask_store_epi64 (void *__P, __mmask8 __U, __m512i __A) (__mmask8) __U); } -static __inline__ __m512d __DEFAULT_FN_ATTRS512 +static __inline__ __m512d __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_movedup_pd (__m512d __A) { return (__m512d)__builtin_shufflevector((__v8df)__A, (__v8df)__A, @@ -8665,7 +8652,7 @@ _mm512_mask_testn_epi64_mask (__mmask8 __U, __m512i __A, __m512i __B) _mm512_setzero_si512()); } -static __inline__ __m512 __DEFAULT_FN_ATTRS512 +static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_movehdup_ps (__m512 __A) { return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A, @@ -8688,7 +8675,7 @@ _mm512_maskz_movehdup_ps (__mmask16 __U, __m512 __A) (__v16sf)_mm512_setzero_ps()); } -static __inline__ __m512 __DEFAULT_FN_ATTRS512 +static __inline__ __m512 __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_moveldup_ps (__m512 __A) { return (__m512)__builtin_shufflevector((__v16sf)__A, (__v16sf)__A, @@ -9337,19 +9324,23 @@ _mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A) * This takes log2(n) steps where n is the number of elements in the vector. 
*/ -static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_add_epi64(__m512i __W) { +static __inline__ long long __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_reduce_add_epi64(__m512i __W) { return __builtin_reduce_add((__v8di)__W); } -static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_epi64(__m512i __W) { +static __inline__ long long __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_reduce_mul_epi64(__m512i __W) { return __builtin_reduce_mul((__v8di)__W); } -static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_and_epi64(__m512i __W) { +static __inline__ long long __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_reduce_and_epi64(__m512i __W) { return __builtin_reduce_and((__v8di)__W); } -static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_or_epi64(__m512i __W) { +static __inline__ long long __DEFAULT_FN_ATTRS512_CONSTEXPR +_mm512_reduce_or_epi64(__m512i __W) { return __builtin_reduce_or((__v8di)__W); } @@ -9400,22 +9391,22 @@ _mm512_mask_reduce_mul_pd(__mmask8 __M, __m512d __W) { return __builtin_ia32_reduce_fmul_pd512(1.0, __W); } -static __inline__ int __DEFAULT_FN_ATTRS512 +static __inline__ int __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_reduce_add_epi32(__m512i __W) { return __builtin_reduce_add((__v16si)__W); } -static __inline__ int __DEFAULT_FN_ATTRS512 +static __inline__ int __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_reduce_mul_epi32(__m512i __W) { return __builtin_reduce_mul((__v16si)__W); } -static __inline__ int __DEFAULT_FN_ATTRS512 +static __inline__ int __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_reduce_and_epi32(__m512i __W) { return __builtin_reduce_and((__v16si)__W); } -static __inline__ int __DEFAULT_FN_ATTRS512 +static __inline__ int __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_reduce_or_epi32(__m512i __W) { return __builtin_reduce_or((__v16si)__W); } @@ -9466,22 +9457,22 @@ _mm512_mask_reduce_mul_ps(__mmask16 __M, __m512 __W) { return __builtin_ia32_reduce_fmul_ps512(1.0f, __W); } -static __inline__ long long __DEFAULT_FN_ATTRS512 +static __inline__ long long __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_reduce_max_epi64(__m512i __V) { return __builtin_reduce_max((__v8di)__V); } -static __inline__ unsigned long long __DEFAULT_FN_ATTRS512 +static __inline__ unsigned long long __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_reduce_max_epu64(__m512i __V) { return __builtin_reduce_max((__v8du)__V); } -static __inline__ long long __DEFAULT_FN_ATTRS512 +static __inline__ long long __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_reduce_min_epi64(__m512i __V) { return __builtin_reduce_min((__v8di)__V); } -static __inline__ unsigned long long __DEFAULT_FN_ATTRS512 +static __inline__ unsigned long long __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_reduce_min_epu64(__m512i __V) { return __builtin_reduce_min((__v8du)__V); } @@ -9509,22 +9500,22 @@ _mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) { __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-1LL), __M, __V); return __builtin_reduce_min((__v8du)__V); } -static __inline__ int __DEFAULT_FN_ATTRS512 +static __inline__ int __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_reduce_max_epi32(__m512i __V) { return __builtin_reduce_max((__v16si)__V); } -static __inline__ unsigned int __DEFAULT_FN_ATTRS512 +static __inline__ unsigned int __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_reduce_max_epu32(__m512i __V) { return __builtin_reduce_max((__v16su)__V); } -static __inline__ int __DEFAULT_FN_ATTRS512 +static __inline__ int __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_reduce_min_epi32(__m512i __V) { return __builtin_reduce_min((__v16si)__V); } -static __inline__ unsigned int 
__DEFAULT_FN_ATTRS512 +static __inline__ unsigned int __DEFAULT_FN_ATTRS512_CONSTEXPR _mm512_reduce_min_epu32(__m512i __V) { return __builtin_reduce_min((__v16su)__V); } diff --git a/clang/lib/Headers/avxintrin.h b/clang/lib/Headers/avxintrin.h index b9ca013..2be4f68 100644 --- a/clang/lib/Headers/avxintrin.h +++ b/clang/lib/Headers/avxintrin.h @@ -87,9 +87,8 @@ typedef __bf16 __m256bh __attribute__((__vector_size__(32), __aligned__(32))); /// A 256-bit vector of [4 x double] containing one of the source operands. /// \returns A 256-bit vector of [4 x double] containing the sums of both /// operands. -static __inline __m256d __DEFAULT_FN_ATTRS -_mm256_add_pd(__m256d __a, __m256d __b) -{ +static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR +_mm256_add_pd(__m256d __a, __m256d __b) { return (__m256d)((__v4df)__a+(__v4df)__b); } @@ -105,9 +104,8 @@ _mm256_add_pd(__m256d __a, __m256d __b) /// A 256-bit vector of [8 x float] containing one of the source operands. /// \returns A 256-bit vector of [8 x float] containing the sums of both /// operands. -static __inline __m256 __DEFAULT_FN_ATTRS -_mm256_add_ps(__m256 __a, __m256 __b) -{ +static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_add_ps(__m256 __a, + __m256 __b) { return (__m256)((__v8sf)__a+(__v8sf)__b); } @@ -123,9 +121,8 @@ _mm256_add_ps(__m256 __a, __m256 __b) /// A 256-bit vector of [4 x double] containing the subtrahend. /// \returns A 256-bit vector of [4 x double] containing the differences between /// both operands. -static __inline __m256d __DEFAULT_FN_ATTRS -_mm256_sub_pd(__m256d __a, __m256d __b) -{ +static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR +_mm256_sub_pd(__m256d __a, __m256d __b) { return (__m256d)((__v4df)__a-(__v4df)__b); } @@ -141,9 +138,8 @@ _mm256_sub_pd(__m256d __a, __m256d __b) /// A 256-bit vector of [8 x float] containing the subtrahend. /// \returns A 256-bit vector of [8 x float] containing the differences between /// both operands. -static __inline __m256 __DEFAULT_FN_ATTRS -_mm256_sub_ps(__m256 __a, __m256 __b) -{ +static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_sub_ps(__m256 __a, + __m256 __b) { return (__m256)((__v8sf)__a-(__v8sf)__b); } @@ -197,9 +193,8 @@ _mm256_addsub_ps(__m256 __a, __m256 __b) /// A 256-bit vector of [4 x double] containing the divisor. /// \returns A 256-bit vector of [4 x double] containing the quotients of both /// operands. -static __inline __m256d __DEFAULT_FN_ATTRS -_mm256_div_pd(__m256d __a, __m256d __b) -{ +static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR +_mm256_div_pd(__m256d __a, __m256d __b) { return (__m256d)((__v4df)__a/(__v4df)__b); } @@ -215,9 +210,8 @@ _mm256_div_pd(__m256d __a, __m256d __b) /// A 256-bit vector of [8 x float] containing the divisor. /// \returns A 256-bit vector of [8 x float] containing the quotients of both /// operands. -static __inline __m256 __DEFAULT_FN_ATTRS -_mm256_div_ps(__m256 __a, __m256 __b) -{ +static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_div_ps(__m256 __a, + __m256 __b) { return (__m256)((__v8sf)__a/(__v8sf)__b); } @@ -317,9 +311,8 @@ _mm256_min_ps(__m256 __a, __m256 __b) /// A 256-bit vector of [4 x double] containing one of the operands. /// \returns A 256-bit vector of [4 x double] containing the products of both /// operands. 
-static __inline __m256d __DEFAULT_FN_ATTRS -_mm256_mul_pd(__m256d __a, __m256d __b) -{ +static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR +_mm256_mul_pd(__m256d __a, __m256d __b) { return (__m256d)((__v4df)__a * (__v4df)__b); } @@ -335,9 +328,8 @@ _mm256_mul_pd(__m256d __a, __m256d __b) /// A 256-bit vector of [8 x float] containing one of the operands. /// \returns A 256-bit vector of [8 x float] containing the products of both /// operands. -static __inline __m256 __DEFAULT_FN_ATTRS -_mm256_mul_ps(__m256 __a, __m256 __b) -{ +static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_mul_ps(__m256 __a, + __m256 __b) { return (__m256)((__v8sf)__a * (__v8sf)__b); } @@ -555,7 +547,7 @@ _mm256_rcp_ps(__m256 __a) /// A 256-bit vector of [4 x double] containing one of the source operands. /// \returns A 256-bit vector of [4 x double] containing the bitwise AND of the /// values between both operands. -static __inline __m256d __DEFAULT_FN_ATTRS +static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_and_pd(__m256d __a, __m256d __b) { return (__m256d)((__v4du)__a & (__v4du)__b); @@ -573,7 +565,7 @@ _mm256_and_pd(__m256d __a, __m256d __b) /// A 256-bit vector of [8 x float] containing one of the source operands. /// \returns A 256-bit vector of [8 x float] containing the bitwise AND of the /// values between both operands. -static __inline __m256 __DEFAULT_FN_ATTRS +static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_and_ps(__m256 __a, __m256 __b) { return (__m256)((__v8su)__a & (__v8su)__b); @@ -594,7 +586,7 @@ _mm256_and_ps(__m256 __a, __m256 __b) /// \returns A 256-bit vector of [4 x double] containing the bitwise AND of the /// values of the second operand and the one's complement of the first /// operand. -static __inline __m256d __DEFAULT_FN_ATTRS +static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_andnot_pd(__m256d __a, __m256d __b) { return (__m256d)(~(__v4du)__a & (__v4du)__b); @@ -615,7 +607,7 @@ _mm256_andnot_pd(__m256d __a, __m256d __b) /// \returns A 256-bit vector of [8 x float] containing the bitwise AND of the /// values of the second operand and the one's complement of the first /// operand. -static __inline __m256 __DEFAULT_FN_ATTRS +static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_andnot_ps(__m256 __a, __m256 __b) { return (__m256)(~(__v8su)__a & (__v8su)__b); @@ -633,7 +625,7 @@ _mm256_andnot_ps(__m256 __a, __m256 __b) /// A 256-bit vector of [4 x double] containing one of the source operands. /// \returns A 256-bit vector of [4 x double] containing the bitwise OR of the /// values between both operands. -static __inline __m256d __DEFAULT_FN_ATTRS +static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_or_pd(__m256d __a, __m256d __b) { return (__m256d)((__v4du)__a | (__v4du)__b); @@ -651,7 +643,7 @@ _mm256_or_pd(__m256d __a, __m256d __b) /// A 256-bit vector of [8 x float] containing one of the source operands. /// \returns A 256-bit vector of [8 x float] containing the bitwise OR of the /// values between both operands. -static __inline __m256 __DEFAULT_FN_ATTRS +static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_or_ps(__m256 __a, __m256 __b) { return (__m256)((__v8su)__a | (__v8su)__b); @@ -669,7 +661,7 @@ _mm256_or_ps(__m256 __a, __m256 __b) /// A 256-bit vector of [4 x double] containing one of the source operands. /// \returns A 256-bit vector of [4 x double] containing the bitwise XOR of the /// values between both operands. 
-static __inline __m256d __DEFAULT_FN_ATTRS +static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_xor_pd(__m256d __a, __m256d __b) { return (__m256d)((__v4du)__a ^ (__v4du)__b); @@ -687,7 +679,7 @@ _mm256_xor_pd(__m256d __a, __m256d __b) /// A 256-bit vector of [8 x float] containing one of the source operands. /// \returns A 256-bit vector of [8 x float] containing the bitwise XOR of the /// values between both operands. -static __inline __m256 __DEFAULT_FN_ATTRS +static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_xor_ps(__m256 __a, __m256 __b) { return (__m256)((__v8su)__a ^ (__v8su)__b); @@ -2392,7 +2384,7 @@ _mm256_cvtss_f32(__m256 __a) /// return value. /// \returns A 256-bit vector of [8 x float] containing the moved and duplicated /// values. -static __inline __m256 __DEFAULT_FN_ATTRS +static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_movehdup_ps(__m256 __a) { return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 1, 1, 3, 3, 5, 5, 7, 7); @@ -2417,7 +2409,7 @@ _mm256_movehdup_ps(__m256 __a) /// return value. /// \returns A 256-bit vector of [8 x float] containing the moved and duplicated /// values. -static __inline __m256 __DEFAULT_FN_ATTRS +static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_moveldup_ps(__m256 __a) { return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 0, 2, 2, 4, 4, 6, 6); @@ -2439,7 +2431,7 @@ _mm256_moveldup_ps(__m256 __a) /// the return value. /// \returns A 256-bit vector of [4 x double] containing the moved and /// duplicated values. -static __inline __m256d __DEFAULT_FN_ATTRS +static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_movedup_pd(__m256d __a) { return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 0, 2, 2); @@ -2462,9 +2454,8 @@ _mm256_movedup_pd(__m256d __a) /// Bits [127:64] are written to bits [127:64] of the return value. \n /// Bits [255:192] are written to bits [255:192] of the return value. \n /// \returns A 256-bit vector of [4 x double] containing the interleaved values. -static __inline __m256d __DEFAULT_FN_ATTRS -_mm256_unpackhi_pd(__m256d __a, __m256d __b) -{ +static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR +_mm256_unpackhi_pd(__m256d __a, __m256d __b) { return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 1, 5, 1+2, 5+2); } @@ -2484,9 +2475,8 @@ _mm256_unpackhi_pd(__m256d __a, __m256d __b) /// Bits [63:0] are written to bits [127:64] of the return value. \n /// Bits [191:128] are written to bits [255:192] of the return value. \n /// \returns A 256-bit vector of [4 x double] containing the interleaved values. -static __inline __m256d __DEFAULT_FN_ATTRS -_mm256_unpacklo_pd(__m256d __a, __m256d __b) -{ +static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR +_mm256_unpacklo_pd(__m256d __a, __m256d __b) { return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 0, 4, 0+2, 4+2); } @@ -2511,9 +2501,8 @@ _mm256_unpacklo_pd(__m256d __a, __m256d __b) /// Bits [223:192] are written to bits [191:160] of the return value. \n /// Bits [255:224] are written to bits [255:224] of the return value. /// \returns A 256-bit vector of [8 x float] containing the interleaved values. -static __inline __m256 __DEFAULT_FN_ATTRS -_mm256_unpackhi_ps(__m256 __a, __m256 __b) -{ +static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR +_mm256_unpackhi_ps(__m256 __a, __m256 __b) { return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1); } @@ -2538,9 +2527,8 @@ _mm256_unpackhi_ps(__m256 __a, __m256 __b) /// Bits [159:128] are written to bits [191:160] of the return value. 
\n /// Bits [191:160] are written to bits [255:224] of the return value. /// \returns A 256-bit vector of [8 x float] containing the interleaved values. -static __inline __m256 __DEFAULT_FN_ATTRS -_mm256_unpacklo_ps(__m256 __a, __m256 __b) -{ +static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR +_mm256_unpacklo_ps(__m256 __a, __m256 __b) { return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1); } diff --git a/clang/lib/Headers/cpuid.h b/clang/lib/Headers/cpuid.h index 52addb7..ce8c79e 100644 --- a/clang/lib/Headers/cpuid.h +++ b/clang/lib/Headers/cpuid.h @@ -345,10 +345,15 @@ static __inline int __get_cpuid_count (unsigned int __leaf, // In some configurations, __cpuidex is defined as a builtin (primarily // -fms-extensions) which will conflict with the __cpuidex definition below. #if !(__has_builtin(__cpuidex)) +// In some cases, offloading will set the host as the aux triple and define the +// builtin. Given __has_builtin does not detect builtins on aux triples, we need +// to explicitly check for some offloading cases. +#ifndef __NVPTX__ static __inline void __cpuidex(int __cpu_info[4], int __leaf, int __subleaf) { __cpuid_count(__leaf, __subleaf, __cpu_info[0], __cpu_info[1], __cpu_info[2], __cpu_info[3]); } #endif +#endif #endif /* __CPUID_H */ diff --git a/clang/lib/Headers/emmintrin.h b/clang/lib/Headers/emmintrin.h index 78e8a42..770bb5c 100644 --- a/clang/lib/Headers/emmintrin.h +++ b/clang/lib/Headers/emmintrin.h @@ -2127,8 +2127,9 @@ _mm_add_epi32(__m128i __a, __m128i __b) { /// \param __b /// A 64-bit integer. /// \returns A 64-bit integer containing the sum of both parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_add_si64(__m64 __a, __m64 __b) { - return (__m64)(((unsigned long long)__a) + ((unsigned long long)__b)); +static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_add_si64(__m64 __a, + __m64 __b) { + return (__m64)(((__v1du)__a)[0] + ((__v1du)__b)[0]); } /// Adds the corresponding elements of two 128-bit vectors of [2 x i64], @@ -2557,8 +2558,9 @@ _mm_sub_epi32(__m128i __a, __m128i __b) { /// A 64-bit integer vector containing the subtrahend. /// \returns A 64-bit integer vector containing the difference of the values in /// the operands. -static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_sub_si64(__m64 __a, __m64 __b) { - return (__m64)((unsigned long long)__a - (unsigned long long)__b); +static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_sub_si64(__m64 __a, + __m64 __b) { + return (__m64)(((__v1du)__a)[0] - ((__v1du)__b)[0]); } /// Subtracts the corresponding elements of two [2 x i64] vectors. @@ -2676,8 +2678,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu16(__m128i __a, /// A 128-bit integer vector containing one of the source operands. /// \returns A 128-bit integer vector containing the bitwise AND of the values /// in both operands. -static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_and_si128(__m128i __a, - __m128i __b) { +static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR +_mm_and_si128(__m128i __a, __m128i __b) { return (__m128i)((__v2du)__a & (__v2du)__b); } @@ -2695,8 +2697,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_and_si128(__m128i __a, /// A 128-bit vector containing the right source operand. /// \returns A 128-bit integer vector containing the bitwise AND of the one's /// complement of the first operand and the values in the second operand. 
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_andnot_si128(__m128i __a, - __m128i __b) { +static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR +_mm_andnot_si128(__m128i __a, __m128i __b) { return (__m128i)(~(__v2du)__a & (__v2du)__b); } /// Performs a bitwise OR of two 128-bit integer vectors. @@ -2711,8 +2713,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_andnot_si128(__m128i __a, /// A 128-bit integer vector containing one of the source operands. /// \returns A 128-bit integer vector containing the bitwise OR of the values /// in both operands. -static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_or_si128(__m128i __a, - __m128i __b) { +static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR +_mm_or_si128(__m128i __a, __m128i __b) { return (__m128i)((__v2du)__a | (__v2du)__b); } @@ -2728,8 +2730,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_or_si128(__m128i __a, /// A 128-bit integer vector containing one of the source operands. /// \returns A 128-bit integer vector containing the bitwise exclusive OR of the /// values in both operands. -static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_xor_si128(__m128i __a, - __m128i __b) { +static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR +_mm_xor_si128(__m128i __a, __m128i __b) { return (__m128i)((__v2du)__a ^ (__v2du)__b); } diff --git a/clang/lib/Headers/mmintrin.h b/clang/lib/Headers/mmintrin.h index dc0fa5c..5a02a455 100644 --- a/clang/lib/Headers/mmintrin.h +++ b/clang/lib/Headers/mmintrin.h @@ -85,7 +85,7 @@ _mm_empty(void) { /// A 32-bit integer value. /// \returns A 64-bit integer vector. The lower 32 bits contain the value of the /// parameter. The upper 32 bits are set to 0. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_cvtsi32_si64(int __i) { return __extension__ (__m64)(__v2si){__i, 0}; @@ -102,7 +102,7 @@ _mm_cvtsi32_si64(int __i) /// A 64-bit integer vector. /// \returns A 32-bit signed integer value containing the lower 32 bits of the /// parameter. -static __inline__ int __DEFAULT_FN_ATTRS_SSE2 +static __inline__ int __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_cvtsi64_si32(__m64 __m) { return ((__v2si)__m)[0]; @@ -118,10 +118,10 @@ _mm_cvtsi64_si32(__m64 __m) /// A 64-bit signed integer. /// \returns A 64-bit integer vector containing the same bitwise pattern as the /// parameter. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_cvtsi64_m64(long long __i) { - return (__m64)__i; + return __extension__ (__m64)(__v1di){__i}; } /// Casts a 64-bit integer vector into a 64-bit signed integer value. @@ -134,10 +134,10 @@ _mm_cvtsi64_m64(long long __i) /// A 64-bit integer vector. /// \returns A 64-bit signed integer containing the same bitwise pattern as the /// parameter. -static __inline__ long long __DEFAULT_FN_ATTRS_SSE2 +static __inline__ long long __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_cvtm64_si64(__m64 __m) { - return (long long)__m; + return ((__v1di)__m)[0]; } /// Converts, with saturation, 16-bit signed integers from both 64-bit integer @@ -379,7 +379,7 @@ _mm_unpacklo_pi32(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [8 x i8]. /// \returns A 64-bit integer vector of [8 x i8] containing the sums of both /// parameters. 
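In mmintrin.h the substantive change is not only the attribute: scalar casts such as (__m64)__i become single-lane vector accesses like ((__v1di)__m)[0], which the constant evaluator can see through. A round-trip sketch (assumes Clang with MMX/SSE2 enabled):

    #include <mmintrin.h>

    // _mm_cvtsi64_m64 / _mm_cvtm64_si64 are now lane reads of a one-element
    // vector, so the round trip is checkable in a constant expression.
    constexpr __m64 packed = _mm_cvtsi64_m64(0x0123456789abcdefLL);
    static_assert(_mm_cvtm64_si64(packed) == 0x0123456789abcdefLL, "");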
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_add_pi8(__m64 __m1, __m64 __m2) { return (__m64)(((__v8qu)__m1) + ((__v8qu)__m2)); @@ -400,7 +400,7 @@ _mm_add_pi8(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [4 x i16]. /// \returns A 64-bit integer vector of [4 x i16] containing the sums of both /// parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_add_pi16(__m64 __m1, __m64 __m2) { return (__m64)(((__v4hu)__m1) + ((__v4hu)__m2)); @@ -421,7 +421,7 @@ _mm_add_pi16(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [2 x i32]. /// \returns A 64-bit integer vector of [2 x i32] containing the sums of both /// parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_add_pi32(__m64 __m1, __m64 __m2) { return (__m64)(((__v2su)__m1) + ((__v2su)__m2)); @@ -536,7 +536,7 @@ _mm_adds_pu16(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [8 x i8] containing the subtrahends. /// \returns A 64-bit integer vector of [8 x i8] containing the differences of /// both parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_sub_pi8(__m64 __m1, __m64 __m2) { return (__m64)(((__v8qu)__m1) - ((__v8qu)__m2)); @@ -557,7 +557,7 @@ _mm_sub_pi8(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [4 x i16] containing the subtrahends. /// \returns A 64-bit integer vector of [4 x i16] containing the differences of /// both parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_sub_pi16(__m64 __m1, __m64 __m2) { return (__m64)(((__v4hu)__m1) - ((__v4hu)__m2)); @@ -578,7 +578,7 @@ _mm_sub_pi16(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [2 x i32] containing the subtrahends. /// \returns A 64-bit integer vector of [2 x i32] containing the differences of /// both parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_sub_pi32(__m64 __m1, __m64 __m2) { return (__m64)(((__v2su)__m1) - ((__v2su)__m2)); @@ -745,7 +745,7 @@ _mm_mulhi_pi16(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [4 x i16]. /// \returns A 64-bit integer vector of [4 x i16] containing the lower 16 bits /// of the products of both parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_mullo_pi16(__m64 __m1, __m64 __m2) { return (__m64)(((__v4hu)__m1) * ((__v4hu)__m2)); @@ -1134,7 +1134,7 @@ _mm_srli_si64(__m64 __m, int __count) /// A 64-bit integer vector. /// \returns A 64-bit integer vector containing the bitwise AND of both /// parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_and_si64(__m64 __m1, __m64 __m2) { return (__m64)(((__v1du)__m1) & ((__v1du)__m2)); @@ -1155,7 +1155,7 @@ _mm_and_si64(__m64 __m1, __m64 __m2) /// A 64-bit integer vector. /// \returns A 64-bit integer vector containing the bitwise AND of the second /// parameter and the one's complement of the first parameter. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_andnot_si64(__m64 __m1, __m64 __m2) { return (__m64)(~((__v1du)__m1) & ((__v1du)__m2)); @@ -1173,7 +1173,7 @@ _mm_andnot_si64(__m64 __m1, __m64 __m2) /// A 64-bit integer vector. 
/// \returns A 64-bit integer vector containing the bitwise OR of both /// parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_or_si64(__m64 __m1, __m64 __m2) { return (__m64)(((__v1du)__m1) | ((__v1du)__m2)); @@ -1191,7 +1191,7 @@ _mm_or_si64(__m64 __m1, __m64 __m2) /// A 64-bit integer vector. /// \returns A 64-bit integer vector containing the bitwise exclusive OR of both /// parameters. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_xor_si64(__m64 __m1, __m64 __m2) { return (__m64)(((__v1du)__m1) ^ ((__v1du)__m2)); @@ -1213,7 +1213,7 @@ _mm_xor_si64(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [8 x i8]. /// \returns A 64-bit integer vector of [8 x i8] containing the comparison /// results. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_cmpeq_pi8(__m64 __m1, __m64 __m2) { return (__m64)(((__v8qi)__m1) == ((__v8qi)__m2)); @@ -1235,7 +1235,7 @@ _mm_cmpeq_pi8(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [4 x i16]. /// \returns A 64-bit integer vector of [4 x i16] containing the comparison /// results. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_cmpeq_pi16(__m64 __m1, __m64 __m2) { return (__m64)(((__v4hi)__m1) == ((__v4hi)__m2)); @@ -1257,7 +1257,7 @@ _mm_cmpeq_pi16(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [2 x i32]. /// \returns A 64-bit integer vector of [2 x i32] containing the comparison /// results. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_cmpeq_pi32(__m64 __m1, __m64 __m2) { return (__m64)(((__v2si)__m1) == ((__v2si)__m2)); @@ -1279,7 +1279,7 @@ _mm_cmpeq_pi32(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [8 x i8]. /// \returns A 64-bit integer vector of [8 x i8] containing the comparison /// results. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_cmpgt_pi8(__m64 __m1, __m64 __m2) { /* This function always performs a signed comparison, but __v8qi is a char @@ -1303,7 +1303,7 @@ _mm_cmpgt_pi8(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [4 x i16]. /// \returns A 64-bit integer vector of [4 x i16] containing the comparison /// results. -static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_cmpgt_pi16(__m64 __m1, __m64 __m2) { return (__m64)((__v4hi)__m1 > (__v4hi)__m2); @@ -1325,7 +1325,7 @@ _mm_cmpgt_pi16(__m64 __m1, __m64 __m2) /// A 64-bit integer vector of [2 x i32]. /// \returns A 64-bit integer vector of [2 x i32] containing the comparison /// results. 
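// Illustrative sketch, not part of the patch: vector-extension comparisons
// produce a signed integer vector of the same width, with -1 (all bits set)
// in true lanes and 0 in false lanes -- the same mask convention as the MMX
// pcmpeq/pcmpgt instructions -- and they also fold in constant expressions,
// which is what makes the comparison intrinsics above constexpr-capable.
// A standalone analogue (hypothetical names):
typedef short v4hi_t __attribute__((__vector_size__(8)));

constexpr v4hi_t cmpgt4(v4hi_t a, v4hi_t b) { return a > b; }

constexpr v4hi_t X = {2, 0, 5, -1};
constexpr v4hi_t Y = {1, 1, 5, -2};
constexpr v4hi_t M = cmpgt4(X, Y);
static_assert(M[0] == -1 && M[1] == 0, "true lanes all ones, false lanes zero");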
-static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2 +static __inline__ __m64 __DEFAULT_FN_ATTRS_SSE2_CONSTEXPR _mm_cmpgt_pi32(__m64 __m1, __m64 __m2) { return (__m64)((__v2si)__m1 > (__v2si)__m2); diff --git a/clang/lib/Interpreter/RemoteJITUtils.cpp b/clang/lib/Interpreter/RemoteJITUtils.cpp index c0e663b..c100f46 100644 --- a/clang/lib/Interpreter/RemoteJITUtils.cpp +++ b/clang/lib/Interpreter/RemoteJITUtils.cpp @@ -33,6 +33,10 @@ using namespace llvm; using namespace llvm::orc; +#if LLVM_ON_UNIX +static std::vector<pid_t> LaunchedExecutorPID; +#endif + Expected<uint64_t> getSlabAllocSize(StringRef SizeString) { SizeString = SizeString.trim(); @@ -89,9 +93,14 @@ createSharedMemoryManager(SimpleRemoteEPC &SREPC, SlabSize, SREPC, SAs); } +// Launches an out-of-process executor for remote JIT. The calling program can +// provide a CustomizeFork callback, which allows it to run custom code in the +// child process before exec. This enables sending custom setup or code to be +// executed in the child (out-of-process) executor. Expected<std::unique_ptr<SimpleRemoteEPC>> launchExecutor(StringRef ExecutablePath, bool UseSharedMemory, - llvm::StringRef SlabAllocateSizeString) { + llvm::StringRef SlabAllocateSizeString, + std::function<void()> CustomizeFork) { #ifndef LLVM_ON_UNIX // FIXME: Add support for Windows. return make_error<StringError>("-" + ExecutablePath + @@ -134,6 +143,9 @@ launchExecutor(StringRef ExecutablePath, bool UseSharedMemory, close(ToExecutor[WriteEnd]); close(FromExecutor[ReadEnd]); + if (CustomizeFork) + CustomizeFork(); + // Execute the child process. std::unique_ptr<char[]> ExecutorPath, FDSpecifier; { @@ -158,6 +170,8 @@ launchExecutor(StringRef ExecutablePath, bool UseSharedMemory, } // else we're the parent... + LaunchedExecutorPID.push_back(ChildPID); + // Close the child ends of the pipes close(ToExecutor[ReadEnd]); close(FromExecutor[WriteEnd]); @@ -265,3 +279,18 @@ connectTCPSocket(StringRef NetworkAddress, bool UseSharedMemory, std::move(S), *SockFD, *SockFD); #endif } + +#if LLVM_ON_UNIX + +pid_t getLastLaunchedExecutorPID() { + if (!LaunchedExecutorPID.size()) + return -1; + return LaunchedExecutorPID.back(); +} + +pid_t getNthLaunchedExecutorPID(int n) { + if (n - 1 < 0 || n - 1 >= static_cast<int>(LaunchedExecutorPID.size())) + return -1; + return LaunchedExecutorPID.at(n - 1); +} +#endif
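// Illustrative usage sketch, not part of the patch (Unix only; the header
// path and environment variable are assumptions): the CustomizeFork callback
// runs in the forked child after the pipes are set up and just before exec,
// so it can adjust child-process state such as the environment. The parent
// records the child's PID, retrievable via the helpers (1-based index for
// getNthLaunchedExecutorPID):
#include "clang/Interpreter/RemoteJITUtils.h"
#include <cstdlib>

llvm::Expected<std::unique_ptr<llvm::orc::SimpleRemoteEPC>>
startTracedExecutor(llvm::StringRef ExecutorPath) {
  auto EPC = launchExecutor(ExecutorPath, /*UseSharedMemory=*/false,
                            /*SlabAllocateSizeString=*/"",
                            /*CustomizeFork=*/[] {
                              // Runs in the child, pre-exec.
                              setenv("EXECUTOR_TRACE", "1", /*overwrite=*/1);
                            });
  if (EPC) {
    pid_t Child = getLastLaunchedExecutorPID(); // most recently forked child
    (void)Child;
  }
  return EPC;
}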
\ No newline at end of file diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp index 8536e04..17f17f8 100644 --- a/clang/lib/Sema/SemaHLSL.cpp +++ b/clang/lib/Sema/SemaHLSL.cpp @@ -337,16 +337,9 @@ static bool isZeroSizedArray(const ConstantArrayType *CAT) { return CAT != nullptr; } -// Returns true if the record type is an HLSL resource class or an array of -// resource classes -static bool isResourceRecordTypeOrArrayOf(const Type *Ty) { - while (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty)) - Ty = CAT->getArrayElementTypeNoTypeQual(); - return HLSLAttributedResourceType::findHandleTypeOnResource(Ty) != nullptr; -} - static bool isResourceRecordTypeOrArrayOf(VarDecl *VD) { - return isResourceRecordTypeOrArrayOf(VD->getType().getTypePtr()); + const Type *Ty = VD->getType().getTypePtr(); + return Ty->isHLSLResourceRecord() || Ty->isHLSLResourceRecordArray(); } // Returns true if the type is a leaf element type that is not valid to be @@ -355,7 +348,7 @@ static bool isResourceRecordTypeOrArrayOf(VarDecl *VD) { // type or if it is a record type that needs to be inspected further. static bool isInvalidConstantBufferLeafElementType(const Type *Ty) { Ty = Ty->getUnqualifiedDesugaredType(); - if (isResourceRecordTypeOrArrayOf(Ty)) + if (Ty->isHLSLResourceRecord() || Ty->isHLSLResourceRecordArray()) return true; if (Ty->isRecordType()) return Ty->getAsCXXRecordDecl()->isEmpty(); @@ -3597,7 +3590,7 @@ void SemaHLSL::deduceAddressSpace(VarDecl *Decl) { return; // Resource handles. - if (isResourceRecordTypeOrArrayOf(Type->getUnqualifiedDesugaredType())) + if (Type->isHLSLResourceRecord() || Type->isHLSLResourceRecordArray()) return; // Only static globals belong to the Private address space. @@ -3637,10 +3630,7 @@ void SemaHLSL::ActOnVariableDeclarator(VarDecl *VD) { if (VD->getType()->isHLSLIntangibleType()) collectResourceBindingsOnVarDecl(VD); - const Type *VarType = VD->getType().getTypePtr(); - while (VarType->isArrayType()) - VarType = VarType->getArrayElementTypeNoTypeQual(); - if (VarType->isHLSLResourceRecord() || + if (isResourceRecordTypeOrArrayOf(VD) || VD->hasAttr<HLSLVkConstantIdAttr>()) { // Make the variable for resources static. The global externally visible // storage is accessed through the handle, which is a member. The variable diff --git a/clang/lib/Sema/SemaOpenACC.cpp b/clang/lib/Sema/SemaOpenACC.cpp index 62fe3d1..4d58b4a 100644 --- a/clang/lib/Sema/SemaOpenACC.cpp +++ b/clang/lib/Sema/SemaOpenACC.cpp @@ -646,8 +646,17 @@ ExprResult CheckVarType(SemaOpenACC &S, OpenACCClauseKind CK, Expr *VarExpr, if (auto *RefTy = InnerTy->getAs<ReferenceType>()) InnerTy = RefTy->getPointeeType(); - if (auto *ArrTy = InnerTy->getAsArrayTypeUnsafe()) + if (auto *ArrTy = InnerTy->getAsArrayTypeUnsafe()) { + // Non constant arrays decay to 'pointer', so warn and return that we're + // successful. 
+ if (!ArrTy->isConstantArrayType()) { + S.Diag(InnerLoc, clang::diag::warn_acc_var_referenced_non_const_array) + << InnerTy << CK; + return VarExpr; + } + return CheckVarType(S, CK, VarExpr, InnerLoc, ArrTy->getElementType()); + } auto *RD = InnerTy->getAsCXXRecordDecl(); @@ -2575,8 +2584,8 @@ SemaOpenACC::ActOnOpenACCAsteriskSizeExpr(SourceLocation AsteriskLoc) { return BuildOpenACCAsteriskSizeExpr(AsteriskLoc); } -VarDecl *SemaOpenACC::CreateInitRecipe(OpenACCClauseKind CK, - const Expr *VarExpr) { +std::pair<VarDecl *, VarDecl *> +SemaOpenACC::CreateInitRecipe(OpenACCClauseKind CK, const Expr *VarExpr) { // Strip off any array subscripts/array section exprs to get to the type of // the variable. while (isa_and_present<ArraySectionExpr, ArraySubscriptExpr>(VarExpr)) { @@ -2590,7 +2599,7 @@ VarDecl *SemaOpenACC::CreateInitRecipe(OpenACCClauseKind CK, // fill in with nullptr. We'll count on TreeTransform to make this if // necessary. if (!VarExpr || VarExpr->getType()->isDependentType()) - return nullptr; + return {nullptr, nullptr}; QualType VarTy = VarExpr->getType().getNonReferenceType().getUnqualifiedType(); @@ -2602,6 +2611,7 @@ VarDecl *SemaOpenACC::CreateInitRecipe(OpenACCClauseKind CK, getASTContext().getTrivialTypeSourceInfo(VarTy), SC_Auto); ExprResult Init; + VarDecl *Temporary = nullptr; if (CK == OpenACCClauseKind::Private) { // Trap errors so we don't get weird ones here. If we can't init, we'll just @@ -2626,5 +2636,5 @@ VarDecl *SemaOpenACC::CreateInitRecipe(OpenACCClauseKind CK, Recipe->setInitStyle(VarDecl::CallInit); } - return Recipe; + return {Recipe, Temporary}; } diff --git a/clang/lib/Sema/SemaOpenACCClause.cpp b/clang/lib/Sema/SemaOpenACCClause.cpp index 88d217f..e8a18243 100644 --- a/clang/lib/Sema/SemaOpenACCClause.cpp +++ b/clang/lib/Sema/SemaOpenACCClause.cpp @@ -800,7 +800,7 @@ OpenACCClause *SemaOpenACCClauseVisitor::VisitPrivateClause( // Assemble the recipes list. for (const Expr *VarExpr : Clause.getVarList()) InitRecipes.push_back( - SemaRef.CreateInitRecipe(OpenACCClauseKind::Private, VarExpr)); + SemaRef.CreateInitRecipe(OpenACCClauseKind::Private, VarExpr).first); return OpenACCPrivateClause::Create( Ctx, Clause.getBeginLoc(), Clause.getLParenLoc(), Clause.getVarList(), @@ -813,7 +813,7 @@ OpenACCClause *SemaOpenACCClauseVisitor::VisitFirstPrivateClause( // really isn't anything to do here. GCC does some duplicate-finding, though // it isn't apparent in the standard where this is justified. - llvm::SmallVector<VarDecl *> InitRecipes; + llvm::SmallVector<OpenACCFirstPrivateRecipe> InitRecipes; // Assemble the recipes list. for (const Expr *VarExpr : Clause.getVarList()) diff --git a/clang/lib/Sema/SemaTemplate.cpp b/clang/lib/Sema/SemaTemplate.cpp index b6b8932..2d8fdb5 100644 --- a/clang/lib/Sema/SemaTemplate.cpp +++ b/clang/lib/Sema/SemaTemplate.cpp @@ -367,7 +367,7 @@ bool Sema::DiagnoseUnknownTemplateName(const IdentifierInfo &II, // The code is missing a 'template' keyword prior to the dependent template // name. 
- NestedNameSpecifier *Qualifier = (NestedNameSpecifier *)SS->getScopeRep(); + NestedNameSpecifier *Qualifier = SS->getScopeRep(); SuggestedTemplate = TemplateTy::make(Context.getDependentTemplateName( {Qualifier, &II, /*HasTemplateKeyword=*/false})); Diag(IILoc, diag::err_template_kw_missing) diff --git a/clang/lib/Sema/SemaWasm.cpp b/clang/lib/Sema/SemaWasm.cpp index 8998492..e773113 100644 --- a/clang/lib/Sema/SemaWasm.cpp +++ b/clang/lib/Sema/SemaWasm.cpp @@ -17,6 +17,7 @@ #include "clang/Basic/AddressSpaces.h" #include "clang/Basic/DiagnosticSema.h" #include "clang/Basic/TargetBuiltins.h" +#include "clang/Basic/TargetInfo.h" #include "clang/Sema/Attr.h" #include "clang/Sema/Sema.h" @@ -227,7 +228,8 @@ bool SemaWasm::BuiltinWasmTableCopy(CallExpr *TheCall) { return false; } -bool SemaWasm::BuiltinWasmTestFunctionPointerSignature(CallExpr *TheCall) { +bool SemaWasm::BuiltinWasmTestFunctionPointerSignature(const TargetInfo &TI, + CallExpr *TheCall) { if (SemaRef.checkArgCount(TheCall, 1)) return true; @@ -250,27 +252,31 @@ bool SemaWasm::BuiltinWasmTestFunctionPointerSignature(CallExpr *TheCall) { << ArgType << FuncPtrArg->getSourceRange(); } - // Check that the function pointer doesn't use reference types - if (FuncTy->getReturnType().isWebAssemblyReferenceType()) { - return Diag( - FuncPtrArg->getBeginLoc(), - diag::err_wasm_builtin_test_fp_sig_cannot_include_reference_type) - << 0 << FuncTy->getReturnType() << FuncPtrArg->getSourceRange(); - } - auto NParams = FuncTy->getNumParams(); - for (unsigned I = 0; I < NParams; I++) { - if (FuncTy->getParamType(I).isWebAssemblyReferenceType()) { + if (TI.getABI() == "experimental-mv") { + auto isStructOrUnion = [](QualType T) { + return T->isUnionType() || T->isStructureType(); + }; + if (isStructOrUnion(FuncTy->getReturnType())) { return Diag( FuncPtrArg->getBeginLoc(), diag:: - err_wasm_builtin_test_fp_sig_cannot_include_reference_type) - << 1 << FuncPtrArg->getSourceRange(); + err_wasm_builtin_test_fp_sig_cannot_include_struct_or_union) + << 0 << FuncTy->getReturnType() << FuncPtrArg->getSourceRange(); + } + auto NParams = FuncTy->getNumParams(); + for (unsigned I = 0; I < NParams; I++) { + if (isStructOrUnion(FuncTy->getParamType(I))) { + return Diag( + FuncPtrArg->getBeginLoc(), + diag:: + err_wasm_builtin_test_fp_sig_cannot_include_struct_or_union) + << 1 << FuncPtrArg->getSourceRange(); + } } } // Set return type to int (the result of the test) TheCall->setType(getASTContext().IntTy); - return false; } @@ -297,7 +303,7 @@ bool SemaWasm::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI, case WebAssembly::BI__builtin_wasm_table_copy: return BuiltinWasmTableCopy(TheCall); case WebAssembly::BI__builtin_wasm_test_function_pointer_signature: - return BuiltinWasmTestFunctionPointerSignature(TheCall); + return BuiltinWasmTestFunctionPointerSignature(TI, TheCall); } return false; diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h index 6ce5535..0030946 100644 --- a/clang/lib/Sema/TreeTransform.h +++ b/clang/lib/Sema/TreeTransform.h @@ -11901,8 +11901,11 @@ void OpenACCClauseTransform<Derived>::VisitPrivateClause( if (InitRecipe) InitRecipes.push_back(InitRecipe); else - InitRecipes.push_back(Self.getSema().OpenACC().CreateInitRecipe( - OpenACCClauseKind::Private, VarRef.get())); + InitRecipes.push_back( + Self.getSema() + .OpenACC() + .CreateInitRecipe(OpenACCClauseKind::Private, VarRef.get()) + .first); } } ParsedClause.setVarListDetails(InstantiatedVarList, @@ -11942,7 +11945,7 @@ template <typename Derived> 
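// Illustrative sketch, not part of the patch: under the experimental
// multivalue ABI (ABI string "experimental-mv"), struct and union types are
// lowered to multiple values in signatures, which the signature test cannot
// express, so such signatures are now rejected. Hypothetical use:
struct Pair { int a, b; };
static int scalarFn(int x) { return x; }
static Pair pairFn() { return {1, 2}; }

int probe() {
  (void)&pairFn;
  // Fine under any ABI: only scalar types in the tested signature.
  int ok = __builtin_wasm_test_function_pointer_signature(&scalarFn);
  // Under experimental-mv the following would now be diagnosed, because the
  // tested signature returns a struct ("cannot include struct or union"):
  //   __builtin_wasm_test_function_pointer_signature(&pairFn);
  return ok;
}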
void OpenACCClauseTransform<Derived>::VisitFirstPrivateClause( const OpenACCFirstPrivateClause &C) { llvm::SmallVector<Expr *> InstantiatedVarList; - llvm::SmallVector<VarDecl *> InitRecipes; + llvm::SmallVector<OpenACCFirstPrivateRecipe> InitRecipes; for (const auto [RefExpr, InitRecipe] : llvm::zip(C.getVarList(), C.getInitRecipes())) { @@ -11953,7 +11956,7 @@ void OpenACCClauseTransform<Derived>::VisitFirstPrivateClause( // We only have to create a new one if it is dependent, and Sema won't // make one of these unless the type is non-dependent. - if (InitRecipe) + if (InitRecipe.RecipeDecl) InitRecipes.push_back(InitRecipe); else InitRecipes.push_back(Self.getSema().OpenACC().CreateInitRecipe( diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp index 1402f40..ed0ec9e 100644 --- a/clang/lib/Serialization/ASTReader.cpp +++ b/clang/lib/Serialization/ASTReader.cpp @@ -12877,9 +12877,12 @@ OpenACCClause *ASTRecordReader::readOpenACCClause() { case OpenACCClauseKind::FirstPrivate: { SourceLocation LParenLoc = readSourceLocation(); llvm::SmallVector<Expr *> VarList = readOpenACCVarList(); - llvm::SmallVector<VarDecl *> RecipeList; - for (unsigned I = 0; I < VarList.size(); ++I) - RecipeList.push_back(readDeclAs<VarDecl>()); + llvm::SmallVector<OpenACCFirstPrivateRecipe> RecipeList; + for (unsigned I = 0; I < VarList.size(); ++I) { + VarDecl *Recipe = readDeclAs<VarDecl>(); + VarDecl *RecipeTemp = readDeclAs<VarDecl>(); + RecipeList.push_back({Recipe, RecipeTemp}); + } return OpenACCFirstPrivateClause::Create(getContext(), BeginLoc, LParenLoc, VarList, RecipeList, EndLoc); diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp index c038d4d..c072acd 100644 --- a/clang/lib/Serialization/ASTWriter.cpp +++ b/clang/lib/Serialization/ASTWriter.cpp @@ -5461,11 +5461,6 @@ ASTWriter::WriteAST(llvm::PointerUnion<Sema *, Preprocessor *> Subject, WritingAST = false; - if (WritingModule && PPRef.getHeaderSearchInfo() - .getHeaderSearchOpts() - .ModulesValidateOncePerBuildSession) - ModCache.updateModuleTimestamp(OutputFile); - if (ShouldCacheASTInMemory) { // Construct MemoryBuffer and update buffer manager. ModCache.getInMemoryModuleCache().addBuiltPCM( @@ -8762,8 +8757,10 @@ void ASTRecordWriter::writeOpenACCClause(const OpenACCClause *C) { writeSourceLocation(FPC->getLParenLoc()); writeOpenACCVarList(FPC); - for (VarDecl *VD : FPC->getInitRecipes()) - AddDeclRef(VD); + for (const OpenACCFirstPrivateRecipe &R : FPC->getInitRecipes()) { + AddDeclRef(R.RecipeDecl); + AddDeclRef(R.InitFromTemporary); + } return; } case OpenACCClauseKind::Attach: { diff --git a/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp index fd0a398..0e5fc0a 100644 --- a/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp @@ -97,10 +97,6 @@ public: CheckerFrontendWithBugType UninitializedRead{ "Accessing unitialized/garbage values"}; - // FIXME: This bug type should be removed because it is only emitted in a - // situation that is practically impossible. 
- const BugType AdditionOverflow{&OutOfBounds, "API"}; - StringRef getDebugTag() const override { return "MallocChecker"; } static void *getTag() { static int tag; return &tag; } @@ -330,7 +326,6 @@ public: const Stmt *S, StringRef WarningMsg) const; void emitNotCStringBug(CheckerContext &C, ProgramStateRef State, const Stmt *S, StringRef WarningMsg) const; - void emitAdditionOverflowBug(CheckerContext &C, ProgramStateRef State) const; void emitUninitializedReadBug(CheckerContext &C, ProgramStateRef State, const Expr *E, const MemRegion *R, StringRef Msg) const; @@ -843,22 +838,6 @@ void CStringChecker::emitNotCStringBug(CheckerContext &C, ProgramStateRef State, } } -void CStringChecker::emitAdditionOverflowBug(CheckerContext &C, - ProgramStateRef State) const { - if (ExplodedNode *N = C.generateErrorNode(State)) { - // This isn't a great error message, but this should never occur in real - // code anyway -- you'd have to create a buffer longer than a size_t can - // represent, which is sort of a contradiction. - const char *WarningMsg = - "This expression will create a string whose length is too big to " - "be represented as a size_t"; - - auto Report = std::make_unique<PathSensitiveBugReport>(AdditionOverflow, - WarningMsg, N); - C.emitReport(std::move(Report)); - } -} - ProgramStateRef CStringChecker::checkAdditionOverflow(CheckerContext &C, ProgramStateRef state, NonLoc left, @@ -896,19 +875,22 @@ ProgramStateRef CStringChecker::checkAdditionOverflow(CheckerContext &C, SVal willOverflow = svalBuilder.evalBinOpNN(state, BO_GT, left, *maxMinusRightNL, cmpTy); - ProgramStateRef stateOverflow, stateOkay; - std::tie(stateOverflow, stateOkay) = - state->assume(willOverflow.castAs<DefinedOrUnknownSVal>()); + auto [StateOverflow, StateOkay] = + state->assume(willOverflow.castAs<DefinedOrUnknownSVal>()); - if (stateOverflow && !stateOkay) { - // We have an overflow. Emit a bug report. - emitAdditionOverflowBug(C, stateOverflow); + if (StateOverflow && !StateOkay) { + // On this path the analyzer is convinced that the addition of these two + // values would overflow `size_t` which must be caused by the inaccuracy + // of our modeling because this method is called in situations where the + // summands are size/length values which are much less than SIZE_MAX. To + // avoid false positives let's just sink this invalid path. + C.addSink(StateOverflow); return nullptr; } // From now on, assume an overflow didn't occur. 
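// Illustrative fragment, not part of the patch: CheckerContext::addSink marks
// the current path as a sink (exploration stops) without emitting a report,
// in contrast to the old generateErrorNode + emitReport pair. The generic
// shape of the pattern, assuming a checker-built DefinedOrUnknownSVal `Cond`:
//
//   auto [StateBad, StateOk] = State->assume(Cond);
//   if (StateBad && !StateOk) {
//     C.addSink(StateBad); // infeasible in practice; drop the path quietly
//     return nullptr;
//   }
//   State = StateOk; // continue under the non-overflowing assumption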
- assert(stateOkay); - state = stateOkay; + assert(StateOkay); + state = StateOkay; } return state; diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp index 62bc321..65ff902 100644 --- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp @@ -840,20 +840,27 @@ ProgramStateRef RetainCountChecker::updateSymbol(ProgramStateRef state, const RefCountBug & RetainCountChecker::errorKindToBugKind(RefVal::Kind ErrorKind, SymbolRef Sym) const { + const RefCountFrontend &FE = getPreferredFrontend(); + switch (ErrorKind) { case RefVal::ErrorUseAfterRelease: - return *UseAfterRelease; + return FE.UseAfterRelease; case RefVal::ErrorReleaseNotOwned: - return *ReleaseNotOwned; + return FE.ReleaseNotOwned; case RefVal::ErrorDeallocNotOwned: if (Sym->getType()->getPointeeCXXRecordDecl()) - return *FreeNotOwned; - return *DeallocNotOwned; + return FE.FreeNotOwned; + return FE.DeallocNotOwned; default: llvm_unreachable("Unhandled error."); } } +bool RetainCountChecker::isReleaseUnownedError(RefVal::Kind ErrorKind) const { + return ErrorKind == RefVal::ErrorReleaseNotOwned || + ErrorKind == RefVal::ErrorDeallocNotOwned; +} + void RetainCountChecker::processNonLeakError(ProgramStateRef St, SourceRange ErrorRange, RefVal::Kind ErrorKind, @@ -874,8 +881,8 @@ void RetainCountChecker::processNonLeakError(ProgramStateRef St, return; auto report = std::make_unique<RefCountReport>( - errorKindToBugKind(ErrorKind, Sym), - C.getASTContext().getLangOpts(), N, Sym); + errorKindToBugKind(ErrorKind, Sym), C.getASTContext().getLangOpts(), N, + Sym, /*isLeak=*/false, isReleaseUnownedError(ErrorKind)); report->addRange(ErrorRange); C.emitReport(std::move(report)); } @@ -1090,8 +1097,8 @@ ExplodedNode * RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S, ExplodedNode *N = C.addTransition(state, Pred); if (N) { const LangOptions &LOpts = C.getASTContext().getLangOpts(); - auto R = - std::make_unique<RefLeakReport>(*LeakAtReturn, LOpts, N, Sym, C); + auto R = std::make_unique<RefLeakReport>( + getPreferredFrontend().LeakAtReturn, LOpts, N, Sym, C); C.emitReport(std::move(R)); } return N; @@ -1113,7 +1120,8 @@ ExplodedNode * RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S, ExplodedNode *N = C.addTransition(state, Pred); if (N) { auto R = std::make_unique<RefCountReport>( - *ReturnNotOwnedForOwned, C.getASTContext().getLangOpts(), N, Sym); + getPreferredFrontend().ReturnNotOwnedForOwned, + C.getASTContext().getLangOpts(), N, Sym); C.emitReport(std::move(R)); } return N; @@ -1261,8 +1269,8 @@ ProgramStateRef RetainCountChecker::handleAutoreleaseCounts( os << "has a +" << V.getCount() << " retain count"; const LangOptions &LOpts = Ctx.getASTContext().getLangOpts(); - auto R = std::make_unique<RefCountReport>(*OverAutorelease, LOpts, N, Sym, - os.str()); + auto R = std::make_unique<RefCountReport>( + getPreferredFrontend().OverAutorelease, LOpts, N, Sym, os.str()); Ctx.emitReport(std::move(R)); } @@ -1307,8 +1315,10 @@ RetainCountChecker::processLeaks(ProgramStateRef state, const LangOptions &LOpts = Ctx.getASTContext().getLangOpts(); if (N) { + const RefCountFrontend &FE = getPreferredFrontend(); + const RefCountBug &BT = Pred ? FE.LeakWithinFunction : FE.LeakAtReturn; + for (SymbolRef L : Leaked) { - const RefCountBug &BT = Pred ? 
*LeakWithinFunction : *LeakAtReturn; Ctx.emitReport(std::make_unique<RefLeakReport>(BT, LOpts, N, L, Ctx)); } } @@ -1463,44 +1473,31 @@ std::unique_ptr<SimpleProgramPointTag> RetainCountChecker::DeallocSentTag; std::unique_ptr<SimpleProgramPointTag> RetainCountChecker::CastFailTag; void ento::registerRetainCountBase(CheckerManager &Mgr) { - auto *Chk = Mgr.registerChecker<RetainCountChecker>(); + auto *Chk = Mgr.getChecker<RetainCountChecker>(); Chk->DeallocSentTag = std::make_unique<SimpleProgramPointTag>( "RetainCountChecker", "DeallocSent"); Chk->CastFailTag = std::make_unique<SimpleProgramPointTag>( "RetainCountChecker", "DynamicCastFail"); } -bool ento::shouldRegisterRetainCountBase(const CheckerManager &mgr) { +bool ento::shouldRegisterRetainCountBase(const CheckerManager &) { return true; } + void ento::registerRetainCountChecker(CheckerManager &Mgr) { auto *Chk = Mgr.getChecker<RetainCountChecker>(); - Chk->TrackObjCAndCFObjects = true; + Chk->RetainCount.enable(Mgr); Chk->TrackNSCFStartParam = Mgr.getAnalyzerOptions().getCheckerBooleanOption( Mgr.getCurrentCheckerName(), "TrackNSCFStartParam"); - -#define INIT_BUGTYPE(KIND) \ - Chk->KIND = std::make_unique<RefCountBug>(Mgr.getCurrentCheckerName(), \ - RefCountBug::KIND); - // TODO: Ideally, we should have a checker for each of these bug types. - INIT_BUGTYPE(UseAfterRelease) - INIT_BUGTYPE(ReleaseNotOwned) - INIT_BUGTYPE(DeallocNotOwned) - INIT_BUGTYPE(FreeNotOwned) - INIT_BUGTYPE(OverAutorelease) - INIT_BUGTYPE(ReturnNotOwnedForOwned) - INIT_BUGTYPE(LeakWithinFunction) - INIT_BUGTYPE(LeakAtReturn) -#undef INIT_BUGTYPE } -bool ento::shouldRegisterRetainCountChecker(const CheckerManager &mgr) { +bool ento::shouldRegisterRetainCountChecker(const CheckerManager &) { return true; } void ento::registerOSObjectRetainCountChecker(CheckerManager &Mgr) { auto *Chk = Mgr.getChecker<RetainCountChecker>(); - Chk->TrackOSObjects = true; + Chk->OSObjectRetainCount.enable(Mgr); // FIXME: We want bug reports to always have the same checker name associated // with them, yet here, if RetainCountChecker is disabled but @@ -1511,21 +1508,8 @@ void ento::registerOSObjectRetainCountChecker(CheckerManager &Mgr) { // diagnostics, and **hidden checker options** with the fine-tuning of // modeling. Following this logic, OSObjectRetainCountChecker should be the // latter, but we can't just remove it for backward compatibility reasons. 
-#define LAZY_INIT_BUGTYPE(KIND) \ - if (!Chk->KIND) \ - Chk->KIND = std::make_unique<RefCountBug>(Mgr.getCurrentCheckerName(), \ - RefCountBug::KIND); - LAZY_INIT_BUGTYPE(UseAfterRelease) - LAZY_INIT_BUGTYPE(ReleaseNotOwned) - LAZY_INIT_BUGTYPE(DeallocNotOwned) - LAZY_INIT_BUGTYPE(FreeNotOwned) - LAZY_INIT_BUGTYPE(OverAutorelease) - LAZY_INIT_BUGTYPE(ReturnNotOwnedForOwned) - LAZY_INIT_BUGTYPE(LeakWithinFunction) - LAZY_INIT_BUGTYPE(LeakAtReturn) -#undef LAZY_INIT_BUGTYPE } -bool ento::shouldRegisterOSObjectRetainCountChecker(const CheckerManager &mgr) { +bool ento::shouldRegisterOSObjectRetainCountChecker(const CheckerManager &) { return true; } diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h index 0e81143..8854e10 100644 --- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h +++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.h @@ -235,51 +235,32 @@ public: }; class RetainCountChecker - : public Checker< check::Bind, - check::DeadSymbols, - check::BeginFunction, - check::EndFunction, - check::PostStmt<BlockExpr>, - check::PostStmt<CastExpr>, - check::PostStmt<ObjCArrayLiteral>, - check::PostStmt<ObjCDictionaryLiteral>, - check::PostStmt<ObjCBoxedExpr>, - check::PostStmt<ObjCIvarRefExpr>, - check::PostCall, - check::RegionChanges, - eval::Assume, - eval::Call > { + : public CheckerFamily< + check::Bind, check::DeadSymbols, check::BeginFunction, + check::EndFunction, check::PostStmt<BlockExpr>, + check::PostStmt<CastExpr>, check::PostStmt<ObjCArrayLiteral>, + check::PostStmt<ObjCDictionaryLiteral>, + check::PostStmt<ObjCBoxedExpr>, check::PostStmt<ObjCIvarRefExpr>, + check::PostCall, check::RegionChanges, eval::Assume, eval::Call> { public: - std::unique_ptr<RefCountBug> UseAfterRelease; - std::unique_ptr<RefCountBug> ReleaseNotOwned; - std::unique_ptr<RefCountBug> DeallocNotOwned; - std::unique_ptr<RefCountBug> FreeNotOwned; - std::unique_ptr<RefCountBug> OverAutorelease; - std::unique_ptr<RefCountBug> ReturnNotOwnedForOwned; - std::unique_ptr<RefCountBug> LeakWithinFunction; - std::unique_ptr<RefCountBug> LeakAtReturn; + RefCountFrontend RetainCount; + RefCountFrontend OSObjectRetainCount; mutable std::unique_ptr<RetainSummaryManager> Summaries; static std::unique_ptr<SimpleProgramPointTag> DeallocSentTag; static std::unique_ptr<SimpleProgramPointTag> CastFailTag; - /// Track Objective-C and CoreFoundation objects. - bool TrackObjCAndCFObjects = false; - - /// Track sublcasses of OSObject. - bool TrackOSObjects = false; - /// Track initial parameters (for the entry point) for NS/CF objects. bool TrackNSCFStartParam = false; - RetainCountChecker() {}; + StringRef getDebugTag() const override { return "RetainCountChecker"; } RetainSummaryManager &getSummaryManager(ASTContext &Ctx) const { if (!Summaries) - Summaries.reset( - new RetainSummaryManager(Ctx, TrackObjCAndCFObjects, TrackOSObjects)); + Summaries = std::make_unique<RetainSummaryManager>( + Ctx, RetainCount.isEnabled(), OSObjectRetainCount.isEnabled()); return *Summaries; } @@ -287,6 +268,15 @@ public: return getSummaryManager(C.getASTContext()); } + const RefCountFrontend &getPreferredFrontend() const { + // FIXME: The two frontends of this checker family are in an unusual + // relationship: if they are both enabled, then all bug reports are + // reported by RetainCount (i.e. `osx.cocoa.RetainCount`), even the bugs + // that "belong to" OSObjectRetainCount (i.e. 
`osx.OSObjectRetainCount`). + // This is counter-intuitive and should be fixed to avoid confusion. + return RetainCount.isEnabled() ? RetainCount : OSObjectRetainCount; + } + void printState(raw_ostream &Out, ProgramStateRef State, const char *NL, const char *Sep) const override; @@ -337,6 +327,8 @@ public: const RefCountBug &errorKindToBugKind(RefVal::Kind ErrorKind, SymbolRef Sym) const; + bool isReleaseUnownedError(RefVal::Kind ErrorKind) const; + void processNonLeakError(ProgramStateRef St, SourceRange ErrorRange, RefVal::Kind ErrorKind, SymbolRef Sym, CheckerContext &C) const; diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp index c9f5dc9..cad2c72 100644 --- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp @@ -21,57 +21,6 @@ using namespace clang; using namespace ento; using namespace retaincountchecker; -StringRef RefCountBug::bugTypeToName(RefCountBug::RefCountBugKind BT) { - switch (BT) { - case UseAfterRelease: - return "Use-after-release"; - case ReleaseNotOwned: - return "Bad release"; - case DeallocNotOwned: - return "-dealloc sent to non-exclusively owned object"; - case FreeNotOwned: - return "freeing non-exclusively owned object"; - case OverAutorelease: - return "Object autoreleased too many times"; - case ReturnNotOwnedForOwned: - return "Method should return an owned object"; - case LeakWithinFunction: - return "Leak"; - case LeakAtReturn: - return "Leak of returned object"; - } - llvm_unreachable("Unknown RefCountBugKind"); -} - -StringRef RefCountBug::getDescription() const { - switch (BT) { - case UseAfterRelease: - return "Reference-counted object is used after it is released"; - case ReleaseNotOwned: - return "Incorrect decrement of the reference count of an object that is " - "not owned at this point by the caller"; - case DeallocNotOwned: - return "-dealloc sent to object that may be referenced elsewhere"; - case FreeNotOwned: - return "'free' called on an object that may be referenced elsewhere"; - case OverAutorelease: - return "Object autoreleased too many times"; - case ReturnNotOwnedForOwned: - return "Object with a +0 retain count returned to caller where a +1 " - "(owning) retain count is expected"; - case LeakWithinFunction: - case LeakAtReturn: - return ""; - } - llvm_unreachable("Unknown RefCountBugKind"); -} - -RefCountBug::RefCountBug(CheckerNameRef Checker, RefCountBugKind BT) - : BugType(Checker, bugTypeToName(BT), categories::MemoryRefCount, - /*SuppressOnSink=*/BT == LeakWithinFunction || - BT == LeakAtReturn), - BT(BT) {} - static bool isNumericLiteralExpression(const Expr *E) { // FIXME: This set of cases was copied from SemaExprObjC. 
return isa<IntegerLiteral, CharacterLiteral, FloatingLiteral, @@ -312,9 +261,11 @@ namespace retaincountchecker { class RefCountReportVisitor : public BugReporterVisitor { protected: SymbolRef Sym; + bool IsReleaseUnowned; public: - RefCountReportVisitor(SymbolRef sym) : Sym(sym) {} + RefCountReportVisitor(SymbolRef S, bool IRU) + : Sym(S), IsReleaseUnowned(IRU) {} void Profile(llvm::FoldingSetNodeID &ID) const override { static int x = 0; @@ -334,7 +285,8 @@ public: class RefLeakReportVisitor : public RefCountReportVisitor { public: RefLeakReportVisitor(SymbolRef Sym, const MemRegion *LastBinding) - : RefCountReportVisitor(Sym), LastBinding(LastBinding) {} + : RefCountReportVisitor(Sym, /*IsReleaseUnowned=*/false), + LastBinding(LastBinding) {} PathDiagnosticPieceRef getEndPath(BugReporterContext &BRC, const ExplodedNode *N, @@ -452,12 +404,6 @@ annotateStartParameter(const ExplodedNode *N, SymbolRef Sym, PathDiagnosticPieceRef RefCountReportVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC, PathSensitiveBugReport &BR) { - - const auto &BT = static_cast<const RefCountBug&>(BR.getBugType()); - - bool IsFreeUnowned = BT.getBugType() == RefCountBug::FreeNotOwned || - BT.getBugType() == RefCountBug::DeallocNotOwned; - const SourceManager &SM = BRC.getSourceManager(); CallEventManager &CEMgr = BRC.getStateManager().getCallEventManager(); if (auto CE = N->getLocationAs<CallExitBegin>()) @@ -490,7 +436,7 @@ RefCountReportVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC, std::string sbuf; llvm::raw_string_ostream os(sbuf); - if (PrevT && IsFreeUnowned && CurrV.isNotOwned() && PrevT->isOwned()) { + if (PrevT && IsReleaseUnowned && CurrV.isNotOwned() && PrevT->isOwned()) { os << "Object is now not exclusively owned"; auto Pos = PathDiagnosticLocation::create(N->getLocation(), SM); return std::make_shared<PathDiagnosticEventPiece>(Pos, sbuf); @@ -815,10 +761,8 @@ RefLeakReportVisitor::getEndPath(BugReporterContext &BRC, if (K == ObjKind::ObjC || K == ObjKind::CF) { os << "whose name ('" << *FD << "') does not contain 'Copy' or 'Create'. 
This violates the " - "naming" - " convention rules given in the Memory Management Guide for " - "Core" - " Foundation"; + "naming convention rules given in the Memory Management Guide " + "for Core Foundation"; } else if (RV->getObjKind() == ObjKind::OS) { std::string FuncName = FD->getNameAsString(); os << "whose name ('" << FuncName << "') starts with '" @@ -836,19 +780,20 @@ RefLeakReportVisitor::getEndPath(BugReporterContext &BRC, } RefCountReport::RefCountReport(const RefCountBug &D, const LangOptions &LOpts, - ExplodedNode *n, SymbolRef sym, bool isLeak) - : PathSensitiveBugReport(D, D.getDescription(), n), Sym(sym), + ExplodedNode *n, SymbolRef sym, bool isLeak, + bool IsReleaseUnowned) + : PathSensitiveBugReport(D, D.getReportMessage(), n), Sym(sym), isLeak(isLeak) { if (!isLeak) - addVisitor<RefCountReportVisitor>(sym); + addVisitor<RefCountReportVisitor>(sym, IsReleaseUnowned); } RefCountReport::RefCountReport(const RefCountBug &D, const LangOptions &LOpts, ExplodedNode *n, SymbolRef sym, StringRef endText) - : PathSensitiveBugReport(D, D.getDescription(), endText, n) { + : PathSensitiveBugReport(D, D.getReportMessage(), endText, n) { - addVisitor<RefCountReportVisitor>(sym); + addVisitor<RefCountReportVisitor>(sym, /*IsReleaseUnowned=*/false); } void RefLeakReport::deriveParamLocation(CheckerContext &Ctx) { diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h index d059008..6ceb86f 100644 --- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h +++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h @@ -25,25 +25,44 @@ namespace ento { namespace retaincountchecker { class RefCountBug : public BugType { + StringRef ReportMessage; + public: - enum RefCountBugKind { - UseAfterRelease, - ReleaseNotOwned, - DeallocNotOwned, - FreeNotOwned, - OverAutorelease, - ReturnNotOwnedForOwned, - LeakWithinFunction, - LeakAtReturn, - }; - RefCountBug(CheckerNameRef Checker, RefCountBugKind BT); - StringRef getDescription() const; - - RefCountBugKind getBugType() const { return BT; } - -private: - RefCountBugKind BT; - static StringRef bugTypeToName(RefCountBugKind BT); + RefCountBug(const CheckerFrontend *CF, StringRef Desc, StringRef ReportMsg, + bool SuppressOnSink = false) + : BugType(CF, Desc, categories::MemoryRefCount, SuppressOnSink), + ReportMessage(ReportMsg) {} + StringRef getReportMessage() const { return ReportMessage; } +}; + +class RefCountFrontend : public CheckerFrontend { +public: + const RefCountBug UseAfterRelease{ + this, "Use-after-release", + "Reference-counted object is used after it is released"}; + const RefCountBug ReleaseNotOwned{ + this, "Bad release", + "Incorrect decrement of the reference count of an object that is not " + "owned at this point by the caller"}; + const RefCountBug DeallocNotOwned{ + this, "-dealloc sent to non-exclusively owned object", + "-dealloc sent to object that may be referenced elsewhere"}; + const RefCountBug FreeNotOwned{ + this, "freeing non-exclusively owned object", + "'free' called on an object that may be referenced elsewhere"}; + const RefCountBug OverAutorelease{this, "Object autoreleased too many times", + "Object autoreleased too many times"}; + const RefCountBug ReturnNotOwnedForOwned{ + this, "Method should return an owned object", + "Object with a +0 retain count returned to caller where a +1 (owning) " + "retain count is expected"}; + // For these two bug 
types the report message will be generated dynamically + // by `RefLeakReport::createDescription` so the empty string taken from the + // BugType will be ignored (overwritten). + const RefCountBug LeakWithinFunction{this, "Leak", /*ReportMsg=*/"", + /*SuppressOnSink=*/true}; + const RefCountBug LeakAtReturn{this, "Leak of returned object", + /*ReportMsg=*/"", /*SuppressOnSink=*/true}; }; class RefCountReport : public PathSensitiveBugReport { @@ -53,8 +72,8 @@ protected: public: RefCountReport(const RefCountBug &D, const LangOptions &LOpts, - ExplodedNode *n, SymbolRef sym, - bool isLeak=false); + ExplodedNode *n, SymbolRef sym, bool isLeak = false, + bool IsReleaseUnowned = false); RefCountReport(const RefCountBug &D, const LangOptions &LOpts, ExplodedNode *n, SymbolRef sym, |
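// Illustrative fragment, not part of the patch: with the bug types owned by
// the frontends, an emission site picks the preferred frontend once and uses
// its member BugType directly, mirroring processNonLeakError above (ErrNode,
// Sym, and C are placeholders for the checker's local state):
//
//   const RefCountFrontend &FE = getPreferredFrontend();
//   auto Report = std::make_unique<RefCountReport>(
//       FE.UseAfterRelease, C.getASTContext().getLangOpts(), ErrNode, Sym,
//       /*isLeak=*/false, /*IsReleaseUnowned=*/false);
//   C.emitReport(std::move(Report));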