Diffstat (limited to 'clang/lib')
125 files changed, 2688 insertions, 1559 deletions
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp index 232a4b6..6b6275f 100644 --- a/clang/lib/AST/ASTContext.cpp +++ b/clang/lib/AST/ASTContext.cpp @@ -2597,6 +2597,9 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const { } break; + case Type::PredefinedSugar: + return getTypeInfo(cast<PredefinedSugarType>(T)->desugar().getTypePtr()); + case Type::Pipe: Width = Target->getPointerWidth(LangAS::opencl_global); Align = Target->getPointerAlign(LangAS::opencl_global); @@ -5216,6 +5219,39 @@ QualType ASTContext::getDependentBitIntType(bool IsUnsigned, return QualType(New, 0); } +QualType +ASTContext::getPredefinedSugarType(PredefinedSugarType::Kind KD) const { + using Kind = PredefinedSugarType::Kind; + + if (auto *Target = PredefinedSugarTypes[llvm::to_underlying(KD)]; + Target != nullptr) + return QualType(Target, 0); + + auto getCanonicalType = [](const ASTContext &Ctx, Kind KDI) -> QualType { + switch (KDI) { + // size_t (C99TC3 6.5.3.4), signed size_t (C++23 5.13.2) and + // ptrdiff_t (C99TC3 6.5.6) Although these types are not built-in, they + // are part of the core language and are widely used. Using + // PredefinedSugarType makes these types as named sugar types rather than + // standard integer types, enabling better hints and diagnostics. + case Kind::SizeT: + return Ctx.getFromTargetType(Ctx.Target->getSizeType()); + case Kind::SignedSizeT: + return Ctx.getFromTargetType(Ctx.Target->getSignedSizeType()); + case Kind::PtrdiffT: + return Ctx.getFromTargetType(Ctx.Target->getPtrDiffType(LangAS::Default)); + } + llvm_unreachable("unexpected kind"); + }; + + auto *New = new (*this, alignof(PredefinedSugarType)) + PredefinedSugarType(KD, &Idents.get(PredefinedSugarType::getName(KD)), + getCanonicalType(*this, static_cast<Kind>(KD))); + Types.push_back(New); + PredefinedSugarTypes[llvm::to_underlying(KD)] = New; + return QualType(New, 0); +} + #ifndef NDEBUG static bool NeedsInjectedClassNameType(const RecordDecl *D) { if (!isa<CXXRecordDecl>(D)) return false; @@ -6796,14 +6832,31 @@ QualType ASTContext::getTagDeclType(const TagDecl *Decl) const { /// getSizeType - Return the unique type for "size_t" (C99 7.17), the result /// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and /// needs to agree with the definition in <stddef.h>. -CanQualType ASTContext::getSizeType() const { +QualType ASTContext::getSizeType() const { + return getPredefinedSugarType(PredefinedSugarType::Kind::SizeT); +} + +CanQualType ASTContext::getCanonicalSizeType() const { return getFromTargetType(Target->getSizeType()); } /// Return the unique signed counterpart of the integer type /// corresponding to size_t. -CanQualType ASTContext::getSignedSizeType() const { - return getFromTargetType(Target->getSignedSizeType()); +QualType ASTContext::getSignedSizeType() const { + return getPredefinedSugarType(PredefinedSugarType::Kind::SignedSizeT); +} + +/// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17) +/// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9). +QualType ASTContext::getPointerDiffType() const { + return getPredefinedSugarType(PredefinedSugarType::Kind::PtrdiffT); +} + +/// Return the unique unsigned counterpart of "ptrdiff_t" +/// integer type. The standard (C11 7.21.6.1p7) refers to this type +/// in the definition of %tu format specifier. 
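Illustrative sketch, not part of the patch: the point of the new sugar nodes is how size_t and ptrdiff_t are spelled, not their identity. Assuming a typical 64-bit target, the asserts below hold both before and after this change, since __is_same compares canonical types; what the change enables is printing these names in hints and diagnostics instead of the raw target-dependent integer types.

  #include <cstddef>

  // sizeof(...) has type size_t; pointer subtraction has type ptrdiff_t.
  // Canonically these are still the underlying builtin integer types.
  static_assert(__is_same(decltype(sizeof(int)), std::size_t), "");
  static_assert(__is_same(decltype((char *)0 - (char *)0), std::ptrdiff_t), "");
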
+QualType ASTContext::getUnsignedPointerDiffType() const { + return getFromTargetType(Target->getUnsignedPtrDiffType(LangAS::Default)); } /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5). @@ -6838,19 +6891,6 @@ QualType ASTContext::getUIntPtrType() const { return getCorrespondingUnsignedType(getIntPtrType()); } -/// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17) -/// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9). -QualType ASTContext::getPointerDiffType() const { - return getFromTargetType(Target->getPtrDiffType(LangAS::Default)); -} - -/// Return the unique unsigned counterpart of "ptrdiff_t" -/// integer type. The standard (C11 7.21.6.1p7) refers to this type -/// in the definition of %tu format specifier. -QualType ASTContext::getUnsignedPointerDiffType() const { - return getFromTargetType(Target->getUnsignedPtrDiffType(LangAS::Default)); -} - /// Return the unique type for "pid_t" defined in /// <sys/types.h>. We need this to compute the correct type for vfork(). QualType ASTContext::getProcessIDType() const { @@ -14503,6 +14543,10 @@ static QualType getCommonSugarTypeNode(ASTContext &Ctx, const Type *X, DX->isCountInBytes(), DX->isOrNull(), CDX); } + case Type::PredefinedSugar: + assert(cast<PredefinedSugarType>(X)->getKind() != + cast<PredefinedSugarType>(Y)->getKind()); + return QualType(); } llvm_unreachable("Unhandled Type Class"); } diff --git a/clang/lib/AST/ASTImporter.cpp b/clang/lib/AST/ASTImporter.cpp index b5f6c5a..b9bdabe0b 100644 --- a/clang/lib/AST/ASTImporter.cpp +++ b/clang/lib/AST/ASTImporter.cpp @@ -2080,6 +2080,11 @@ ExpectedType clang::ASTNodeImporter::VisitDependentBitIntType( *ToNumBitsExprOrErr); } +ExpectedType clang::ASTNodeImporter::VisitPredefinedSugarType( + const clang::PredefinedSugarType *T) { + return Importer.getToContext().getPredefinedSugarType(T->getKind()); +} + ExpectedType clang::ASTNodeImporter::VisitDependentSizedMatrixType( const clang::DependentSizedMatrixType *T) { Error Err = Error::success(); diff --git a/clang/lib/AST/ASTStructuralEquivalence.cpp b/clang/lib/AST/ASTStructuralEquivalence.cpp index 289c6d7..0f2762d 100644 --- a/clang/lib/AST/ASTStructuralEquivalence.cpp +++ b/clang/lib/AST/ASTStructuralEquivalence.cpp @@ -1477,6 +1477,13 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context, return false; break; } + case Type::PredefinedSugar: { + const auto *TP1 = cast<PredefinedSugarType>(T1); + const auto *TP2 = cast<PredefinedSugarType>(T2); + if (TP1->getKind() != TP2->getKind()) + return false; + break; + } } // end switch return true; diff --git a/clang/lib/AST/ByteCode/ByteCodeEmitter.cpp b/clang/lib/AST/ByteCode/ByteCodeEmitter.cpp index 965e235..3288585 100644 --- a/clang/lib/AST/ByteCode/ByteCodeEmitter.cpp +++ b/clang/lib/AST/ByteCode/ByteCodeEmitter.cpp @@ -62,7 +62,7 @@ void ByteCodeEmitter::compileFunc(const FunctionDecl *FuncDecl, (Func->hasThisPointer() && !Func->isThisPointerExplicit()); for (auto ParamOffset : llvm::drop_begin(Func->ParamOffsets, Drop)) { const ParmVarDecl *PD = FuncDecl->parameters()[ParamIndex]; - std::optional<PrimType> T = Ctx.classify(PD->getType()); + OptPrimType T = Ctx.classify(PD->getType()); this->Params.insert({PD, {ParamOffset, T != std::nullopt}}); ++ParamIndex; } diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp index ea473730..63ac536 100644 --- a/clang/lib/AST/ByteCode/Compiler.cpp +++ b/clang/lib/AST/ByteCode/Compiler.cpp @@ -25,34 +25,6 @@ using APSInt = 
llvm::APSInt; namespace clang { namespace interp { -static bool refersToUnion(const Expr *E) { - for (;;) { - if (const auto *ME = dyn_cast<MemberExpr>(E)) { - if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); - FD && FD->getParent()->isUnion()) - return true; - E = ME->getBase(); - continue; - } - - if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(E)) { - E = ASE->getBase()->IgnoreImplicit(); - continue; - } - - if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E); - ICE && (ICE->getCastKind() == CK_NoOp || - ICE->getCastKind() == CK_DerivedToBase || - ICE->getCastKind() == CK_UncheckedDerivedToBase)) { - E = ICE->getSubExpr(); - continue; - } - - break; - } - return false; -} - static std::optional<bool> getBoolValue(const Expr *E) { if (const auto *CE = dyn_cast_if_present<ConstantExpr>(E); CE && CE->hasAPValueResult() && @@ -237,7 +209,7 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) { if (SubExpr->getType().isVolatileQualified()) return this->emitInvalidCast(CastKind::Volatile, /*Fatal=*/true, CE); - std::optional<PrimType> SubExprT = classify(SubExpr->getType()); + OptPrimType SubExprT = classify(SubExpr->getType()); // Prepare storage for the result. if (!Initializing && !SubExprT) { std::optional<unsigned> LocalIndex = allocateLocal(SubExpr); @@ -388,7 +360,7 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) { const Descriptor *Desc = nullptr; const QualType PointeeType = CE->getType()->getPointeeType(); if (!PointeeType.isNull()) { - if (std::optional<PrimType> T = classify(PointeeType)) + if (OptPrimType T = classify(PointeeType)) Desc = P.createDescriptor(SubExpr, *T); else Desc = P.createDescriptor(SubExpr, PointeeType.getTypePtr(), @@ -436,7 +408,7 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) { PrimType T = classifyPrim(IntType); QualType PtrType = CE->getType(); const Descriptor *Desc; - if (std::optional<PrimType> T = classify(PtrType->getPointeeType())) + if (OptPrimType T = classify(PtrType->getPointeeType())) Desc = P.createDescriptor(SubExpr, *T); else if (PtrType->getPointeeType()->isVoidType()) Desc = nullptr; @@ -473,12 +445,12 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) { return this->emitInvalidCast(CastKind::Reinterpret, /*Fatal=*/true, CE); } QualType SubExprTy = SubExpr->getType(); - std::optional<PrimType> FromT = classify(SubExprTy); + OptPrimType FromT = classify(SubExprTy); // Casts from integer/vector to vector. if (CE->getType()->isVectorType()) return this->emitBuiltinBitCast(CE); - std::optional<PrimType> ToT = classify(CE->getType()); + OptPrimType ToT = classify(CE->getType()); if (!FromT || !ToT) return false; @@ -504,7 +476,7 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) { case CK_IntegralToBoolean: case CK_FixedPointToBoolean: { // HLSL uses this to cast to one-element vectors. 
- std::optional<PrimType> FromT = classify(SubExpr->getType()); + OptPrimType FromT = classify(SubExpr->getType()); if (!FromT) return false; @@ -517,8 +489,8 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) { case CK_BooleanToSignedIntegral: case CK_IntegralCast: { - std::optional<PrimType> FromT = classify(SubExpr->getType()); - std::optional<PrimType> ToT = classify(CE->getType()); + OptPrimType FromT = classify(SubExpr->getType()); + OptPrimType ToT = classify(CE->getType()); if (!FromT || !ToT) return false; @@ -688,7 +660,7 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) { case CK_HLSLVectorTruncation: { assert(SubExpr->getType()->isVectorType()); - if (std::optional<PrimType> ResultT = classify(CE)) { + if (OptPrimType ResultT = classify(CE)) { assert(!DiscardResult); // Result must be either a float or integer. Take the first element. if (!this->visit(SubExpr)) @@ -872,9 +844,9 @@ bool Compiler<Emitter>::VisitBinaryOperator(const BinaryOperator *BO) { } // Typecheck the args. - std::optional<PrimType> LT = classify(LHS); - std::optional<PrimType> RT = classify(RHS); - std::optional<PrimType> T = classify(BO->getType()); + OptPrimType LT = classify(LHS); + OptPrimType RT = classify(RHS); + OptPrimType T = classify(BO->getType()); // Special case for C++'s three-way/spaceship operator <=>, which // returns a std::{strong,weak,partial}_ordering (which is a class, so doesn't @@ -995,8 +967,8 @@ bool Compiler<Emitter>::VisitPointerArithBinOp(const BinaryOperator *E) { (!LHS->getType()->isPointerType() && !RHS->getType()->isPointerType())) return false; - std::optional<PrimType> LT = classify(LHS); - std::optional<PrimType> RT = classify(RHS); + OptPrimType LT = classify(LHS); + OptPrimType RT = classify(RHS); if (!LT || !RT) return false; @@ -1068,7 +1040,7 @@ bool Compiler<Emitter>::VisitLogicalBinOp(const BinaryOperator *E) { BinaryOperatorKind Op = E->getOpcode(); const Expr *LHS = E->getLHS(); const Expr *RHS = E->getRHS(); - std::optional<PrimType> T = classify(E->getType()); + OptPrimType T = classify(E->getType()); if (Op == BO_LOr) { // Logical OR. Visit LHS and only evaluate RHS if LHS was FALSE. @@ -1648,7 +1620,7 @@ bool Compiler<Emitter>::VisitImplicitValueInitExpr( const ImplicitValueInitExpr *E) { QualType QT = E->getType(); - if (std::optional<PrimType> T = classify(QT)) + if (OptPrimType T = classify(QT)) return this->visitZeroInitializer(*T, QT, E); if (QT->isRecordType()) { @@ -1734,7 +1706,7 @@ bool Compiler<Emitter>::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) { if (!Success) return false; - std::optional<PrimType> IndexT = classify(Index->getType()); + OptPrimType IndexT = classify(Index->getType()); // In error-recovery cases, the index expression has a dependent type. if (!IndexT) return this->emitError(E); @@ -1776,7 +1748,7 @@ bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits, } // Primitive values. 
- if (std::optional<PrimType> T = classify(QT)) { + if (OptPrimType T = classify(QT)) { assert(!DiscardResult); if (Inits.size() == 0) return this->visitZeroInitializer(*T, QT, E); @@ -1840,7 +1812,7 @@ bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits, FToInit = cast<CXXParenListInitExpr>(E)->getInitializedFieldInUnion(); const Record::Field *FieldToInit = R->getField(FToInit); - if (std::optional<PrimType> T = classify(Init)) { + if (OptPrimType T = classify(Init)) { if (!initPrimitiveField(FieldToInit, Init, *T, /*Activate=*/true)) return false; } else { @@ -1859,7 +1831,7 @@ bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits, R->getField(InitIndex)->isUnnamedBitField()) ++InitIndex; - if (std::optional<PrimType> T = classify(Init)) { + if (OptPrimType T = classify(Init)) { const Record::Field *FieldToInit = R->getField(InitIndex); if (!initPrimitiveField(FieldToInit, Init, *T)) return false; @@ -1899,7 +1871,7 @@ bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits, if (!this->emitCheckArraySize(NumElems, E)) return false; - std::optional<PrimType> InitT = classify(CAT->getElementType()); + OptPrimType InitT = classify(CAT->getElementType()); unsigned ElementIndex = 0; for (const Expr *Init : Inits) { if (const auto *EmbedS = @@ -2013,7 +1985,7 @@ bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits, /// this. template <class Emitter> bool Compiler<Emitter>::visitArrayElemInit(unsigned ElemIndex, const Expr *Init, - std::optional<PrimType> InitT) { + OptPrimType InitT) { if (InitT) { // Visit the primitive element like normal. if (!this->visit(Init)) @@ -2042,7 +2014,7 @@ bool Compiler<Emitter>::visitCallArgs(ArrayRef<const Expr *> Args, unsigned ArgIndex = 0; for (const Expr *Arg : Args) { - if (std::optional<PrimType> T = classify(Arg)) { + if (OptPrimType T = classify(Arg)) { if (!this->visit(Arg)) return false; } else { @@ -2097,7 +2069,7 @@ bool Compiler<Emitter>::VisitSubstNonTypeTemplateParmExpr( template <class Emitter> bool Compiler<Emitter>::VisitConstantExpr(const ConstantExpr *E) { - std::optional<PrimType> T = classify(E->getType()); + OptPrimType T = classify(E->getType()); if (T && E->hasAPValueResult()) { // Try to emit the APValue directly, without visiting the subexpr. // This will only fail if we can't emit the APValue, so won't emit any @@ -2292,7 +2264,7 @@ bool Compiler<Emitter>::VisitMemberExpr(const MemberExpr *E) { const auto maybeLoadValue = [&]() -> bool { if (E->isGLValue()) return true; - if (std::optional<PrimType> T = classify(E)) + if (OptPrimType T = classify(E)) return this->emitLoadPop(*T, E); return false; }; @@ -2357,7 +2329,7 @@ bool Compiler<Emitter>::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) { // Investigate compiling this to a loop. 
const Expr *SubExpr = E->getSubExpr(); size_t Size = E->getArraySize().getZExtValue(); - std::optional<PrimType> SubExprT = classify(SubExpr); + OptPrimType SubExprT = classify(SubExpr); // So, every iteration, we execute an assignment here // where the LHS is on the stack (the target array) @@ -2589,8 +2561,8 @@ bool Compiler<Emitter>::VisitFloatCompoundAssignOperator( QualType LHSType = LHS->getType(); QualType LHSComputationType = E->getComputationLHSType(); QualType ResultType = E->getComputationResultType(); - std::optional<PrimType> LT = classify(LHSComputationType); - std::optional<PrimType> RT = classify(ResultType); + OptPrimType LT = classify(LHSComputationType); + OptPrimType RT = classify(ResultType); assert(ResultType->isFloatingType()); @@ -2659,8 +2631,8 @@ bool Compiler<Emitter>::VisitPointerCompoundAssignOperator( BinaryOperatorKind Op = E->getOpcode(); const Expr *LHS = E->getLHS(); const Expr *RHS = E->getRHS(); - std::optional<PrimType> LT = classify(LHS->getType()); - std::optional<PrimType> RT = classify(RHS->getType()); + OptPrimType LT = classify(LHS->getType()); + OptPrimType RT = classify(RHS->getType()); if (Op != BO_AddAssign && Op != BO_SubAssign) return false; @@ -2698,11 +2670,10 @@ bool Compiler<Emitter>::VisitCompoundAssignOperator( const Expr *LHS = E->getLHS(); const Expr *RHS = E->getRHS(); - std::optional<PrimType> LHSComputationT = - classify(E->getComputationLHSType()); - std::optional<PrimType> LT = classify(LHS->getType()); - std::optional<PrimType> RT = classify(RHS->getType()); - std::optional<PrimType> ResultT = classify(E->getType()); + OptPrimType LHSComputationT = classify(E->getComputationLHSType()); + OptPrimType LT = classify(LHS->getType()); + OptPrimType RT = classify(RHS->getType()); + OptPrimType ResultT = classify(E->getType()); if (!Ctx.getLangOpts().CPlusPlus14) return this->visit(RHS) && this->visit(LHS) && this->emitError(E); @@ -2837,7 +2808,7 @@ bool Compiler<Emitter>::VisitMaterializeTemporaryExpr( // When we're initializing a global variable *or* the storage duration of // the temporary is explicitly static, create a global variable. - std::optional<PrimType> SubExprT = classify(SubExpr); + OptPrimType SubExprT = classify(SubExpr); bool IsStatic = E->getStorageDuration() == SD_Static; if (IsStatic) { std::optional<unsigned> GlobalIndex = P.createGlobal(E); @@ -2931,7 +2902,7 @@ bool Compiler<Emitter>::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) { return this->visitInitializer(Init) && this->emitFinishInit(E); } - std::optional<PrimType> T = classify(E->getType()); + OptPrimType T = classify(E->getType()); if (E->isFileScope()) { // Avoid creating a variable if this is a primitive RValue anyway. if (T && !E->isLValue()) @@ -3014,7 +2985,7 @@ bool Compiler<Emitter>::VisitLambdaExpr(const LambdaExpr *E) { continue; ++CaptureInitIt; - if (std::optional<PrimType> T = classify(Init)) { + if (OptPrimType T = classify(Init)) { if (!this->visit(Init)) return false; @@ -3061,21 +3032,21 @@ bool Compiler<Emitter>::VisitCXXReinterpretCastExpr( const CXXReinterpretCastExpr *E) { const Expr *SubExpr = E->getSubExpr(); - std::optional<PrimType> FromT = classify(SubExpr); - std::optional<PrimType> ToT = classify(E); + OptPrimType FromT = classify(SubExpr); + OptPrimType ToT = classify(E); if (!FromT || !ToT) return this->emitInvalidCast(CastKind::Reinterpret, /*Fatal=*/true, E); if (FromT == PT_Ptr || ToT == PT_Ptr) { // Both types could be PT_Ptr because their expressions are glvalues. 
- std::optional<PrimType> PointeeFromT; + OptPrimType PointeeFromT; if (SubExpr->getType()->isPointerOrReferenceType()) PointeeFromT = classify(SubExpr->getType()->getPointeeType()); else PointeeFromT = classify(SubExpr->getType()); - std::optional<PrimType> PointeeToT; + OptPrimType PointeeToT; if (E->getType()->isPointerOrReferenceType()) PointeeToT = classify(E->getType()->getPointeeType()); else @@ -3344,7 +3315,7 @@ bool Compiler<Emitter>::VisitCXXScalarValueInitExpr( if (DiscardResult || Ty->isVoidType()) return true; - if (std::optional<PrimType> T = classify(Ty)) + if (OptPrimType T = classify(Ty)) return this->visitZeroInitializer(*T, Ty, E); if (const auto *CT = Ty->getAs<ComplexType>()) { @@ -3457,7 +3428,7 @@ bool Compiler<Emitter>::VisitCXXNewExpr(const CXXNewExpr *E) { assert(classifyPrim(E->getType()) == PT_Ptr); const Expr *Init = E->getInitializer(); QualType ElementType = E->getAllocatedType(); - std::optional<PrimType> ElemT = classify(ElementType); + OptPrimType ElemT = classify(ElementType); unsigned PlacementArgs = E->getNumPlacementArgs(); const FunctionDecl *OperatorNew = E->getOperatorNew(); const Expr *PlacementDest = nullptr; @@ -3645,7 +3616,7 @@ bool Compiler<Emitter>::VisitCXXNewExpr(const CXXNewExpr *E) { if (!this->emitStorePop(InitT, E)) return false; } else if (DynamicInit) { - if (std::optional<PrimType> InitT = classify(DynamicInit)) { + if (OptPrimType InitT = classify(DynamicInit)) { if (!this->visit(DynamicInit)) return false; if (!this->emitStorePop(*InitT, E)) @@ -4154,7 +4125,7 @@ bool Compiler<Emitter>::visitInitializer(const Expr *E) { } template <class Emitter> bool Compiler<Emitter>::visitBool(const Expr *E) { - std::optional<PrimType> T = classify(E->getType()); + OptPrimType T = classify(E->getType()); if (!T) { // Convert complex values to bool. if (E->getType()->isAnyComplexType()) { @@ -4309,7 +4280,7 @@ bool Compiler<Emitter>::visitZeroArrayInitializer(QualType T, const Expr *E) { QualType ElemType = AT->getElementType(); size_t NumElems = cast<ConstantArrayType>(AT)->getZExtSize(); - if (std::optional<PrimType> ElemT = classify(ElemType)) { + if (OptPrimType ElemT = classify(ElemType)) { for (size_t I = 0; I != NumElems; ++I) { if (!this->visitZeroInitializer(*ElemT, ElemType, E)) return false; @@ -4602,7 +4573,7 @@ bool Compiler<Emitter>::visitExpr(const Expr *E, bool DestroyToplevelScope) { } // Expressions with a primitive return type. - if (std::optional<PrimType> T = classify(E)) { + if (OptPrimType T = classify(E)) { if (!visit(E)) return false; @@ -4679,7 +4650,7 @@ bool Compiler<Emitter>::visitDeclAndReturn(const VarDecl *VD, if (!this->visitVarDecl(VD, /*Toplevel=*/true)) return false; - std::optional<PrimType> VarT = classify(VD->getType()); + OptPrimType VarT = classify(VD->getType()); if (Context::shouldBeGloballyIndexed(VD)) { auto GlobalIndex = P.getGlobal(VD); assert(GlobalIndex); // visitVarDecl() didn't return false. 
@@ -4736,7 +4707,7 @@ VarCreationState Compiler<Emitter>::visitVarDecl(const VarDecl *VD, return VarCreationState::NotCreated(); const Expr *Init = VD->getInit(); - std::optional<PrimType> VarT = classify(VD->getType()); + OptPrimType VarT = classify(VD->getType()); if (Init && Init->isValueDependent()) return false; @@ -4868,7 +4839,7 @@ bool Compiler<Emitter>::visitAPValueInitializer(const APValue &Val, const Record::Field *RF = R->getField(I); QualType FieldType = RF->Decl->getType(); - if (std::optional<PrimType> PT = classify(FieldType)) { + if (OptPrimType PT = classify(FieldType)) { if (!this->visitAPValue(F, *PT, E)) return false; if (!this->emitInitField(*PT, RF->Offset, E)) @@ -4898,7 +4869,7 @@ bool Compiler<Emitter>::visitAPValueInitializer(const APValue &Val, QualType ElemType = ArrType->getElementType(); for (unsigned A = 0, AN = Val.getArraySize(); A != AN; ++A) { const APValue &Elem = Val.getArrayInitializedElt(A); - if (std::optional<PrimType> ElemT = classify(ElemType)) { + if (OptPrimType ElemT = classify(ElemType)) { if (!this->visitAPValue(Elem, *ElemT, E)) return false; if (!this->emitInitElem(*ElemT, A, E)) @@ -4958,7 +4929,7 @@ bool Compiler<Emitter>::VisitBuiltinCallExpr(const CallExpr *E, } QualType ReturnType = E->getType(); - std::optional<PrimType> ReturnT = classify(E); + OptPrimType ReturnT = classify(E); // Non-primitive return type. Prepare storage. if (!Initializing && !ReturnT && !ReturnType->isVoidType()) { @@ -5032,7 +5003,7 @@ bool Compiler<Emitter>::VisitCallExpr(const CallExpr *E) { BlockScope<Emitter> CallScope(this, ScopeKind::Call); QualType ReturnType = E->getCallReturnType(Ctx.getASTContext()); - std::optional<PrimType> T = classify(ReturnType); + OptPrimType T = classify(ReturnType); bool HasRVO = !ReturnType->isVoidType() && !T; if (HasRVO) { @@ -5402,6 +5373,53 @@ bool Compiler<Emitter>::maybeEmitDeferredVarInit(const VarDecl *VD) { return true; } +static bool hasTrivialDefaultCtorParent(const FieldDecl *FD) { + assert(FD); + assert(FD->getParent()->isUnion()); + const auto *CXXRD = dyn_cast<CXXRecordDecl>(FD->getParent()); + return !CXXRD || CXXRD->hasTrivialDefaultConstructor(); +} + +template <class Emitter> bool Compiler<Emitter>::refersToUnion(const Expr *E) { + for (;;) { + if (const auto *ME = dyn_cast<MemberExpr>(E)) { + if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); + FD && FD->getParent()->isUnion() && hasTrivialDefaultCtorParent(FD)) + return true; + E = ME->getBase(); + continue; + } + + if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(E)) { + E = ASE->getBase()->IgnoreImplicit(); + continue; + } + + if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E); + ICE && (ICE->getCastKind() == CK_NoOp || + ICE->getCastKind() == CK_DerivedToBase || + ICE->getCastKind() == CK_UncheckedDerivedToBase)) { + E = ICE->getSubExpr(); + continue; + } + + if (const auto *This = dyn_cast<CXXThisExpr>(E)) { + const auto *ThisRecord = + This->getType()->getPointeeType()->getAsRecordDecl(); + if (!ThisRecord->isUnion()) + return false; + // Otherwise, always activate if we're in the ctor. 
+ if (const auto *Ctor = + dyn_cast_if_present<CXXConstructorDecl>(CompilingFunction)) + return Ctor->getParent() == ThisRecord; + return false; + } + + break; + } + return false; +} + template <class Emitter> bool Compiler<Emitter>::visitDeclStmt(const DeclStmt *DS, bool EvaluateConditionDecl) { @@ -5933,17 +5951,16 @@ bool Compiler<Emitter>::compileConstructor(const CXXConstructorDecl *Ctor) { if (InitExpr->getType().isNull()) return false; - if (std::optional<PrimType> T = this->classify(InitExpr)) { + if (OptPrimType T = this->classify(InitExpr)) { + if (Activate && !this->emitActivateThisField(FieldOffset, InitExpr)) + return false; + if (!this->visit(InitExpr)) return false; bool BitField = F->isBitField(); - if (BitField && Activate) - return this->emitInitThisBitFieldActivate(*T, F, FieldOffset, InitExpr); if (BitField) return this->emitInitThisBitField(*T, F, FieldOffset, InitExpr); - if (Activate) - return this->emitInitThisFieldActivate(*T, FieldOffset, InitExpr); return this->emitInitThisField(*T, FieldOffset, InitExpr); } // Non-primitive case. Get a pointer to the field-to-initialize @@ -6189,7 +6206,7 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) { return this->VisitVectorUnaryOperator(E); if (SubExpr->getType()->isFixedPointType()) return this->VisitFixedPointUnaryOperator(E); - std::optional<PrimType> T = classify(SubExpr->getType()); + OptPrimType T = classify(SubExpr->getType()); switch (E->getOpcode()) { case UO_PostInc: { // x++ @@ -6375,6 +6392,9 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) { if (!this->visit(SubExpr)) return false; + if (!this->emitCheckNull(E)) + return false; + if (classifyPrim(SubExpr) == PT_Ptr) return this->emitNarrowPtr(E); return true; @@ -6412,7 +6432,7 @@ bool Compiler<Emitter>::VisitComplexUnaryOperator(const UnaryOperator *E) { if (DiscardResult) return this->discard(SubExpr); - std::optional<PrimType> ResT = classify(E); + OptPrimType ResT = classify(E); auto prepareResult = [=]() -> bool { if (!ResT && !Initializing) { std::optional<unsigned> LocalIndex = allocateLocal(SubExpr); @@ -6634,7 +6654,7 @@ bool Compiler<Emitter>::visitDeclRef(const ValueDecl *D, const Expr *E) { if (std::optional<unsigned> Index = P.getOrCreateGlobal(D)) { if (!this->emitGetPtrGlobal(*Index, E)) return false; - if (std::optional<PrimType> T = classify(E->getType())) { + if (OptPrimType T = classify(E->getType())) { if (!this->visitAPValue(TPOD->getValue(), *T, E)) return false; return this->emitInitGlobal(*T, *Index, E); @@ -6670,6 +6690,11 @@ bool Compiler<Emitter>::visitDeclRef(const ValueDecl *D, const Expr *E) { } // Function parameters. 
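Sketch of the language rule the refersToUnion and ActivateThisField changes above are concerned with; illustrative source (C++20, where changing the active union member during constant evaluation is permitted), not a test from this patch: storing to a union member in constant evaluation activates it, and reading a member that was never activated is rejected.

  union U { int i; float f; };   // trivial default constructor, as the
                                 // hasTrivialDefaultCtorParent() gate requires

  struct S {
    U u;
    constexpr S() { u.i = 1; }   // activates U::i
  };

  static_assert(S().u.i == 1, "");
  // static_assert(S().u.f == 0, "");  // rejected: read of member 'f' of union
  //                                   // with active member 'i'
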
if (const auto *PVD = dyn_cast<ParmVarDecl>(D)) { + if (Ctx.getLangOpts().CPlusPlus && !Ctx.getLangOpts().CPlusPlus11 && + !D->getType()->isIntegralOrEnumerationType()) { + return this->emitInvalidDeclRef(cast<DeclRefExpr>(E), + /*InitializerFailed=*/false, E); + } if (auto It = this->Params.find(PVD); It != this->Params.end()) { if (IsReference || !It->second.IsPtr) return this->emitGetParam(classifyPrim(E), It->second.Offset, E); @@ -7128,7 +7153,7 @@ bool Compiler<Emitter>::emitBuiltinBitCast(const CastExpr *E) { const Expr *SubExpr = E->getSubExpr(); QualType FromType = SubExpr->getType(); QualType ToType = E->getType(); - std::optional<PrimType> ToT = classify(ToType); + OptPrimType ToT = classify(ToType); assert(!ToType->isReferenceType()); @@ -7149,7 +7174,7 @@ bool Compiler<Emitter>::emitBuiltinBitCast(const CastExpr *E) { if (SubExpr->isGLValue() || FromType->isVectorType()) { if (!this->visit(SubExpr)) return false; - } else if (std::optional<PrimType> FromT = classify(SubExpr)) { + } else if (OptPrimType FromT = classify(SubExpr)) { unsigned TempOffset = allocateLocalPrimitive(SubExpr, *FromT, /*IsConst=*/true); if (!this->visit(SubExpr)) diff --git a/clang/lib/AST/ByteCode/Compiler.h b/clang/lib/AST/ByteCode/Compiler.h index debee672..3a26342 100644 --- a/clang/lib/AST/ByteCode/Compiler.h +++ b/clang/lib/AST/ByteCode/Compiler.h @@ -254,12 +254,8 @@ protected: /// If the function does not exist yet, it is compiled. const Function *getFunction(const FunctionDecl *FD); - std::optional<PrimType> classify(const Expr *E) const { - return Ctx.classify(E); - } - std::optional<PrimType> classify(QualType Ty) const { - return Ctx.classify(Ty); - } + OptPrimType classify(const Expr *E) const { return Ctx.classify(E); } + OptPrimType classify(QualType Ty) const { return Ctx.classify(Ty); } /// Classifies a known primitive type. PrimType classifyPrim(QualType Ty) const { @@ -306,7 +302,7 @@ protected: bool visitInitList(ArrayRef<const Expr *> Inits, const Expr *ArrayFiller, const Expr *E); bool visitArrayElemInit(unsigned ElemIndex, const Expr *Init, - std::optional<PrimType> InitT); + OptPrimType InitT); bool visitCallArgs(ArrayRef<const Expr *> Args, const FunctionDecl *FuncDecl, bool Activate); @@ -405,6 +401,8 @@ private: bool checkLiteralType(const Expr *E); bool maybeEmitDeferredVarInit(const VarDecl *VD); + bool refersToUnion(const Expr *E); + protected: /// Variable to storage mapping. llvm::DenseMap<const ValueDecl *, Scope::Local> Locals; @@ -435,7 +433,7 @@ protected: bool InitStackActive = false; /// Type of the expression returned by the function. - std::optional<PrimType> ReturnType; + OptPrimType ReturnType; /// Switch case mapping. 
CaseMap CaseLabels; diff --git a/clang/lib/AST/ByteCode/Context.cpp b/clang/lib/AST/ByteCode/Context.cpp index a629ff9..aaeb52e 100644 --- a/clang/lib/AST/ByteCode/Context.cpp +++ b/clang/lib/AST/ByteCode/Context.cpp @@ -52,6 +52,19 @@ bool Context::isPotentialConstantExpr(State &Parent, const FunctionDecl *FD) { return Func->isValid(); } +void Context::isPotentialConstantExprUnevaluated(State &Parent, const Expr *E, + const FunctionDecl *FD) { + assert(Stk.empty()); + ++EvalID; + size_t StackSizeBefore = Stk.size(); + Compiler<EvalEmitter> C(*this, *P, Parent, Stk); + + if (!C.interpretCall(FD, E)) { + C.cleanup(); + Stk.clearTo(StackSizeBefore); + } +} + bool Context::evaluateAsRValue(State &Parent, const Expr *E, APValue &Result) { ++EvalID; bool Recursing = !Stk.empty(); @@ -222,6 +235,43 @@ bool Context::evaluateCharRange(State &Parent, const Expr *SizeExpr, return evaluateStringRepr(Parent, SizeExpr, PtrExpr, Result); } +bool Context::evaluateStrlen(State &Parent, const Expr *E, uint64_t &Result) { + assert(Stk.empty()); + Compiler<EvalEmitter> C(*this, *P, Parent, Stk); + + auto PtrRes = C.interpretAsPointer(E, [&](const Pointer &Ptr) { + const Descriptor *FieldDesc = Ptr.getFieldDesc(); + if (!FieldDesc->isPrimitiveArray()) + return false; + + unsigned N = Ptr.getNumElems(); + if (Ptr.elemSize() == 1) { + Result = strnlen(reinterpret_cast<const char *>(Ptr.getRawAddress()), N); + return Result != N; + } + + PrimType ElemT = FieldDesc->getPrimType(); + Result = 0; + for (unsigned I = Ptr.getIndex(); I != N; ++I) { + INT_TYPE_SWITCH(ElemT, { + auto Elem = Ptr.elem<T>(I); + if (Elem.isZero()) + return true; + ++Result; + }); + } + // We didn't find a 0 byte. + return false; + }); + + if (PtrRes.isInvalid()) { + C.cleanup(); + Stk.clear(); + return false; + } + return true; +} + const LangOptions &Context::getLangOpts() const { return Ctx.getLangOpts(); } static PrimType integralTypeToPrimTypeS(unsigned BitWidth) { @@ -256,7 +306,7 @@ static PrimType integralTypeToPrimTypeU(unsigned BitWidth) { llvm_unreachable("Unhandled BitWidth"); } -std::optional<PrimType> Context::classify(QualType T) const { +OptPrimType Context::classify(QualType T) const { if (const auto *BT = dyn_cast<BuiltinType>(T.getCanonicalType())) { auto Kind = BT->getKind(); @@ -492,7 +542,7 @@ const Function *Context::getOrCreateFunction(const FunctionDecl *FuncDecl) { // Assign descriptors to all parameters. // Composite objects are lowered to pointers. for (const ParmVarDecl *PD : FuncDecl->parameters()) { - std::optional<PrimType> T = classify(PD->getType()); + OptPrimType T = classify(PD->getType()); PrimType PT = T.value_or(PT_Ptr); Descriptor *Desc = P->createDescriptor(PD, PT); ParamDescriptors.insert({ParamOffset, {PT, Desc}}); @@ -520,7 +570,7 @@ const Function *Context::getOrCreateObjCBlock(const BlockExpr *E) { // Assign descriptors to all parameters. // Composite objects are lowered to pointers. for (const ParmVarDecl *PD : BD->parameters()) { - std::optional<PrimType> T = classify(PD->getType()); + OptPrimType T = classify(PD->getType()); PrimType PT = T.value_or(PT_Ptr); Descriptor *Desc = P->createDescriptor(PD, PT); ParamDescriptors.insert({ParamOffset, {PT, Desc}}); diff --git a/clang/lib/AST/ByteCode/Context.h b/clang/lib/AST/ByteCode/Context.h index 5898ab5..62ef529 100644 --- a/clang/lib/AST/ByteCode/Context.h +++ b/clang/lib/AST/ByteCode/Context.h @@ -47,7 +47,9 @@ public: ~Context(); /// Checks if a function is a potential constant expression. 
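Sketch of the computation Context::evaluateStrlen above performs (illustrative; __builtin_strlen is used here only because it exposes the same compile-time strlen): given a pointer into a constant character array, count elements until the first zero element, and give up if none is found before the end of the array.

  constexpr auto n = __builtin_strlen("bytecode");         // folds to 8
  static_assert(n == 8, "");
  static_assert(__builtin_strlen("byte\0code") == 4, "");  // stops at the embedded NUL
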
- bool isPotentialConstantExpr(State &Parent, const FunctionDecl *FnDecl); + bool isPotentialConstantExpr(State &Parent, const FunctionDecl *FD); + void isPotentialConstantExprUnevaluated(State &Parent, const Expr *E, + const FunctionDecl *FD); /// Evaluates a toplevel expression as an rvalue. bool evaluateAsRValue(State &Parent, const Expr *E, APValue &Result); @@ -64,6 +66,10 @@ public: bool evaluateCharRange(State &Parent, const Expr *SizeExpr, const Expr *PtrExpr, std::string &Result); + /// Evalute \param E and if it can be evaluated to a string literal, + /// run strlen() on it. + bool evaluateStrlen(State &Parent, const Expr *E, uint64_t &Result); + /// Returns the AST context. ASTContext &getASTContext() const { return Ctx; } /// Returns the language options. @@ -76,10 +82,10 @@ public: uint32_t getBitWidth(QualType T) const { return Ctx.getIntWidth(T); } /// Classifies a type. - std::optional<PrimType> classify(QualType T) const; + OptPrimType classify(QualType T) const; /// Classifies an expression. - std::optional<PrimType> classify(const Expr *E) const { + OptPrimType classify(const Expr *E) const { assert(E); if (E->isGLValue()) return PT_Ptr; diff --git a/clang/lib/AST/ByteCode/Descriptor.h b/clang/lib/AST/ByteCode/Descriptor.h index 0227e4c..4c925f6 100644 --- a/clang/lib/AST/ByteCode/Descriptor.h +++ b/clang/lib/AST/ByteCode/Descriptor.h @@ -164,7 +164,7 @@ public: /// The primitive type this descriptor was created for, /// or the primitive element type in case this is /// a primitive array. - const std::optional<PrimType> PrimT = std::nullopt; + const OptPrimType PrimT = std::nullopt; /// Flag indicating if the block is mutable. const bool IsConst = false; /// Flag indicating if a field is mutable. diff --git a/clang/lib/AST/ByteCode/EvalEmitter.cpp b/clang/lib/AST/ByteCode/EvalEmitter.cpp index 5498065..81ebc56 100644 --- a/clang/lib/AST/ByteCode/EvalEmitter.cpp +++ b/clang/lib/AST/ByteCode/EvalEmitter.cpp @@ -90,6 +90,19 @@ EvaluationResult EvalEmitter::interpretAsPointer(const Expr *E, return std::move(this->EvalResult); } +bool EvalEmitter::interpretCall(const FunctionDecl *FD, const Expr *E) { + // Add parameters to the parameter map. The values in the ParamOffset don't + // matter in this case as reading from them can't ever work. + for (const ParmVarDecl *PD : FD->parameters()) { + this->Params.insert({PD, {0, false}}); + } + + if (!this->visit(E)) + return false; + PrimType T = Ctx.classify(E).value_or(PT_Ptr); + return this->emitPop(T, E); +} + void EvalEmitter::emitLabel(LabelTy Label) { CurrentLabel = Label; } EvalEmitter::LabelTy EvalEmitter::getLabel() { return NextLabel++; } @@ -311,7 +324,7 @@ void EvalEmitter::updateGlobalTemporaries() { const Pointer &Ptr = P.getPtrGlobal(*GlobalIndex); APValue *Cached = Temp->getOrCreateValue(true); - if (std::optional<PrimType> T = Ctx.classify(E->getType())) { + if (OptPrimType T = Ctx.classify(E->getType())) { TYPE_SWITCH( *T, { *Cached = Ptr.deref<T>().toAPValue(Ctx.getASTContext()); }); } else { diff --git a/clang/lib/AST/ByteCode/EvalEmitter.h b/clang/lib/AST/ByteCode/EvalEmitter.h index 7303adb..2fe7da6 100644 --- a/clang/lib/AST/ByteCode/EvalEmitter.h +++ b/clang/lib/AST/ByteCode/EvalEmitter.h @@ -40,6 +40,9 @@ public: EvaluationResult interpretDecl(const VarDecl *VD, bool CheckFullyInitialized); /// Interpret the given Expr to a Pointer. EvaluationResult interpretAsPointer(const Expr *E, PtrCallback PtrCB); + /// Interpret the given expression as if it was in the body of the given + /// function, i.e. 
the parameters of the function are available for use. + bool interpretCall(const FunctionDecl *FD, const Expr *E); /// Clean up all resources. void cleanup(); diff --git a/clang/lib/AST/ByteCode/EvaluationResult.cpp b/clang/lib/AST/ByteCode/EvaluationResult.cpp index f59612b..b11531f 100644 --- a/clang/lib/AST/ByteCode/EvaluationResult.cpp +++ b/clang/lib/AST/ByteCode/EvaluationResult.cpp @@ -204,7 +204,7 @@ static void collectBlocks(const Pointer &Ptr, } else if (Desc->isPrimitiveArray() && Desc->getPrimType() == PT_Ptr) { for (unsigned I = 0; I != Desc->getNumElems(); ++I) { - const Pointer &ElemPointee = Ptr.atIndex(I).deref<Pointer>(); + const Pointer &ElemPointee = Ptr.elem<Pointer>(I); if (isUsefulPtr(ElemPointee) && !Blocks.contains(ElemPointee.block())) collectBlocks(ElemPointee, Blocks); } diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp index df5e3be..5463aec 100644 --- a/clang/lib/AST/ByteCode/Interp.cpp +++ b/clang/lib/AST/ByteCode/Interp.cpp @@ -142,8 +142,12 @@ static bool diagnoseUnknownDecl(InterpState &S, CodePtr OpPC, return false; if (isa<ParmVarDecl>(D)) { - if (D->getType()->isReferenceType()) + if (D->getType()->isReferenceType()) { + if (S.inConstantContext() && S.getLangOpts().CPlusPlus && + !S.getLangOpts().CPlusPlus11) + diagnoseNonConstVariable(S, OpPC, D); return false; + } const SourceInfo &Loc = S.Current->getSource(OpPC); if (S.getLangOpts().CPlusPlus11) { @@ -661,6 +665,9 @@ bool CheckInitialized(InterpState &S, CodePtr OpPC, const Pointer &Ptr, if (Ptr.isInitialized()) return true; + if (Ptr.isExtern() && S.checkingPotentialConstantExpression()) + return false; + if (const auto *VD = Ptr.getDeclDesc()->asVarDecl(); VD && (VD->isConstexpr() || VD->hasGlobalStorage())) { diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h index ce0ebdd..9012442 100644 --- a/clang/lib/AST/ByteCode/Interp.h +++ b/clang/lib/AST/ByteCode/Interp.h @@ -468,10 +468,10 @@ inline bool Mulc(InterpState &S, CodePtr OpPC) { const Pointer &Result = S.Stk.peek<Pointer>(); if constexpr (std::is_same_v<T, Floating>) { - APFloat A = LHS.atIndex(0).deref<Floating>().getAPFloat(); - APFloat B = LHS.atIndex(1).deref<Floating>().getAPFloat(); - APFloat C = RHS.atIndex(0).deref<Floating>().getAPFloat(); - APFloat D = RHS.atIndex(1).deref<Floating>().getAPFloat(); + APFloat A = LHS.elem<Floating>(0).getAPFloat(); + APFloat B = LHS.elem<Floating>(1).getAPFloat(); + APFloat C = RHS.elem<Floating>(0).getAPFloat(); + APFloat D = RHS.elem<Floating>(1).getAPFloat(); APFloat ResR(A.getSemantics()); APFloat ResI(A.getSemantics()); @@ -480,20 +480,20 @@ inline bool Mulc(InterpState &S, CodePtr OpPC) { // Copy into the result. Floating RA = S.allocFloat(A.getSemantics()); RA.copy(ResR); - Result.atIndex(0).deref<Floating>() = RA; // Floating(ResR); + Result.elem<Floating>(0) = RA; // Floating(ResR); Result.atIndex(0).initialize(); Floating RI = S.allocFloat(A.getSemantics()); RI.copy(ResI); - Result.atIndex(1).deref<Floating>() = RI; // Floating(ResI); + Result.elem<Floating>(1) = RI; // Floating(ResI); Result.atIndex(1).initialize(); Result.initialize(); } else { // Integer element type. 
- const T &LHSR = LHS.atIndex(0).deref<T>(); - const T &LHSI = LHS.atIndex(1).deref<T>(); - const T &RHSR = RHS.atIndex(0).deref<T>(); - const T &RHSI = RHS.atIndex(1).deref<T>(); + const T &LHSR = LHS.elem<T>(0); + const T &LHSI = LHS.elem<T>(1); + const T &RHSR = RHS.elem<T>(0); + const T &RHSI = RHS.elem<T>(1); unsigned Bits = LHSR.bitWidth(); // real(Result) = (real(LHS) * real(RHS)) - (imag(LHS) * imag(RHS)) @@ -503,7 +503,7 @@ inline bool Mulc(InterpState &S, CodePtr OpPC) { T B; if (T::mul(LHSI, RHSI, Bits, &B)) return false; - if (T::sub(A, B, Bits, &Result.atIndex(0).deref<T>())) + if (T::sub(A, B, Bits, &Result.elem<T>(0))) return false; Result.atIndex(0).initialize(); @@ -512,7 +512,7 @@ inline bool Mulc(InterpState &S, CodePtr OpPC) { return false; if (T::mul(LHSI, RHSR, Bits, &B)) return false; - if (T::add(A, B, Bits, &Result.atIndex(1).deref<T>())) + if (T::add(A, B, Bits, &Result.elem<T>(1))) return false; Result.atIndex(1).initialize(); Result.initialize(); @@ -528,10 +528,10 @@ inline bool Divc(InterpState &S, CodePtr OpPC) { const Pointer &Result = S.Stk.peek<Pointer>(); if constexpr (std::is_same_v<T, Floating>) { - APFloat A = LHS.atIndex(0).deref<Floating>().getAPFloat(); - APFloat B = LHS.atIndex(1).deref<Floating>().getAPFloat(); - APFloat C = RHS.atIndex(0).deref<Floating>().getAPFloat(); - APFloat D = RHS.atIndex(1).deref<Floating>().getAPFloat(); + APFloat A = LHS.elem<Floating>(0).getAPFloat(); + APFloat B = LHS.elem<Floating>(1).getAPFloat(); + APFloat C = RHS.elem<Floating>(0).getAPFloat(); + APFloat D = RHS.elem<Floating>(1).getAPFloat(); APFloat ResR(A.getSemantics()); APFloat ResI(A.getSemantics()); @@ -540,21 +540,21 @@ inline bool Divc(InterpState &S, CodePtr OpPC) { // Copy into the result. Floating RA = S.allocFloat(A.getSemantics()); RA.copy(ResR); - Result.atIndex(0).deref<Floating>() = RA; // Floating(ResR); + Result.elem<Floating>(0) = RA; // Floating(ResR); Result.atIndex(0).initialize(); Floating RI = S.allocFloat(A.getSemantics()); RI.copy(ResI); - Result.atIndex(1).deref<Floating>() = RI; // Floating(ResI); + Result.elem<Floating>(1) = RI; // Floating(ResI); Result.atIndex(1).initialize(); Result.initialize(); } else { // Integer element type. 
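Worked instance of the arithmetic Mulc implements, restated from the comments above; Divc below uses the matching quotient formula, dividing both parts by real(RHS)^2 + imag(RHS)^2.

  // (a + b*i) * (c + d*i) = (a*c - b*d) + (a*d + b*c)*i
  constexpr int a = 3, b = 4, c = 5, d = 6;
  static_assert(a * c - b * d == -9, "");   // real part
  static_assert(a * d + b * c == 38, "");   // imaginary part
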
- const T &LHSR = LHS.atIndex(0).deref<T>(); - const T &LHSI = LHS.atIndex(1).deref<T>(); - const T &RHSR = RHS.atIndex(0).deref<T>(); - const T &RHSI = RHS.atIndex(1).deref<T>(); + const T &LHSR = LHS.elem<T>(0); + const T &LHSI = LHS.elem<T>(1); + const T &RHSR = RHS.elem<T>(0); + const T &RHSI = RHS.elem<T>(1); unsigned Bits = LHSR.bitWidth(); const T Zero = T::from(0, Bits); @@ -581,8 +581,8 @@ inline bool Divc(InterpState &S, CodePtr OpPC) { } // real(Result) = ((real(LHS) * real(RHS)) + (imag(LHS) * imag(RHS))) / Den - T &ResultR = Result.atIndex(0).deref<T>(); - T &ResultI = Result.atIndex(1).deref<T>(); + T &ResultR = Result.elem<T>(0); + T &ResultI = Result.elem<T>(1); if (T::mul(LHSR, RHSR, Bits, &A) || T::mul(LHSI, RHSI, Bits, &B)) return false; @@ -1308,7 +1308,7 @@ bool Dup(InterpState &S, CodePtr OpPC) { template <PrimType Name, class T = typename PrimConv<Name>::T> bool Pop(InterpState &S, CodePtr OpPC) { - S.Stk.pop<T>(); + S.Stk.discard<T>(); return true; } @@ -1885,6 +1885,16 @@ inline bool Dump(InterpState &S, CodePtr OpPC) { return true; } +inline bool CheckNull(InterpState &S, CodePtr OpPC) { + const auto &Ptr = S.Stk.peek<Pointer>(); + if (Ptr.isZero()) { + S.FFDiag(S.Current->getSource(OpPC), + diag::note_constexpr_dereferencing_null); + return S.noteUndefinedBehavior(); + } + return true; +} + inline bool VirtBaseHelper(InterpState &S, CodePtr OpPC, const RecordDecl *Decl, const Pointer &Ptr) { Pointer Base = Ptr; @@ -1973,6 +1983,16 @@ static inline bool Activate(InterpState &S, CodePtr OpPC) { return true; } +static inline bool ActivateThisField(InterpState &S, CodePtr OpPC, uint32_t I) { + if (S.checkingPotentialConstantExpression()) + return false; + + const Pointer &Ptr = S.Current->getThis(); + assert(Ptr.atField(I).canBeInitialized()); + Ptr.atField(I).activate(); + return true; +} + template <PrimType Name, class T = typename PrimConv<Name>::T> bool StoreActivate(InterpState &S, CodePtr OpPC) { const T &Value = S.Stk.pop<T>(); @@ -3093,7 +3113,7 @@ inline bool ArrayElem(InterpState &S, CodePtr OpPC, uint32_t Index) { return false; assert(Ptr.atIndex(Index).getFieldDesc()->getPrimType() == Name); - S.Stk.push<T>(Ptr.atIndex(Index).deref<T>()); + S.Stk.push<T>(Ptr.elem<T>(Index)); return true; } @@ -3105,7 +3125,7 @@ inline bool ArrayElemPop(InterpState &S, CodePtr OpPC, uint32_t Index) { return false; assert(Ptr.atIndex(Index).getFieldDesc()->getPrimType() == Name); - S.Stk.push<T>(Ptr.atIndex(Index).deref<T>()); + S.Stk.push<T>(Ptr.elem<T>(Index)); return true; } @@ -3547,12 +3567,22 @@ inline bool BitCastPrim(InterpState &S, CodePtr OpPC, bool TargetIsUCharOrByte, Floating Result = S.allocFloat(*Sem); Floating::bitcastFromMemory(Buff.data(), *Sem, &Result); S.Stk.push<Floating>(Result); - - // S.Stk.push<Floating>(T::bitcastFromMemory(Buff.data(), *Sem)); } else if constexpr (needsAlloc<T>()) { T Result = S.allocAP<T>(ResultBitWidth); T::bitcastFromMemory(Buff.data(), ResultBitWidth, &Result); S.Stk.push<T>(Result); + } else if constexpr (std::is_same_v<T, Boolean>) { + // Only allow to cast single-byte integers to bool if they are either 0 + // or 1. 
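The restriction stated in the comment above is implemented immediately below; an illustrative constant-evaluation sketch, assuming a recent clang with __builtin_bit_cast in C++17 or later:

  constexpr bool ok = __builtin_bit_cast(bool, static_cast<unsigned char>(1));
  static_assert(ok, "");
  // constexpr bool bad = __builtin_bit_cast(bool, static_cast<unsigned char>(2));
  //   rejected: 2 is not a representable value of type 'bool'
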
+ assert(FullBitWidth.getQuantity() == 8); + auto Val = static_cast<unsigned int>(Buff[0]); + if (Val > 1) { + S.FFDiag(S.Current->getSource(OpPC), + diag::note_constexpr_bit_cast_unrepresentable_value) + << S.getASTContext().BoolTy << Val; + return false; + } + S.Stk.push<T>(T::bitcastFromMemory(Buff.data(), ResultBitWidth)); } else { assert(!Sem); S.Stk.push<T>(T::bitcastFromMemory(Buff.data(), ResultBitWidth)); diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp index 462b9a1..19d4c0c 100644 --- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp +++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp @@ -53,7 +53,7 @@ static APSInt popToAPSInt(InterpStack &Stk, PrimType T) { static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) { assert(QT->isSignedIntegerOrEnumerationType() || QT->isUnsignedIntegerOrEnumerationType()); - std::optional<PrimType> T = S.getContext().classify(QT); + OptPrimType T = S.getContext().classify(QT); assert(T); unsigned BitWidth = S.getASTContext().getTypeSize(QT); @@ -1098,9 +1098,9 @@ static bool interp__builtin_complex(InterpState &S, CodePtr OpPC, const Floating &Arg1 = S.Stk.pop<Floating>(); Pointer &Result = S.Stk.peek<Pointer>(); - Result.atIndex(0).deref<Floating>() = Arg1; + Result.elem<Floating>(0) = Arg1; Result.atIndex(0).initialize(); - Result.atIndex(1).deref<Floating>() = Arg2; + Result.elem<Floating>(1) = Arg2; Result.atIndex(1).initialize(); Result.initialize(); @@ -1530,7 +1530,7 @@ static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC, return false; bool IsArray = NumElems.ugt(1); - std::optional<PrimType> ElemT = S.getContext().classify(ElemType); + OptPrimType ElemT = S.getContext().classify(ElemType); DynamicAllocator &Allocator = S.getAllocator(); if (ElemT) { Block *B = @@ -1644,10 +1644,10 @@ static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC, unsigned NumElems = Arg.getNumElems(); INT_TYPE_SWITCH_NO_BOOL(ElemT, { - T Result = Arg.atIndex(0).deref<T>(); + T Result = Arg.elem<T>(0); unsigned BitWidth = Result.bitWidth(); for (unsigned I = 1; I != NumElems; ++I) { - T Elem = Arg.atIndex(I).deref<T>(); + T Elem = Arg.elem<T>(I); T PrevResult = Result; if (ID == Builtin::BI__builtin_reduce_add) { @@ -1723,11 +1723,10 @@ static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC, for (unsigned I = 0; I != NumElems; ++I) { INT_TYPE_SWITCH_NO_BOOL(ElemT, { if (BuiltinID == Builtin::BI__builtin_elementwise_popcount) { - Dst.atIndex(I).deref<T>() = - T::from(Arg.atIndex(I).deref<T>().toAPSInt().popcount()); + Dst.elem<T>(I) = T::from(Arg.elem<T>(I).toAPSInt().popcount()); } else { - Dst.atIndex(I).deref<T>() = T::from( - Arg.atIndex(I).deref<T>().toAPSInt().reverseBits().getZExtValue()); + Dst.elem<T>(I) = + T::from(Arg.elem<T>(I).toAPSInt().reverseBits().getZExtValue()); } Dst.atIndex(I).initialize(); }); @@ -2296,8 +2295,8 @@ static bool interp__builtin_elementwise_sat(InterpState &S, CodePtr OpPC, APSInt Elem1; APSInt Elem2; INT_TYPE_SWITCH_NO_BOOL(ElemT, { - Elem1 = LHS.atIndex(I).deref<T>().toAPSInt(); - Elem2 = RHS.atIndex(I).deref<T>().toAPSInt(); + Elem1 = LHS.elem<T>(I).toAPSInt(); + Elem2 = RHS.elem<T>(I).toAPSInt(); }); APSInt Result; @@ -2880,7 +2879,7 @@ static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src, auto copyField = [&](const Record::Field &F, bool Activate) -> bool { Pointer DestField = Dest.atField(F.Offset); - if (std::optional<PrimType> FT = S.Ctx.classify(F.Decl->getType())) { + if (OptPrimType FT = 
S.Ctx.classify(F.Decl->getType())) { TYPE_SWITCH(*FT, { DestField.deref<T>() = Src.atField(F.Offset).deref<T>(); if (Src.atField(F.Offset).isInitialized()) @@ -2942,7 +2941,7 @@ static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src, for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) { Pointer DestElem = Dest.atIndex(I); TYPE_SWITCH(ET, { - DestElem.deref<T>() = Src.atIndex(I).deref<T>(); + DestElem.deref<T>() = Src.elem<T>(I); DestElem.initialize(); }); } diff --git a/clang/lib/AST/ByteCode/Opcodes.td b/clang/lib/AST/ByteCode/Opcodes.td index 804853d..abfed77 100644 --- a/clang/lib/AST/ByteCode/Opcodes.td +++ b/clang/lib/AST/ByteCode/Opcodes.td @@ -510,6 +510,7 @@ def StoreBitFieldActivate : StoreBitFieldOpcode {} def StoreBitFieldActivatePop : StoreBitFieldOpcode {} def Activate : Opcode {} +def ActivateThisField : Opcode { let Args = [ArgUint32]; } // [Pointer, Value] -> [] def Init : StoreOpcode {} @@ -865,6 +866,7 @@ def CheckNewTypeMismatchArray : Opcode { def IsConstantContext: Opcode; def CheckAllocations : Opcode; +def CheckNull : Opcode; def BitCastTypeClass : TypeClass { let Types = [Uint8, Sint8, Uint16, Sint16, Uint32, Sint32, Uint64, Sint64, diff --git a/clang/lib/AST/ByteCode/Pointer.cpp b/clang/lib/AST/ByteCode/Pointer.cpp index 2f9ecf9..4019b74 100644 --- a/clang/lib/AST/ByteCode/Pointer.cpp +++ b/clang/lib/AST/ByteCode/Pointer.cpp @@ -665,7 +665,7 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx, return false; // Primitive values. - if (std::optional<PrimType> T = Ctx.classify(Ty)) { + if (OptPrimType T = Ctx.classify(Ty)) { TYPE_SWITCH(*T, R = Ptr.deref<T>().toAPValue(ASTCtx)); return true; } @@ -682,7 +682,7 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx, const Pointer &FP = Ptr.atField(F.Offset); QualType FieldTy = F.Decl->getType(); if (FP.isActive()) { - if (std::optional<PrimType> T = Ctx.classify(FieldTy)) { + if (OptPrimType T = Ctx.classify(FieldTy)) { TYPE_SWITCH(*T, Value = FP.deref<T>().toAPValue(ASTCtx)); } else { Ok &= Composite(FieldTy, FP, Value); @@ -705,7 +705,7 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx, const Pointer &FP = Ptr.atField(FD->Offset); APValue &Value = R.getStructField(I); - if (std::optional<PrimType> T = Ctx.classify(FieldTy)) { + if (OptPrimType T = Ctx.classify(FieldTy)) { TYPE_SWITCH(*T, Value = FP.deref<T>().toAPValue(ASTCtx)); } else { Ok &= Composite(FieldTy, FP, Value); @@ -743,7 +743,7 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx, for (unsigned I = 0; I < NumElems; ++I) { APValue &Slot = R.getArrayInitializedElt(I); const Pointer &EP = Ptr.atIndex(I); - if (std::optional<PrimType> T = Ctx.classify(ElemTy)) { + if (OptPrimType T = Ctx.classify(ElemTy)) { TYPE_SWITCH(*T, Slot = EP.deref<T>().toAPValue(ASTCtx)); } else { Ok &= Composite(ElemTy, EP.narrow(), Slot); @@ -757,17 +757,17 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx, QualType ElemTy = CT->getElementType(); if (ElemTy->isIntegerType()) { - std::optional<PrimType> ElemT = Ctx.classify(ElemTy); + OptPrimType ElemT = Ctx.classify(ElemTy); assert(ElemT); INT_TYPE_SWITCH(*ElemT, { - auto V1 = Ptr.atIndex(0).deref<T>(); - auto V2 = Ptr.atIndex(1).deref<T>(); + auto V1 = Ptr.elem<T>(0); + auto V2 = Ptr.elem<T>(1); R = APValue(V1.toAPSInt(), V2.toAPSInt()); return true; }); } else if (ElemTy->isFloatingType()) { - R = APValue(Ptr.atIndex(0).deref<Floating>().getAPFloat(), - Ptr.atIndex(1).deref<Floating>().getAPFloat()); + R = 
APValue(Ptr.elem<Floating>(0).getAPFloat(), + Ptr.elem<Floating>(1).getAPFloat()); return true; } return false; @@ -782,9 +782,8 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx, SmallVector<APValue> Values; Values.reserve(VT->getNumElements()); for (unsigned I = 0; I != VT->getNumElements(); ++I) { - TYPE_SWITCH(ElemT, { - Values.push_back(Ptr.atIndex(I).deref<T>().toAPValue(ASTCtx)); - }); + TYPE_SWITCH(ElemT, + { Values.push_back(Ptr.elem<T>(I).toAPValue(ASTCtx)); }); } assert(Values.size() == VT->getNumElements()); @@ -804,7 +803,7 @@ std::optional<APValue> Pointer::toRValue(const Context &Ctx, return toAPValue(ASTCtx); // Just load primitive types. - if (std::optional<PrimType> T = Ctx.classify(ResultType)) { + if (OptPrimType T = Ctx.classify(ResultType)) { TYPE_SWITCH(*T, return this->deref<T>().toAPValue(ASTCtx)); } diff --git a/clang/lib/AST/ByteCode/Pointer.h b/clang/lib/AST/ByteCode/Pointer.h index da74013..d17eba5 100644 --- a/clang/lib/AST/ByteCode/Pointer.h +++ b/clang/lib/AST/ByteCode/Pointer.h @@ -693,6 +693,25 @@ public: return *reinterpret_cast<T *>(asBlockPointer().Pointee->rawData() + Offset); } + /// Dereferences the element at index \p I. + /// This is equivalent to atIndex(I).deref<T>(). + template <typename T> T &elem(unsigned I) const { + assert(isLive() && "Invalid pointer"); + assert(isBlockPointer()); + assert(asBlockPointer().Pointee); + assert(isDereferencable()); + assert(getFieldDesc()->isPrimitiveArray()); + + unsigned ElemByteOffset = I * getFieldDesc()->getElemSize(); + if (isArrayRoot()) + return *reinterpret_cast<T *>(asBlockPointer().Pointee->rawData() + + asBlockPointer().Base + sizeof(InitMapPtr) + + ElemByteOffset); + + return *reinterpret_cast<T *>(asBlockPointer().Pointee->rawData() + Offset + + ElemByteOffset); + } + /// Whether this block can be read from at all. This is only true for /// block pointers that point to a valid location inside that block. bool isDereferencable() const { diff --git a/clang/lib/AST/ByteCode/PrimType.h b/clang/lib/AST/ByteCode/PrimType.h index a156ccc..38c29b9 100644 --- a/clang/lib/AST/ByteCode/PrimType.h +++ b/clang/lib/AST/ByteCode/PrimType.h @@ -13,6 +13,7 @@ #ifndef LLVM_CLANG_AST_INTERP_TYPE_H #define LLVM_CLANG_AST_INTERP_TYPE_H +#include "clang/Basic/UnsignedOrNone.h" #include "llvm/Support/raw_ostream.h" #include <climits> #include <cstddef> @@ -49,6 +50,38 @@ enum PrimType : unsigned { PT_MemberPtr = 14, }; +// Like std::optional<PrimType>, but only sizeof(PrimType). 
+class OptPrimType final { + unsigned V = ~0u; + +public: + OptPrimType() = default; + OptPrimType(std::nullopt_t) {} + OptPrimType(PrimType T) : V(static_cast<unsigned>(T)) {} + + explicit constexpr operator bool() const { return V != ~0u; } + PrimType operator*() const { + assert(operator bool()); + return static_cast<PrimType>(V); + } + + PrimType value_or(PrimType PT) const { + if (operator bool()) + return static_cast<PrimType>(V); + return PT; + } + + bool operator==(PrimType PT) const { + if (!operator bool()) + return false; + return V == static_cast<unsigned>(PT); + } + bool operator==(OptPrimType OPT) const { return V == OPT.V; } + bool operator!=(PrimType PT) const { return !(*this == PT); } + bool operator!=(OptPrimType OPT) const { return V != OPT.V; } +}; +static_assert(sizeof(OptPrimType) == sizeof(PrimType)); + inline constexpr bool isPtrType(PrimType T) { return T == PT_Ptr || T == PT_MemberPtr; } diff --git a/clang/lib/AST/ByteCode/Program.cpp b/clang/lib/AST/ByteCode/Program.cpp index 5ac0f59f..7002724 100644 --- a/clang/lib/AST/ByteCode/Program.cpp +++ b/clang/lib/AST/ByteCode/Program.cpp @@ -74,27 +74,25 @@ unsigned Program::createGlobalString(const StringLiteral *S, const Expr *Base) { const Pointer Ptr(G->block()); if (CharWidth == 1) { - std::memcpy(&Ptr.atIndex(0).deref<char>(), S->getString().data(), - StringLength); + std::memcpy(&Ptr.elem<char>(0), S->getString().data(), StringLength); } else { // Construct the string in storage. for (unsigned I = 0; I <= StringLength; ++I) { - Pointer Field = Ptr.atIndex(I); const uint32_t CodePoint = I == StringLength ? 0 : S->getCodeUnit(I); switch (CharType) { case PT_Sint8: { using T = PrimConv<PT_Sint8>::T; - Field.deref<T>() = T::from(CodePoint, BitWidth); + Ptr.elem<T>(I) = T::from(CodePoint, BitWidth); break; } case PT_Uint16: { using T = PrimConv<PT_Uint16>::T; - Field.deref<T>() = T::from(CodePoint, BitWidth); + Ptr.elem<T>(I) = T::from(CodePoint, BitWidth); break; } case PT_Uint32: { using T = PrimConv<PT_Uint32>::T; - Field.deref<T>() = T::from(CodePoint, BitWidth); + Ptr.elem<T>(I) = T::from(CodePoint, BitWidth); break; } default: @@ -171,7 +169,7 @@ unsigned Program::getOrCreateDummy(const DeclTy &D) { assert(!QT.isNull()); Descriptor *Desc; - if (std::optional<PrimType> T = Ctx.classify(QT)) + if (OptPrimType T = Ctx.classify(QT)) Desc = createDescriptor(D, *T, /*SourceTy=*/nullptr, std::nullopt, /*IsConst=*/QT.isConstQualified()); else @@ -250,7 +248,7 @@ std::optional<unsigned> Program::createGlobal(const DeclTy &D, QualType Ty, const bool IsConst = Ty.isConstQualified(); const bool IsTemporary = D.dyn_cast<const Expr *>(); const bool IsVolatile = Ty.isVolatileQualified(); - if (std::optional<PrimType> T = Ctx.classify(Ty)) + if (OptPrimType T = Ctx.classify(Ty)) Desc = createDescriptor(D, *T, nullptr, Descriptor::GlobalMD, IsConst, IsTemporary, /*IsMutable=*/false, IsVolatile); else @@ -373,7 +371,7 @@ Record *Program::getOrCreateRecord(const RecordDecl *RD) { const bool IsMutable = FD->isMutable(); const bool IsVolatile = FT.isVolatileQualified(); const Descriptor *Desc; - if (std::optional<PrimType> T = Ctx.classify(FT)) { + if (OptPrimType T = Ctx.classify(FT)) { Desc = createDescriptor(FD, *T, nullptr, std::nullopt, IsConst, /*isTemporary=*/false, IsMutable, IsVolatile); } else { @@ -412,7 +410,7 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty, // Array of well-known bounds. 
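Standalone sketch of the pattern behind the OptPrimType class added to PrimType.h above: an optional that reserves an impossible enumerator encoding as its empty state and therefore stays the size of the enum itself. Simplified names, not Clang's header.

  #include <cassert>
  #include <cstdint>
  #include <optional>

  enum class Prim : uint8_t { Sint32, Uint32, Ptr };

  class OptPrim {                       // sentinel-valued optional
    uint8_t V = 0xFF;                   // 0xFF never encodes a real Prim
  public:
    OptPrim() = default;
    OptPrim(Prim P) : V(static_cast<uint8_t>(P)) {}
    explicit operator bool() const { return V != 0xFF; }
    Prim operator*() const { assert(V != 0xFF); return static_cast<Prim>(V); }
  };

  static_assert(sizeof(OptPrim) == sizeof(Prim), "no size overhead");
  static_assert(sizeof(std::optional<Prim>) > sizeof(Prim), "the overhead avoided");

  int main() {
    OptPrim None, Some = Prim::Ptr;
    assert(!None && Some && *Some == Prim::Ptr);
  }
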
if (const auto *CAT = dyn_cast<ConstantArrayType>(ArrayType)) { size_t NumElems = CAT->getZExtSize(); - if (std::optional<PrimType> T = Ctx.classify(ElemTy)) { + if (OptPrimType T = Ctx.classify(ElemTy)) { // Arrays of primitives. unsigned ElemSize = primSize(*T); if (std::numeric_limits<unsigned>::max() / ElemSize <= NumElems) { @@ -439,7 +437,7 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty, // is forbidden on pointers to such objects. if (isa<IncompleteArrayType>(ArrayType) || isa<VariableArrayType>(ArrayType)) { - if (std::optional<PrimType> T = Ctx.classify(ElemTy)) { + if (OptPrimType T = Ctx.classify(ElemTy)) { return allocateDescriptor(D, *T, MDSize, IsConst, IsTemporary, Descriptor::UnknownSize{}); } else { @@ -462,7 +460,7 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty, // Complex types - represented as arrays of elements. if (const auto *CT = Ty->getAs<ComplexType>()) { - std::optional<PrimType> ElemTy = Ctx.classify(CT->getElementType()); + OptPrimType ElemTy = Ctx.classify(CT->getElementType()); if (!ElemTy) return nullptr; @@ -472,7 +470,7 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty, // Same with vector types. if (const auto *VT = Ty->getAs<VectorType>()) { - std::optional<PrimType> ElemTy = Ctx.classify(VT->getElementType()); + OptPrimType ElemTy = Ctx.classify(VT->getElementType()); if (!ElemTy) return nullptr; diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp index 8797ead..0d12161 100644 --- a/clang/lib/AST/ExprConstant.cpp +++ b/clang/lib/AST/ExprConstant.cpp @@ -9346,9 +9346,13 @@ bool LValueExprEvaluator::VisitUnaryDeref(const UnaryOperator *E) { // [C++26][expr.unary.op] // If the operand points to an object or function, the result // denotes that object or function; otherwise, the behavior is undefined. - return Success && - (!E->getType().getNonReferenceType()->isObjectType() || - findCompleteObject(Info, E, AK_Dereference, Result, E->getType())); + // Because &(*(type*)0) is a common pattern, we do not fail the evaluation + // immediately. + if (!Success || !E->getType().getNonReferenceType()->isObjectType()) + return Success; + return bool(findCompleteObject(Info, E, AK_Dereference, Result, + E->getType())) || + Info.noteUndefinedBehavior(); } bool LValueExprEvaluator::VisitUnaryReal(const UnaryOperator *E) { @@ -18018,6 +18022,11 @@ bool Expr::isPotentialConstantExprUnevaluated(Expr *E, Info.InConstantContext = true; Info.CheckingPotentialConstantExpression = true; + if (Info.EnableNewConstInterp) { + Info.Ctx.getInterpContext().isPotentialConstantExprUnevaluated(Info, E, FD); + return Diags.empty(); + } + // Fabricate a call stack frame to give the arguments a plausible cover story. CallStackFrame Frame(Info, SourceLocation(), FD, /*This=*/nullptr, /*CallExpr=*/nullptr, CallRef()); @@ -18175,6 +18184,10 @@ bool Expr::EvaluateCharRangeAsString(APValue &Result, bool Expr::tryEvaluateStrLen(uint64_t &Result, ASTContext &Ctx) const { Expr::EvalStatus Status; EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantFold); + + if (Info.EnableNewConstInterp) + return Info.Ctx.getInterpContext().evaluateStrlen(Info, this, Result); + return EvaluateBuiltinStrLen(this, Result, Info); } diff --git a/clang/lib/AST/FormatString.cpp b/clang/lib/AST/FormatString.cpp index 5d3b56f..112b756d 100644 --- a/clang/lib/AST/FormatString.cpp +++ b/clang/lib/AST/FormatString.cpp @@ -320,6 +320,70 @@ bool clang::analyze_format_string::ParseUTF8InvalidSpecifier( // Methods on ArgType. 
//===----------------------------------------------------------------------===// +static bool namedTypeToLengthModifierKind(ASTContext &Ctx, QualType QT, + LengthModifier::Kind &K) { + if (!Ctx.getLangOpts().C99 && !Ctx.getLangOpts().CPlusPlus) + return false; + for (/**/; const auto *TT = QT->getAs<TypedefType>(); QT = TT->desugar()) { + const auto *TD = TT->getDecl(); + const auto *DC = TT->getDecl()->getDeclContext(); + if (DC->isTranslationUnit() || DC->isStdNamespace()) { + StringRef Name = TD->getIdentifier()->getName(); + if (Name == "size_t") { + K = LengthModifier::AsSizeT; + return true; + } else if (Name == "ssize_t" /*Not C99, but common in Unix.*/) { + K = LengthModifier::AsSizeT; + return true; + } else if (Name == "ptrdiff_t") { + K = LengthModifier::AsPtrDiff; + return true; + } else if (Name == "intmax_t") { + K = LengthModifier::AsIntMax; + return true; + } else if (Name == "uintmax_t") { + K = LengthModifier::AsIntMax; + return true; + } + } + } + if (const auto *PST = QT->getAs<PredefinedSugarType>()) { + using Kind = PredefinedSugarType::Kind; + switch (PST->getKind()) { + case Kind::SizeT: + case Kind::SignedSizeT: + K = LengthModifier::AsSizeT; + return true; + case Kind::PtrdiffT: + K = LengthModifier::AsPtrDiff; + return true; + } + llvm_unreachable("unexpected kind"); + } + return false; +} + +// Check whether T and E are compatible size_t/ptrdiff_t types. E must be +// consistent with LE. +// T is the type of the actual expression in the code to be checked, and E is +// the expected type parsed from the format string. +static clang::analyze_format_string::ArgType::MatchKind +matchesSizeTPtrdiffT(ASTContext &C, QualType T, QualType E) { + using MatchKind = clang::analyze_format_string::ArgType::MatchKind; + + if (!T->isIntegerType()) + return MatchKind::NoMatch; + + if (C.hasSameType(T, E)) + return MatchKind::Match; + + if (C.getCorrespondingSignedType(T.getCanonicalType()) != + C.getCorrespondingSignedType(E.getCanonicalType())) + return MatchKind::NoMatch; + + return MatchKind::NoMatchSignedness; +} + clang::analyze_format_string::ArgType::MatchKind ArgType::matchesType(ASTContext &C, QualType argTy) const { // When using the format attribute in C++, you can receive a function or an @@ -394,6 +458,10 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const { } case SpecificTy: { + if (TK != TypeKind::DontCare) { + return matchesSizeTPtrdiffT(C, argTy, T); + } + if (const EnumType *ETy = argTy->getAs<EnumType>()) { // If the enum is incomplete we know nothing about the underlying type. // Assume that it's 'int'. Do not use the underlying type for a scoped @@ -653,6 +721,12 @@ ArgType::matchesArgType(ASTContext &C, const ArgType &Other) const { if (Left.K == AK::SpecificTy) { if (Right.K == AK::SpecificTy) { + if (Left.TK != TypeKind::DontCare) { + return matchesSizeTPtrdiffT(C, Right.T, Left.T); + } else if (Right.TK != TypeKind::DontCare) { + return matchesSizeTPtrdiffT(C, Left.T, Right.T); + } + auto Canon1 = C.getCanonicalType(Left.T); auto Canon2 = C.getCanonicalType(Right.T); if (Canon1 == Canon2) @@ -706,7 +780,11 @@ QualType ArgType::getRepresentativeType(ASTContext &C) const { Res = C.CharTy; break; case SpecificTy: - Res = T; + if (TK == TypeKind::PtrdiffT || TK == TypeKind::SizeT) + // Using Name as name, so no need to show the uglified name. 
+ Res = T->getCanonicalTypeInternal(); + else + Res = T; break; case CStrTy: Res = C.getPointerType(C.CharTy); @@ -733,7 +811,6 @@ QualType ArgType::getRepresentativeType(ASTContext &C) const { std::string ArgType::getRepresentativeTypeName(ASTContext &C) const { std::string S = getRepresentativeType(C).getAsString(C.getPrintingPolicy()); - std::string Alias; if (Name) { // Use a specific name for this type, e.g. "size_t". @@ -1198,29 +1275,12 @@ FormatSpecifier::getCorrectedLengthModifier() const { return std::nullopt; } -bool FormatSpecifier::namedTypeToLengthModifier(QualType QT, +bool FormatSpecifier::namedTypeToLengthModifier(ASTContext &Ctx, QualType QT, LengthModifier &LM) { - for (/**/; const auto *TT = QT->getAs<TypedefType>(); - QT = TT->getDecl()->getUnderlyingType()) { - const TypedefNameDecl *Typedef = TT->getDecl(); - const IdentifierInfo *Identifier = Typedef->getIdentifier(); - if (Identifier->getName() == "size_t") { - LM.setKind(LengthModifier::AsSizeT); - return true; - } else if (Identifier->getName() == "ssize_t") { - // Not C99, but common in Unix. - LM.setKind(LengthModifier::AsSizeT); - return true; - } else if (Identifier->getName() == "intmax_t") { - LM.setKind(LengthModifier::AsIntMax); - return true; - } else if (Identifier->getName() == "uintmax_t") { - LM.setKind(LengthModifier::AsIntMax); - return true; - } else if (Identifier->getName() == "ptrdiff_t") { - LM.setKind(LengthModifier::AsPtrDiff); - return true; - } + if (LengthModifier::Kind Out = LengthModifier::Kind::None; + namedTypeToLengthModifierKind(Ctx, QT, Out)) { + LM.setKind(Out); + return true; } return false; } diff --git a/clang/lib/AST/ItaniumMangle.cpp b/clang/lib/AST/ItaniumMangle.cpp index 6d082b3..2a66793 100644 --- a/clang/lib/AST/ItaniumMangle.cpp +++ b/clang/lib/AST/ItaniumMangle.cpp @@ -2514,6 +2514,10 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty, mangleSourceNameWithAbiTags(cast<TypedefType>(Ty)->getDecl()); break; + case Type::PredefinedSugar: + mangleType(cast<PredefinedSugarType>(Ty)->desugar()); + break; + case Type::UnresolvedUsing: mangleSourceNameWithAbiTags( cast<UnresolvedUsingType>(Ty)->getDecl()); diff --git a/clang/lib/AST/PrintfFormatString.cpp b/clang/lib/AST/PrintfFormatString.cpp index 293164d..bcd44f0 100644 --- a/clang/lib/AST/PrintfFormatString.cpp +++ b/clang/lib/AST/PrintfFormatString.cpp @@ -543,7 +543,8 @@ ArgType PrintfSpecifier::getScalarArgType(ASTContext &Ctx, case LengthModifier::AsIntMax: return ArgType(Ctx.getIntMaxType(), "intmax_t"); case LengthModifier::AsSizeT: - return ArgType::makeSizeT(ArgType(Ctx.getSignedSizeType(), "ssize_t")); + return ArgType::makeSizeT( + ArgType(Ctx.getSignedSizeType(), "signed size_t")); case LengthModifier::AsInt3264: return Ctx.getTargetInfo().getTriple().isArch64Bit() ? ArgType(Ctx.LongLongTy, "__int64") @@ -626,9 +627,11 @@ ArgType PrintfSpecifier::getScalarArgType(ASTContext &Ctx, case LengthModifier::AsIntMax: return ArgType::PtrTo(ArgType(Ctx.getIntMaxType(), "intmax_t")); case LengthModifier::AsSizeT: - return ArgType::PtrTo(ArgType(Ctx.getSignedSizeType(), "ssize_t")); + return ArgType::PtrTo(ArgType::makeSizeT( + ArgType(Ctx.getSignedSizeType(), "signed size_t"))); case LengthModifier::AsPtrDiff: - return ArgType::PtrTo(ArgType(Ctx.getPointerDiffType(), "ptrdiff_t")); + return ArgType::PtrTo(ArgType::makePtrdiffT( + ArgType(Ctx.getPointerDiffType(), "ptrdiff_t"))); case LengthModifier::AsLongDouble: return ArgType(); // FIXME: Is this a known extension? 
case LengthModifier::AsAllocate: @@ -917,7 +920,7 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt, // Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in C99. if (LangOpt.C99 || LangOpt.CPlusPlus11) - namedTypeToLengthModifier(QT, LM); + namedTypeToLengthModifier(Ctx, QT, LM); // If fixing the length modifier was enough, we might be done. if (hasValidLengthModifier(Ctx.getTargetInfo(), LangOpt)) { diff --git a/clang/lib/AST/ScanfFormatString.cpp b/clang/lib/AST/ScanfFormatString.cpp index 7ee21c8..1227edd 100644 --- a/clang/lib/AST/ScanfFormatString.cpp +++ b/clang/lib/AST/ScanfFormatString.cpp @@ -251,9 +251,11 @@ ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const { case LengthModifier::AsIntMax: return ArgType::PtrTo(ArgType(Ctx.getIntMaxType(), "intmax_t")); case LengthModifier::AsSizeT: - return ArgType::PtrTo(ArgType(Ctx.getSignedSizeType(), "ssize_t")); + return ArgType::PtrTo(ArgType::makeSizeT( + ArgType(Ctx.getSignedSizeType(), "signed size_t"))); case LengthModifier::AsPtrDiff: - return ArgType::PtrTo(ArgType(Ctx.getPointerDiffType(), "ptrdiff_t")); + return ArgType::PtrTo(ArgType::makePtrdiffT( + ArgType(Ctx.getPointerDiffType(), "ptrdiff_t"))); case LengthModifier::AsLongDouble: // GNU extension. return ArgType::PtrTo(Ctx.LongLongTy); @@ -292,10 +294,11 @@ ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const { case LengthModifier::AsIntMax: return ArgType::PtrTo(ArgType(Ctx.getUIntMaxType(), "uintmax_t")); case LengthModifier::AsSizeT: - return ArgType::PtrTo(ArgType(Ctx.getSizeType(), "size_t")); - case LengthModifier::AsPtrDiff: return ArgType::PtrTo( - ArgType(Ctx.getUnsignedPointerDiffType(), "unsigned ptrdiff_t")); + ArgType::makeSizeT(ArgType(Ctx.getSizeType(), "size_t"))); + case LengthModifier::AsPtrDiff: + return ArgType::PtrTo(ArgType::makePtrdiffT( + ArgType(Ctx.getUnsignedPointerDiffType(), "unsigned ptrdiff_t"))); case LengthModifier::AsLongDouble: // GNU extension. return ArgType::PtrTo(Ctx.UnsignedLongLongTy); @@ -390,9 +393,11 @@ ArgType ScanfSpecifier::getArgType(ASTContext &Ctx) const { case LengthModifier::AsIntMax: return ArgType::PtrTo(ArgType(Ctx.getIntMaxType(), "intmax_t")); case LengthModifier::AsSizeT: - return ArgType::PtrTo(ArgType(Ctx.getSignedSizeType(), "ssize_t")); + return ArgType::PtrTo(ArgType::makeSizeT( + ArgType(Ctx.getSignedSizeType(), "signed size_t"))); case LengthModifier::AsPtrDiff: - return ArgType::PtrTo(ArgType(Ctx.getPointerDiffType(), "ptrdiff_t")); + return ArgType::PtrTo(ArgType::makePtrdiffT( + ArgType(Ctx.getPointerDiffType(), "ptrdiff_t"))); case LengthModifier::AsLongDouble: return ArgType(); // FIXME: Is this a known extension? case LengthModifier::AsAllocate: @@ -501,7 +506,7 @@ bool ScanfSpecifier::fixType(QualType QT, QualType RawQT, // Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in C99. if (LangOpt.C99 || LangOpt.CPlusPlus11) - namedTypeToLengthModifier(PT, LM); + namedTypeToLengthModifier(Ctx, PT, LM); // If fixing the length modifier was enough, we are done. 
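// --- Illustrative aside (editor's sketch; not part of the patch) ------------
// The kind of -Wformat checking the SizeT/PtrdiffT TypeKind plumbing above
// feeds. Behaviour is described for a typical 64-bit target where size_t is
// 'unsigned long'; the exact diagnostic wording is an assumption here.
#include <cstddef>
#include <cstdio>

void format_demo(std::size_t n, std::ptrdiff_t d, unsigned long ul, long l) {
  std::printf("%zu\n", n);  // Match: size_t passed for %zu.
  std::printf("%td\n", d);  // Match: ptrdiff_t passed for %td.
  std::printf("%zu\n", ul); // Match on this target: same canonical type as size_t.
  std::printf("%zd\n", n);  // NoMatchSignedness: %zd expects 'signed size_t'.
  std::printf("%zu\n", l);  // NoMatchSignedness: signed 'long' passed for size_t.
}
// ----------------------------------------------------------------------------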
if (hasValidLengthModifier(Ctx.getTargetInfo(), LangOpt)) { diff --git a/clang/lib/AST/TextNodeDumper.cpp b/clang/lib/AST/TextNodeDumper.cpp index 3d9397f..6b524cf 100644 --- a/clang/lib/AST/TextNodeDumper.cpp +++ b/clang/lib/AST/TextNodeDumper.cpp @@ -843,7 +843,10 @@ void TextNodeDumper::Visit(const APValue &Value, QualType Ty) { } ColorScope Color(OS, ShowColors, DeclNameColor); - OS << Value.getMemberPointerDecl()->getDeclName(); + if (const ValueDecl *MemDecl = Value.getMemberPointerDecl()) + OS << MemDecl->getDeclName(); + else + OS << "null"; return; } case APValue::AddrLabelDiff: diff --git a/clang/lib/AST/Type.cpp b/clang/lib/AST/Type.cpp index e5a1ab2..7444a2f 100644 --- a/clang/lib/AST/Type.cpp +++ b/clang/lib/AST/Type.cpp @@ -5613,3 +5613,15 @@ HLSLAttributedResourceType::findHandleTypeOnResource(const Type *RT) { } return nullptr; } + +StringRef PredefinedSugarType::getName(Kind KD) { + switch (KD) { + case Kind::SizeT: + return "__size_t"; + case Kind::SignedSizeT: + return "__signed_size_t"; + case Kind::PtrdiffT: + return "__ptrdiff_t"; + } + llvm_unreachable("unexpected kind"); +} diff --git a/clang/lib/AST/TypePrinter.cpp b/clang/lib/AST/TypePrinter.cpp index 818d213..deb453f 100644 --- a/clang/lib/AST/TypePrinter.cpp +++ b/clang/lib/AST/TypePrinter.cpp @@ -248,6 +248,7 @@ bool TypePrinter::canPrefixQualifiers(const Type *T, case Type::BTFTagAttributed: case Type::HLSLAttributedResource: case Type::HLSLInlineSpirv: + case Type::PredefinedSugar: CanPrefixQualifiers = true; break; @@ -1417,6 +1418,15 @@ void TypePrinter::printDependentBitIntBefore(const DependentBitIntType *T, void TypePrinter::printDependentBitIntAfter(const DependentBitIntType *T, raw_ostream &OS) {} +void TypePrinter::printPredefinedSugarBefore(const PredefinedSugarType *T, + raw_ostream &OS) { + OS << T->getIdentifier()->getName(); + spaceBeforePlaceHolder(OS); +} + +void TypePrinter::printPredefinedSugarAfter(const PredefinedSugarType *T, + raw_ostream &OS) {} + /// Appends the given scope to the end of a string. void TypePrinter::AppendScope(DeclContext *DC, raw_ostream &OS, DeclarationName NameInScope) { diff --git a/clang/lib/Analysis/LifetimeSafety.cpp b/clang/lib/Analysis/LifetimeSafety.cpp index e3a03cf..94b8197 100644 --- a/clang/lib/Analysis/LifetimeSafety.cpp +++ b/clang/lib/Analysis/LifetimeSafety.cpp @@ -23,9 +23,10 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/TimeProfiler.h" #include <cstdint> +#include <memory> -namespace clang { -namespace { +namespace clang::lifetimes { +namespace internal { /// Represents the storage location being borrowed, e.g., a specific stack /// variable. @@ -36,32 +37,6 @@ struct AccessPath { AccessPath(const clang::ValueDecl *D) : D(D) {} }; -/// A generic, type-safe wrapper for an ID, distinguished by its `Tag` type. -/// Used for giving ID to loans and origins. 
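// --- Illustrative aside (editor's sketch; not part of the patch) ------------
// Expressions whose types are now modelled by the PredefinedSugarType nodes
// named above ('__size_t', '__signed_size_t', '__ptrdiff_t'); the canonical
// integer type underneath still comes from the target.
void sugar_demo(int *a, int *b) {
  auto n = sizeof(int); // sizeof yields size_t, shown through the '__size_t' sugar
  auto d = a - b;       // pointer subtraction yields ptrdiff_t ('__ptrdiff_t')
  (void)n;
  (void)d;
}
// ----------------------------------------------------------------------------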
-template <typename Tag> struct ID { - uint32_t Value = 0; - - bool operator==(const ID<Tag> &Other) const { return Value == Other.Value; } - bool operator!=(const ID<Tag> &Other) const { return !(*this == Other); } - bool operator<(const ID<Tag> &Other) const { return Value < Other.Value; } - ID<Tag> operator++(int) { - ID<Tag> Tmp = *this; - ++Value; - return Tmp; - } - void Profile(llvm::FoldingSetNodeID &IDBuilder) const { - IDBuilder.AddInteger(Value); - } -}; - -template <typename Tag> -inline llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, ID<Tag> ID) { - return OS << ID.Value; -} - -using LoanID = ID<struct LoanTag>; -using OriginID = ID<struct OriginTag>; - /// Information about a single borrow, or "Loan". A loan is created when a /// reference or pointer is created. struct Loan { @@ -223,7 +198,9 @@ public: /// An origin is propagated from a source to a destination (e.g., p = q). AssignOrigin, /// An origin escapes the function by flowing into the return value. - ReturnOfOrigin + ReturnOfOrigin, + /// A marker for a specific point in the code, for testing. + TestPoint, }; private: @@ -310,6 +287,24 @@ public: } }; +/// A dummy-fact used to mark a specific point in the code for testing. +/// It is generated by recognizing a `void("__lifetime_test_point_...")` cast. +class TestPointFact : public Fact { + StringRef Annotation; + +public: + static bool classof(const Fact *F) { return F->getKind() == Kind::TestPoint; } + + explicit TestPointFact(StringRef Annotation) + : Fact(Kind::TestPoint), Annotation(Annotation) {} + + StringRef getAnnotation() const { return Annotation; } + + void dump(llvm::raw_ostream &OS) const override { + OS << "TestPoint (Annotation: \"" << getAnnotation() << "\")\n"; + } +}; + class FactManager { public: llvm::ArrayRef<const Fact *> getFacts(const CFGBlock *B) const { @@ -363,6 +358,7 @@ private: }; class FactGenerator : public ConstStmtVisitor<FactGenerator> { + using Base = ConstStmtVisitor<FactGenerator>; public: FactGenerator(FactManager &FactMgr, AnalysisDeclContext &AC) @@ -458,6 +454,15 @@ public: } } + void VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *FCE) { + // Check if this is a test point marker. If so, we are done with this + // expression. + if (VisitTestPoint(FCE)) + return; + // Visit as normal otherwise. + Base::VisitCXXFunctionalCastExpr(FCE); + } + private: // Check if a type has an origin. bool hasOrigin(QualType QT) { return QT->isPointerOrReferenceType(); } @@ -491,6 +496,27 @@ private: } } + /// Checks if the expression is a `void("__lifetime_test_point_...")` cast. + /// If so, creates a `TestPointFact` and returns true. + bool VisitTestPoint(const CXXFunctionalCastExpr *FCE) { + if (!FCE->getType()->isVoidType()) + return false; + + const auto *SubExpr = FCE->getSubExpr()->IgnoreParenImpCasts(); + if (const auto *SL = dyn_cast<StringLiteral>(SubExpr)) { + llvm::StringRef LiteralValue = SL->getString(); + const std::string Prefix = "__lifetime_test_point_"; + + if (LiteralValue.starts_with(Prefix)) { + StringRef Annotation = LiteralValue.drop_front(Prefix.length()); + CurrentBlockFacts.push_back( + FactMgr.createFact<TestPointFact>(Annotation)); + return true; + } + } + return false; + } + FactManager &FactMgr; AnalysisDeclContext &AC; llvm::SmallVector<Fact *> CurrentBlockFacts; @@ -502,6 +528,13 @@ private: enum class Direction { Forward, Backward }; +/// A `ProgramPoint` identifies a location in the CFG by pointing to a specific +/// `Fact`. identified by a lifetime-related event (`Fact`). 
+/// +/// A `ProgramPoint` has "after" semantics: it represents the location +/// immediately after its corresponding `Fact`. +using ProgramPoint = const Fact *; + /// A generic, policy-based driver for dataflow analyses. It combines /// the dataflow runner and the transferer logic into a single class hierarchy. /// @@ -524,14 +557,20 @@ template <typename Derived, typename LatticeType, Direction Dir> class DataflowAnalysis { public: using Lattice = LatticeType; - using Base = DataflowAnalysis<Derived, LatticeType, Dir>; + using Base = DataflowAnalysis<Derived, Lattice, Dir>; private: const CFG &Cfg; AnalysisDeclContext &AC; + /// The dataflow state before a basic block is processed. llvm::DenseMap<const CFGBlock *, Lattice> InStates; + /// The dataflow state after a basic block is processed. llvm::DenseMap<const CFGBlock *, Lattice> OutStates; + /// The dataflow state at a Program Point. + /// In a forward analysis, this is the state after the Fact at that point has + /// been applied, while in a backward analysis, it is the state before. + llvm::DenseMap<ProgramPoint, Lattice> PerPointStates; static constexpr bool isForward() { return Dir == Direction::Forward; } @@ -577,6 +616,8 @@ public: } } + Lattice getState(ProgramPoint P) const { return PerPointStates.lookup(P); } + Lattice getInState(const CFGBlock *B) const { return InStates.lookup(B); } Lattice getOutState(const CFGBlock *B) const { return OutStates.lookup(B); } @@ -590,18 +631,23 @@ public: getOutState(&B).dump(llvm::dbgs()); } +private: /// Computes the state at one end of a block by applying all its facts /// sequentially to a given state from the other end. - /// TODO: We might need to store intermediate states per-fact in the block for - /// later analysis. Lattice transferBlock(const CFGBlock *Block, Lattice State) { auto Facts = AllFacts.getFacts(Block); - if constexpr (isForward()) - for (const Fact *F : Facts) + if constexpr (isForward()) { + for (const Fact *F : Facts) { State = transferFact(State, F); - else - for (const Fact *F : llvm::reverse(Facts)) + PerPointStates[F] = State; + } + } else { + for (const Fact *F : llvm::reverse(Facts)) { + // In backward analysis, capture the state before applying the fact. + PerPointStates[F] = State; State = transferFact(State, F); + } + } return State; } @@ -617,6 +663,8 @@ public: return D->transfer(In, *F->getAs<AssignOriginFact>()); case Fact::Kind::ReturnOfOrigin: return D->transfer(In, *F->getAs<ReturnOfOriginFact>()); + case Fact::Kind::TestPoint: + return D->transfer(In, *F->getAs<TestPointFact>()); } llvm_unreachable("Unknown fact kind"); } @@ -626,14 +674,16 @@ public: Lattice transfer(Lattice In, const ExpireFact &) { return In; } Lattice transfer(Lattice In, const AssignOriginFact &) { return In; } Lattice transfer(Lattice In, const ReturnOfOriginFact &) { return In; } + Lattice transfer(Lattice In, const TestPointFact &) { return In; } }; namespace utils { /// Computes the union of two ImmutableSets. 
template <typename T> -llvm::ImmutableSet<T> join(llvm::ImmutableSet<T> A, llvm::ImmutableSet<T> B, - typename llvm::ImmutableSet<T>::Factory &F) { +static llvm::ImmutableSet<T> join(llvm::ImmutableSet<T> A, + llvm::ImmutableSet<T> B, + typename llvm::ImmutableSet<T>::Factory &F) { if (A.getHeight() < B.getHeight()) std::swap(A, B); for (const T &E : B) @@ -646,7 +696,7 @@ llvm::ImmutableSet<T> join(llvm::ImmutableSet<T> A, llvm::ImmutableSet<T> B, // efficient merge could be implemented using a Patricia Trie or HAMT // instead of the current AVL-tree-based ImmutableMap. template <typename K, typename V, typename Joiner> -llvm::ImmutableMap<K, V> +static llvm::ImmutableMap<K, V> join(llvm::ImmutableMap<K, V> A, llvm::ImmutableMap<K, V> B, typename llvm::ImmutableMap<K, V>::Factory &F, Joiner joinValues) { if (A.getHeight() < B.getHeight()) @@ -670,10 +720,6 @@ join(llvm::ImmutableMap<K, V> A, llvm::ImmutableMap<K, V> B, // Loan Propagation Analysis // ========================================================================= // -// Using LLVM's immutable collections is efficient for dataflow analysis -// as it avoids deep copies during state transitions. -// TODO(opt): Consider using a bitset to represent the set of loans. -using LoanSet = llvm::ImmutableSet<LoanID>; using OriginLoanMap = llvm::ImmutableMap<OriginID, LoanSet>; /// An object to hold the factories for immutable collections, ensuring @@ -769,6 +815,10 @@ public: Factory.OriginMapFactory.add(In.Origins, DestOID, SrcLoans)); } + LoanSet getLoans(OriginID OID, ProgramPoint P) { + return getLoans(getState(P), OID); + } + private: LoanSet getLoans(Lattice L, OriginID OID) { if (auto *Loans = L.Origins.lookup(OID)) @@ -778,23 +828,118 @@ private: }; // ========================================================================= // +// Expired Loans Analysis +// ========================================================================= // + +/// The dataflow lattice for tracking the set of expired loans. +struct ExpiredLattice { + LoanSet Expired; + + ExpiredLattice() : Expired(nullptr) {}; + explicit ExpiredLattice(LoanSet S) : Expired(S) {} + + bool operator==(const ExpiredLattice &Other) const { + return Expired == Other.Expired; + } + bool operator!=(const ExpiredLattice &Other) const { + return !(*this == Other); + } + + void dump(llvm::raw_ostream &OS) const { + OS << "ExpiredLattice State:\n"; + if (Expired.isEmpty()) + OS << " <empty>\n"; + for (const LoanID &LID : Expired) + OS << " Loan " << LID << " is expired\n"; + } +}; + +/// The analysis that tracks which loans have expired. +class ExpiredLoansAnalysis + : public DataflowAnalysis<ExpiredLoansAnalysis, ExpiredLattice, + Direction::Forward> { + + LoanSet::Factory &Factory; + +public: + ExpiredLoansAnalysis(const CFG &C, AnalysisDeclContext &AC, FactManager &F, + LifetimeFactory &Factory) + : DataflowAnalysis(C, AC, F), Factory(Factory.LoanSetFactory) {} + + using Base::transfer; + + StringRef getAnalysisName() const { return "ExpiredLoans"; } + + Lattice getInitialState() { return Lattice(Factory.getEmptySet()); } + + /// Merges two lattices by taking the union of the expired loan sets. + Lattice join(Lattice L1, Lattice L2) const { + return Lattice(utils::join(L1.Expired, L2.Expired, Factory)); + } + + Lattice transfer(Lattice In, const ExpireFact &F) { + return Lattice(Factory.add(In.Expired, F.getLoanID())); + } + + // Removes the loan from the set of expired loans. + // + // When a loan is re-issued (e.g., in a loop), it is no longer considered + // expired. 
A loan can be in the expired set at the point of issue due to + // the dataflow state from a previous loop iteration being propagated along + // a backedge in the CFG. + // + // Note: This has a subtle false-negative though where a loan from previous + // iteration is not overwritten by a reissue. This needs careful tracking + // of loans "across iterations" which can be considered for future + // enhancements. + // + // void foo(int safe) { + // int* p = &safe; + // int* q = &safe; + // while (condition()) { + // int x = 1; + // p = &x; // A loan to 'x' is issued to 'p' in every iteration. + // if (condition()) { + // q = p; + // } + // (void)*p; // OK — 'p' points to 'x' from new iteration. + // (void)*q; // UaF - 'q' still points to 'x' from previous iteration + // // which is now destroyed. + // } + // } + Lattice transfer(Lattice In, const IssueFact &F) { + return Lattice(Factory.remove(In.Expired, F.getLoanID())); + } +}; + +// ========================================================================= // // TODO: -// - Modifying loan propagation to answer `LoanSet getLoans(Origin O, Point P)` // - Modify loan expiry analysis to answer `bool isExpired(Loan L, Point P)` // - Modify origin liveness analysis to answer `bool isLive(Origin O, Point P)` // - Using the above three to perform the final error reporting. // ========================================================================= // -} // anonymous namespace -void runLifetimeSafetyAnalysis(const DeclContext &DC, const CFG &Cfg, - AnalysisDeclContext &AC) { +// ========================================================================= // +// LifetimeSafetyAnalysis Class Implementation +// ========================================================================= // + +// We need this here for unique_ptr with forward declared class. +LifetimeSafetyAnalysis::~LifetimeSafetyAnalysis() = default; + +LifetimeSafetyAnalysis::LifetimeSafetyAnalysis(AnalysisDeclContext &AC) + : AC(AC), Factory(std::make_unique<LifetimeFactory>()), + FactMgr(std::make_unique<FactManager>()) {} + +void LifetimeSafetyAnalysis::run() { llvm::TimeTraceScope TimeProfile("LifetimeSafetyAnalysis"); + + const CFG &Cfg = *AC.getCFG(); DEBUG_WITH_TYPE("PrintCFG", Cfg.dump(AC.getASTContext().getLangOpts(), /*ShowColors=*/true)); - FactManager FactMgr; - FactGenerator FactGen(FactMgr, AC); + + FactGenerator FactGen(*FactMgr, AC); FactGen.run(); - DEBUG_WITH_TYPE("LifetimeFacts", FactMgr.dump(Cfg, AC)); + DEBUG_WITH_TYPE("LifetimeFacts", FactMgr->dump(Cfg, AC)); /// TODO(opt): Consider optimizing individual blocks before running the /// dataflow analysis. @@ -805,9 +950,65 @@ void runLifetimeSafetyAnalysis(const DeclContext &DC, const CFG &Cfg, /// blocks; only Decls are visible. Therefore, loans in a block that /// never reach an Origin associated with a Decl can be safely dropped by /// the analysis. 
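// --- Illustrative aside (editor's sketch; not part of the patch) ------------
// How a unit test can drop a named ProgramPoint using the TestPointFact marker
// introduced above: a functional cast of a "__lifetime_test_point_<name>"
// string literal to void is recognised by the fact generator and can later be
// looked up by name.
void test_point_demo() {
  int x = 0;
  int *p = &x;                                  // IssueFact: loan to 'x' held by 'p'
  void("__lifetime_test_point_after_borrow");   // TestPointFact "after_borrow"
  (void)p;
}
// ----------------------------------------------------------------------------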
- LifetimeFactory Factory; - LoanPropagationAnalysis LoanPropagation(Cfg, AC, FactMgr, Factory); - LoanPropagation.run(); - DEBUG_WITH_TYPE("LifetimeLoanPropagation", LoanPropagation.dump()); + LoanPropagation = + std::make_unique<LoanPropagationAnalysis>(Cfg, AC, *FactMgr, *Factory); + LoanPropagation->run(); + + ExpiredLoans = + std::make_unique<ExpiredLoansAnalysis>(Cfg, AC, *FactMgr, *Factory); + ExpiredLoans->run(); +} + +LoanSet LifetimeSafetyAnalysis::getLoansAtPoint(OriginID OID, + ProgramPoint PP) const { + assert(LoanPropagation && "Analysis has not been run."); + return LoanPropagation->getLoans(OID, PP); +} + +LoanSet LifetimeSafetyAnalysis::getExpiredLoansAtPoint(ProgramPoint PP) const { + assert(ExpiredLoans && "ExpiredLoansAnalysis has not been run."); + return ExpiredLoans->getState(PP).Expired; +} + +std::optional<OriginID> +LifetimeSafetyAnalysis::getOriginIDForDecl(const ValueDecl *D) const { + assert(FactMgr && "FactManager not initialized"); + // This assumes the OriginManager's `get` can find an existing origin. + // We might need a `find` method on OriginManager to avoid `getOrCreate` logic + // in a const-query context if that becomes an issue. + return FactMgr->getOriginMgr().get(*D); +} + +std::vector<LoanID> +LifetimeSafetyAnalysis::getLoanIDForVar(const VarDecl *VD) const { + assert(FactMgr && "FactManager not initialized"); + std::vector<LoanID> Result; + for (const Loan &L : FactMgr->getLoanMgr().getLoans()) + if (L.Path.D == VD) + Result.push_back(L.ID); + return Result; +} + +llvm::StringMap<ProgramPoint> LifetimeSafetyAnalysis::getTestPoints() const { + assert(FactMgr && "FactManager not initialized"); + llvm::StringMap<ProgramPoint> AnnotationToPointMap; + for (const CFGBlock *Block : *AC.getCFG()) { + for (const Fact *F : FactMgr->getFacts(Block)) { + if (const auto *TPF = F->getAs<TestPointFact>()) { + StringRef PointName = TPF->getAnnotation(); + assert(AnnotationToPointMap.find(PointName) == + AnnotationToPointMap.end() && + "more than one test points with the same name"); + AnnotationToPointMap[PointName] = F; + } + } + } + return AnnotationToPointMap; +} +} // namespace internal + +void runLifetimeSafetyAnalysis(AnalysisDeclContext &AC) { + internal::LifetimeSafetyAnalysis Analysis(AC); + Analysis.run(); } -} // namespace clang +} // namespace clang::lifetimes diff --git a/clang/lib/Analysis/plugins/CheckerDependencyHandling/CheckerDependencyHandling.cpp b/clang/lib/Analysis/plugins/CheckerDependencyHandling/CheckerDependencyHandling.cpp index aacb886..518f9e7 100644 --- a/clang/lib/Analysis/plugins/CheckerDependencyHandling/CheckerDependencyHandling.cpp +++ b/clang/lib/Analysis/plugins/CheckerDependencyHandling/CheckerDependencyHandling.cpp @@ -2,6 +2,9 @@ #include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h" #include "clang/StaticAnalyzer/Frontend/CheckerRegistry.h" +// This barebones plugin is used by clang/test/Analysis/checker-plugins.c +// to test dependency handling among checkers loaded from plugins. + using namespace clang; using namespace ento; @@ -15,12 +18,12 @@ struct DependendentChecker : public Checker<check::BeginFunction> { } // end anonymous namespace // Register plugin! 
-extern "C" void clang_registerCheckers(CheckerRegistry ®istry) { - registry.addChecker<Dependency>("example.Dependency", "", ""); - registry.addChecker<DependendentChecker>("example.DependendentChecker", "", - ""); +extern "C" void clang_registerCheckers(CheckerRegistry &Registry) { + Registry.addChecker<Dependency>("example.Dependency", "MockDescription"); + Registry.addChecker<DependendentChecker>("example.DependendentChecker", + "MockDescription"); - registry.addDependency("example.DependendentChecker", "example.Dependency"); + Registry.addDependency("example.DependendentChecker", "example.Dependency"); } extern "C" const char clang_analyzerAPIVersionString[] = diff --git a/clang/lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandling.cpp b/clang/lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandling.cpp index 82c1058..2adb934 100644 --- a/clang/lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandling.cpp +++ b/clang/lib/Analysis/plugins/CheckerOptionHandling/CheckerOptionHandling.cpp @@ -5,6 +5,9 @@ using namespace clang; using namespace ento; +// This barebones plugin is used by clang/test/Analysis/checker-plugins.c +// to test option handling on checkers loaded from plugins. + namespace { struct MyChecker : public Checker<check::BeginFunction> { void checkBeginFunction(CheckerContext &Ctx) const {} @@ -25,13 +28,11 @@ bool shouldRegisterMyChecker(const CheckerManager &mgr) { return true; } } // end anonymous namespace // Register plugin! -extern "C" void clang_registerCheckers(CheckerRegistry ®istry) { - registry.addChecker(registerMyChecker, shouldRegisterMyChecker, - "example.MyChecker", "Example Description", - "example.mychecker.documentation.nonexistent.html", - /*isHidden*/false); +extern "C" void clang_registerCheckers(CheckerRegistry &Registry) { + Registry.addChecker(registerMyChecker, shouldRegisterMyChecker, + "example.MyChecker", "Example Description"); - registry.addCheckerOption(/*OptionType*/ "bool", + Registry.addCheckerOption(/*OptionType*/ "bool", /*CheckerFullName*/ "example.MyChecker", /*OptionName*/ "ExampleOption", /*DefaultValStr*/ "false", diff --git a/clang/lib/Analysis/plugins/SampleAnalyzer/MainCallChecker.cpp b/clang/lib/Analysis/plugins/SampleAnalyzer/MainCallChecker.cpp index fd210d7..53a01d2 100644 --- a/clang/lib/Analysis/plugins/SampleAnalyzer/MainCallChecker.cpp +++ b/clang/lib/Analysis/plugins/SampleAnalyzer/MainCallChecker.cpp @@ -3,12 +3,16 @@ #include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h" #include "clang/StaticAnalyzer/Frontend/CheckerRegistry.h" +// This simple plugin is used by clang/test/Analysis/checker-plugins.c +// to test the use of a checker that is defined in a plugin. + using namespace clang; using namespace ento; namespace { class MainCallChecker : public Checker<check::PreStmt<CallExpr>> { - mutable std::unique_ptr<BugType> BT; + + const BugType BT{this, "call to main", "example analyzer plugin"}; public: void checkPreStmt(const CallExpr *CE, CheckerContext &C) const; @@ -33,21 +37,17 @@ void MainCallChecker::checkPreStmt(const CallExpr *CE, if (!N) return; - if (!BT) - BT.reset(new BugType(this, "call to main", "example analyzer plugin")); - auto report = - std::make_unique<PathSensitiveBugReport>(*BT, BT->getDescription(), N); + std::make_unique<PathSensitiveBugReport>(BT, BT.getDescription(), N); report->addRange(Callee->getSourceRange()); C.emitReport(std::move(report)); } } // Register plugin! 
-extern "C" void clang_registerCheckers(CheckerRegistry ®istry) { - registry.addChecker<MainCallChecker>( - "example.MainCallChecker", "Disallows calls to functions called main", - ""); +extern "C" void clang_registerCheckers(CheckerRegistry &Registry) { + Registry.addChecker<MainCallChecker>("example.MainCallChecker", + "Example Description"); } extern "C" const char clang_analyzerAPIVersionString[] = diff --git a/clang/lib/Basic/TargetInfo.cpp b/clang/lib/Basic/TargetInfo.cpp index 09b6a1f..21fc084 100644 --- a/clang/lib/Basic/TargetInfo.cpp +++ b/clang/lib/Basic/TargetInfo.cpp @@ -172,7 +172,7 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : Triple(T) { ComplexLongDoubleUsesFP2Ret = false; // Set the C++ ABI based on the triple. - TheCXXABI.set(Triple.isKnownWindowsMSVCEnvironment() + TheCXXABI.set(Triple.isKnownWindowsMSVCEnvironment() || Triple.isUEFI() ? TargetCXXABI::Microsoft : TargetCXXABI::GenericItanium); diff --git a/clang/lib/Basic/Targets.cpp b/clang/lib/Basic/Targets.cpp index 5c2af9b..e3f9760 100644 --- a/clang/lib/Basic/Targets.cpp +++ b/clang/lib/Basic/Targets.cpp @@ -757,6 +757,9 @@ std::unique_ptr<TargetInfo> AllocateTarget(const llvm::Triple &Triple, case llvm::Triple::FreeBSD: return std::make_unique<FreeBSDTargetInfo<LoongArch64TargetInfo>>(Triple, Opts); + case llvm::Triple::OpenBSD: + return std::make_unique<OpenBSDTargetInfo<LoongArch64TargetInfo>>(Triple, + Opts); default: return std::make_unique<LoongArch64TargetInfo>(Triple, Opts); } diff --git a/clang/lib/Basic/Targets/AArch64.cpp b/clang/lib/Basic/Targets/AArch64.cpp index 72d2e5f..2b023e5 100644 --- a/clang/lib/Basic/Targets/AArch64.cpp +++ b/clang/lib/Basic/Targets/AArch64.cpp @@ -786,7 +786,8 @@ AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts, return std::nullopt; } -uint64_t AArch64TargetInfo::getFMVPriority(ArrayRef<StringRef> Features) const { +llvm::APInt +AArch64TargetInfo::getFMVPriority(ArrayRef<StringRef> Features) const { return llvm::AArch64::getFMVPriority(Features); } diff --git a/clang/lib/Basic/Targets/AArch64.h b/clang/lib/Basic/Targets/AArch64.h index f4277e9..dfd89be 100644 --- a/clang/lib/Basic/Targets/AArch64.h +++ b/clang/lib/Basic/Targets/AArch64.h @@ -152,7 +152,7 @@ public: void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override; bool setCPU(const std::string &Name) override; - uint64_t getFMVPriority(ArrayRef<StringRef> Features) const override; + llvm::APInt getFMVPriority(ArrayRef<StringRef> Features) const override; bool useFP16ConversionIntrinsics() const override { return false; diff --git a/clang/lib/Basic/Targets/ARM.cpp b/clang/lib/Basic/Targets/ARM.cpp index 7ff8e51..29de34bb 100644 --- a/clang/lib/Basic/Targets/ARM.cpp +++ b/clang/lib/Basic/Targets/ARM.cpp @@ -623,13 +623,15 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features, LDREX = LDREX_W; break; case 7: + case 8: if (ArchProfile == llvm::ARM::ProfileKind::M) LDREX = LDREX_W | LDREX_H | LDREX_B; else LDREX = LDREX_D | LDREX_W | LDREX_H | LDREX_B; break; - case 8: case 9: + assert(ArchProfile != llvm::ARM::ProfileKind::M && + "No Armv9-M architectures defined"); LDREX = LDREX_D | LDREX_W | LDREX_H | LDREX_B; } diff --git a/clang/lib/Basic/Targets/Mips.h b/clang/lib/Basic/Targets/Mips.h index 35501ed..e199df3 100644 --- a/clang/lib/Basic/Targets/Mips.h +++ b/clang/lib/Basic/Targets/Mips.h @@ -129,7 +129,7 @@ public: LongWidth = LongAlign = 32; MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32; PointerWidth = PointerAlign = 32; - PtrDiffType = SignedInt; + 
PtrDiffType = IntPtrType = SignedInt; SizeType = UnsignedInt; SuitableAlign = 64; } @@ -155,7 +155,7 @@ public: IntMaxType = Int64Type; LongWidth = LongAlign = 64; PointerWidth = PointerAlign = 64; - PtrDiffType = SignedLong; + PtrDiffType = IntPtrType = SignedLong; SizeType = UnsignedLong; } @@ -165,7 +165,7 @@ public: IntMaxType = Int64Type; LongWidth = LongAlign = 32; PointerWidth = PointerAlign = 32; - PtrDiffType = SignedInt; + PtrDiffType = IntPtrType = SignedInt; SizeType = UnsignedInt; } diff --git a/clang/lib/Basic/Targets/OSTargets.h b/clang/lib/Basic/Targets/OSTargets.h index 42cff65..94b018a 100644 --- a/clang/lib/Basic/Targets/OSTargets.h +++ b/clang/lib/Basic/Targets/OSTargets.h @@ -496,6 +496,7 @@ public: case llvm::Triple::sparcv9: this->MCountName = "_mcount"; break; + case llvm::Triple::loongarch64: case llvm::Triple::riscv64: break; } diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp index 8a28c07..a6a5ec4 100644 --- a/clang/lib/Basic/Targets/RISCV.cpp +++ b/clang/lib/Basic/Targets/RISCV.cpp @@ -568,7 +568,8 @@ ParsedTargetAttr RISCVTargetInfo::parseTargetAttr(StringRef Features) const { return Ret; } -uint64_t RISCVTargetInfo::getFMVPriority(ArrayRef<StringRef> Features) const { +llvm::APInt +RISCVTargetInfo::getFMVPriority(ArrayRef<StringRef> Features) const { // Priority is explicitly specified on RISC-V unlike on other targets, where // it is derived by all the features of a specific version. Therefore if a // feature contains the priority string, then return it immediately. @@ -580,12 +581,12 @@ uint64_t RISCVTargetInfo::getFMVPriority(ArrayRef<StringRef> Features) const { Feature = RHS; else continue; - uint64_t Priority; + unsigned Priority; if (!Feature.getAsInteger(0, Priority)) - return Priority; + return llvm::APInt(32, Priority); } // Default Priority is zero. - return 0; + return llvm::APInt::getZero(32); } TargetInfo::CallingConvCheckResult diff --git a/clang/lib/Basic/Targets/RISCV.h b/clang/lib/Basic/Targets/RISCV.h index 8d629ab..58bfad1 100644 --- a/clang/lib/Basic/Targets/RISCV.h +++ b/clang/lib/Basic/Targets/RISCV.h @@ -123,7 +123,7 @@ public: void fillValidTuneCPUList(SmallVectorImpl<StringRef> &Values) const override; bool supportsTargetAttributeTune() const override { return true; } ParsedTargetAttr parseTargetAttr(StringRef Str) const override; - uint64_t getFMVPriority(ArrayRef<StringRef> Features) const override; + llvm::APInt getFMVPriority(ArrayRef<StringRef> Features) const override; std::pair<unsigned, unsigned> hardwareInterferenceSizes() const override { return std::make_pair(32, 32); diff --git a/clang/lib/Basic/Targets/X86.cpp b/clang/lib/Basic/Targets/X86.cpp index a1f5aa2..24ecec2 100644 --- a/clang/lib/Basic/Targets/X86.cpp +++ b/clang/lib/Basic/Targets/X86.cpp @@ -1390,8 +1390,8 @@ static llvm::X86::ProcessorFeatures getFeature(StringRef Name) { // correct, so it asserts if the value is out of range. } -uint64_t X86TargetInfo::getFMVPriority(ArrayRef<StringRef> Features) const { - auto getPriority = [](StringRef Feature) -> uint64_t { +llvm::APInt X86TargetInfo::getFMVPriority(ArrayRef<StringRef> Features) const { + auto getPriority = [](StringRef Feature) -> unsigned { // Valid CPUs have a 'key feature' that compares just better than its key // feature. 
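// --- Illustrative aside (editor's sketch; not part of the patch) ------------
// The priorities computed above (now as llvm::APInt on every target) rank the
// candidates of a multiversioned function so the resolver prefers the most
// capable version the CPU supports, e.g. with the x86 target_clones attribute:
__attribute__((target_clones("avx512f", "avx2", "default")))
int dot_step(int a, int b) { return a * b; }
// ----------------------------------------------------------------------------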
using namespace llvm::X86; @@ -1405,11 +1405,11 @@ uint64_t X86TargetInfo::getFMVPriority(ArrayRef<StringRef> Features) const { return getFeaturePriority(getFeature(Feature)) << 1; }; - uint64_t Priority = 0; + unsigned Priority = 0; for (StringRef Feature : Features) if (!Feature.empty()) Priority = std::max(Priority, getPriority(Feature)); - return Priority; + return llvm::APInt(32, Priority); } bool X86TargetInfo::validateCPUSpecificCPUDispatch(StringRef Name) const { diff --git a/clang/lib/Basic/Targets/X86.h b/clang/lib/Basic/Targets/X86.h index ebc59c9..eb15103 100644 --- a/clang/lib/Basic/Targets/X86.h +++ b/clang/lib/Basic/Targets/X86.h @@ -388,7 +388,7 @@ public: return CPU != llvm::X86::CK_None; } - uint64_t getFMVPriority(ArrayRef<StringRef> Features) const override; + llvm::APInt getFMVPriority(ArrayRef<StringRef> Features) const override; bool setFPMath(StringRef Name) override; diff --git a/clang/lib/CIR/CodeGen/CIRGenBuilder.h b/clang/lib/CIR/CodeGen/CIRGenBuilder.h index f855bda..73c9fb9 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuilder.h +++ b/clang/lib/CIR/CodeGen/CIRGenBuilder.h @@ -408,21 +408,23 @@ public: } mlir::Value createSetBitfield(mlir::Location loc, mlir::Type resultType, - mlir::Value dstAddr, mlir::Type storageType, + Address dstAddr, mlir::Type storageType, mlir::Value src, const CIRGenBitFieldInfo &info, - bool isLvalueVolatile, bool useVolatile) { - return create<cir::SetBitfieldOp>(loc, resultType, dstAddr, storageType, - src, info.name, info.size, info.offset, - info.isSigned, isLvalueVolatile); + bool isLvalueVolatile) { + return create<cir::SetBitfieldOp>( + loc, resultType, dstAddr.getPointer(), storageType, src, info.name, + info.size, info.offset, info.isSigned, isLvalueVolatile, + dstAddr.getAlignment().getAsAlign().value()); } mlir::Value createGetBitfield(mlir::Location loc, mlir::Type resultType, - mlir::Value addr, mlir::Type storageType, + Address addr, mlir::Type storageType, const CIRGenBitFieldInfo &info, - bool isLvalueVolatile, bool useVolatile) { - return create<cir::GetBitfieldOp>(loc, resultType, addr, storageType, - info.name, info.size, info.offset, - info.isSigned, isLvalueVolatile); + bool isLvalueVolatile) { + return create<cir::GetBitfieldOp>( + loc, resultType, addr.getPointer(), storageType, info.name, info.size, + info.offset, info.isSigned, isLvalueVolatile, + addr.getAlignment().getAsAlign().value()); } }; diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp index 61d1c54..ef136f8 100644 --- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp @@ -121,6 +121,13 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID, return RValue::get(nullptr); } + case Builtin::BI__builtin_assume_separate_storage: { + mlir::Value value0 = emitScalarExpr(e->getArg(0)); + mlir::Value value1 = emitScalarExpr(e->getArg(1)); + builder.create<cir::AssumeSepStorageOp>(loc, value0, value1); + return RValue::get(nullptr); + } + case Builtin::BI__builtin_complex: { mlir::Value real = emitScalarExpr(e->getArg(0)); mlir::Value imag = emitScalarExpr(e->getArg(1)); diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h index eb079b8..5929568 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXABI.h +++ b/clang/lib/CIR/CodeGen/CIRGenCXXABI.h @@ -75,6 +75,11 @@ public: /// Emit dtor variants required by this ABI. 
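// --- Illustrative aside (editor's sketch; not part of the patch) ------------
// Source-level use of the builtin handled above: it asserts to the optimizer
// that the two pointers refer to separate allocations (no aliasing), and now
// lowers through cir::AssumeSepStorageOp in ClangIR.
void scale(float *dst, float *src, int n) {
  __builtin_assume_separate_storage(dst, src);
  for (int i = 0; i < n; ++i)
    dst[i] = 2.0f * src[i];
}
// ----------------------------------------------------------------------------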
virtual void emitCXXDestructors(const clang::CXXDestructorDecl *d) = 0; + virtual void emitDestructorCall(CIRGenFunction &cgf, + const CXXDestructorDecl *dd, CXXDtorType type, + bool forVirtualBase, bool delegating, + Address thisAddr, QualType thisTy) = 0; + /// Returns true if the given destructor type should be emitted as a linkonce /// delegating thunk, regardless of whether the dtor is defined in this TU or /// not. diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp index 8da832d..67d8988 100644 --- a/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp @@ -246,6 +246,29 @@ static void emitNewInitializer(CIRGenFunction &cgf, const CXXNewExpr *e, } } +RValue CIRGenFunction::emitCXXDestructorCall( + GlobalDecl dtor, const CIRGenCallee &callee, mlir::Value thisVal, + QualType thisTy, mlir::Value implicitParam, QualType implicitParamTy, + const CallExpr *ce) { + const CXXMethodDecl *dtorDecl = cast<CXXMethodDecl>(dtor.getDecl()); + + assert(!thisTy.isNull()); + assert(thisTy->getAsCXXRecordDecl() == dtorDecl->getParent() && + "Pointer/Object mixup"); + + assert(!cir::MissingFeatures::addressSpace()); + + CallArgList args; + commonBuildCXXMemberOrOperatorCall(*this, dtorDecl, thisVal, implicitParam, + implicitParamTy, ce, args, nullptr); + assert((ce || dtor.getDecl()) && "expected source location provider"); + assert(!cir::MissingFeatures::opCallMustTail()); + return emitCall(cgm.getTypes().arrangeCXXStructorDeclaration(dtor), callee, + ReturnValueSlot(), args, nullptr, + ce ? getLoc(ce->getExprLoc()) + : getLoc(dtor.getDecl()->getSourceRange())); +} + /// Emit a call to an operator new or operator delete function, as implicitly /// created by new-expressions and delete-expressions. 
static RValue emitNewDeleteCall(CIRGenFunction &cgf, diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp index 8667bb6..fbf53db 100644 --- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp @@ -392,6 +392,14 @@ void CIRGenFunction::emitDelegatingCXXConstructorCall( } } +void CIRGenFunction::emitCXXDestructorCall(const CXXDestructorDecl *dd, + CXXDtorType type, + bool forVirtualBase, bool delegating, + Address thisAddr, QualType thisTy) { + cgm.getCXXABI().emitDestructorCall(*this, dd, type, forVirtualBase, + delegating, thisAddr, thisTy); +} + Address CIRGenFunction::getAddressOfBaseClass( Address value, const CXXRecordDecl *derived, llvm::iterator_range<CastExpr::path_const_iterator> path, diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp index 51da48d..1f64801 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp @@ -333,13 +333,12 @@ mlir::Value CIRGenFunction::emitStoreThroughBitfieldLValue(RValue src, Address ptr = dst.getBitFieldAddress(); assert(!cir::MissingFeatures::armComputeVolatileBitfields()); - const bool useVolatile = false; mlir::Value dstAddr = dst.getAddress().getPointer(); - return builder.createSetBitfield(dstAddr.getLoc(), resLTy, dstAddr, + return builder.createSetBitfield(dstAddr.getLoc(), resLTy, ptr, ptr.getElementType(), src.getValue(), info, - dst.isVolatileQualified(), useVolatile); + dst.isVolatileQualified()); } RValue CIRGenFunction::emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc) { @@ -352,8 +351,7 @@ RValue CIRGenFunction::emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc) { assert(!cir::MissingFeatures::armComputeVolatileBitfields()); mlir::Value field = builder.createGetBitfield( - getLoc(loc), resLTy, ptr.getPointer(), ptr.getElementType(), info, - lv.isVolatile(), false); + getLoc(loc), resLTy, ptr, ptr.getElementType(), info, lv.isVolatile()); assert(!cir::MissingFeatures::opLoadEmitScalarRangeCheck() && "NYI"); return RValue::get(field); } @@ -366,7 +364,10 @@ Address CIRGenFunction::getAddrOfBitFieldStorage(LValue base, cir::PointerType fieldPtr = cir::PointerType::get(fieldType); cir::GetMemberOp sea = getBuilder().createGetMember( loc, fieldPtr, base.getPointer(), field->getName(), index); - return Address(sea, CharUnits::One()); + auto rec = cast<cir::RecordType>(base.getAddress().getElementType()); + CharUnits offset = CharUnits::fromQuantity( + rec.getElementOffset(cgm.getDataLayout().layout, index)); + return Address(sea, base.getAlignment().alignmentAtOffset(offset)); } LValue CIRGenFunction::emitLValueForBitField(LValue base, @@ -662,7 +663,8 @@ LValue CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *e) { } case UO_PreInc: case UO_PreDec: { - bool isInc = e->isIncrementOp(); + cir::UnaryOpKind kind = + e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec; LValue lv = emitLValue(e->getSubExpr()); assert(e->isPrefix() && "Prefix operator in unexpected state!"); @@ -671,7 +673,7 @@ LValue CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *e) { cgm.errorNYI(e->getSourceRange(), "UnaryOp complex inc/dec"); lv = LValue(); } else { - emitScalarPrePostIncDec(e, lv, isInc, /*isPre=*/true); + emitScalarPrePostIncDec(e, lv, kind, /*isPre=*/true); } return lv; @@ -1053,6 +1055,67 @@ LValue CIRGenFunction::emitMemberExpr(const MemberExpr *e) { llvm_unreachable("Unhandled member declaration!"); } +/// Evaluate an expression into a given memory location. 
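// --- Illustrative aside (editor's sketch; not part of the patch) ------------
// The kind of bitfield accesses affected by threading an Address (and hence a
// real alignment instead of the previous CharUnits::One()) into
// createGetBitfield/createSetBitfield above:
struct Flags {
  unsigned ready : 1;
  unsigned mode : 3;
  unsigned count : 12;
};

unsigned read_mode(const Flags &f) { return f.mode; } // GetBitfieldOp
void mark_ready(Flags &f) { f.ready = 1; }            // SetBitfieldOp
// ----------------------------------------------------------------------------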
+void CIRGenFunction::emitAnyExprToMem(const Expr *e, Address location, + Qualifiers quals, bool isInit) { + // FIXME: This function should take an LValue as an argument. + switch (getEvaluationKind(e->getType())) { + case cir::TEK_Complex: { + LValue lv = makeAddrLValue(location, e->getType()); + emitComplexExprIntoLValue(e, lv, isInit); + return; + } + + case cir::TEK_Aggregate: { + emitAggExpr(e, AggValueSlot::forAddr(location, quals, + AggValueSlot::IsDestructed_t(isInit), + AggValueSlot::IsAliased_t(!isInit), + AggValueSlot::MayOverlap)); + return; + } + + case cir::TEK_Scalar: { + RValue rv = RValue::get(emitScalarExpr(e)); + LValue lv = makeAddrLValue(location, e->getType()); + emitStoreThroughLValue(rv, lv); + return; + } + } + + llvm_unreachable("bad evaluation kind"); +} + +LValue CIRGenFunction::emitCompoundLiteralLValue(const CompoundLiteralExpr *e) { + if (e->isFileScope()) { + cgm.errorNYI(e->getSourceRange(), "emitCompoundLiteralLValue: FileScope"); + return {}; + } + + if (e->getType()->isVariablyModifiedType()) { + cgm.errorNYI(e->getSourceRange(), + "emitCompoundLiteralLValue: VariablyModifiedType"); + return {}; + } + + Address declPtr = createMemTemp(e->getType(), getLoc(e->getSourceRange()), + ".compoundliteral"); + const Expr *initExpr = e->getInitializer(); + LValue result = makeAddrLValue(declPtr, e->getType(), AlignmentSource::Decl); + + emitAnyExprToMem(initExpr, declPtr, e->getType().getQualifiers(), + /*Init*/ true); + + // Block-scope compound literals are destroyed at the end of the enclosing + // scope in C. + if (!getLangOpts().CPlusPlus && e->getType().isDestructedType()) { + cgm.errorNYI(e->getSourceRange(), + "emitCompoundLiteralLValue: non C++ DestructedType"); + return {}; + } + + return result; +} + LValue CIRGenFunction::emitCallExprLValue(const CallExpr *e) { RValue rv = emitCallExpr(e); diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp index 0a22771..6756a7c 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp @@ -52,28 +52,33 @@ public: mlir::Value VisitGenericSelectionExpr(GenericSelectionExpr *e); mlir::Value VisitImplicitCastExpr(ImplicitCastExpr *e); mlir::Value VisitInitListExpr(const InitListExpr *e); + + mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *e) { + return emitLoadOfLValue(e); + } + mlir::Value VisitImaginaryLiteral(const ImaginaryLiteral *il); mlir::Value VisitParenExpr(ParenExpr *e); mlir::Value VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *e); - mlir::Value VisitPrePostIncDec(const UnaryOperator *e, bool isInc, + mlir::Value VisitPrePostIncDec(const UnaryOperator *e, cir::UnaryOpKind op, bool isPre); mlir::Value VisitUnaryPostDec(const UnaryOperator *e) { - return VisitPrePostIncDec(e, false, false); + return VisitPrePostIncDec(e, cir::UnaryOpKind::Dec, false); } mlir::Value VisitUnaryPostInc(const UnaryOperator *e) { - return VisitPrePostIncDec(e, true, false); + return VisitPrePostIncDec(e, cir::UnaryOpKind::Inc, false); } mlir::Value VisitUnaryPreDec(const UnaryOperator *e) { - return VisitPrePostIncDec(e, false, true); + return VisitPrePostIncDec(e, cir::UnaryOpKind::Dec, true); } mlir::Value VisitUnaryPreInc(const UnaryOperator *e) { - return VisitPrePostIncDec(e, true, true); + return VisitPrePostIncDec(e, cir::UnaryOpKind::Inc, true); } mlir::Value VisitUnaryDeref(const Expr *e); @@ -355,9 +360,10 @@ mlir::Value ComplexExprEmitter::VisitSubstNonTypeTemplateParmExpr( } mlir::Value 
ComplexExprEmitter::VisitPrePostIncDec(const UnaryOperator *e, - bool isInc, bool isPre) { + cir::UnaryOpKind op, + bool isPre) { LValue lv = cgf.emitLValue(e->getSubExpr()); - return cgf.emitComplexPrePostIncDec(e, lv, isInc, isPre); + return cgf.emitComplexPrePostIncDec(e, lv, op, isPre); } mlir::Value ComplexExprEmitter::VisitUnaryDeref(const Expr *e) { @@ -449,12 +455,15 @@ mlir::Value CIRGenFunction::emitComplexExpr(const Expr *e) { } mlir::Value CIRGenFunction::emitComplexPrePostIncDec(const UnaryOperator *e, - LValue lv, bool isInc, + LValue lv, + cir::UnaryOpKind op, bool isPre) { + assert(op == cir::UnaryOpKind::Inc || + op == cir::UnaryOpKind::Dec && "Invalid UnaryOp kind for ComplexType"); + mlir::Value inVal = emitLoadOfComplex(lv, e->getExprLoc()); mlir::Location loc = getLoc(e->getExprLoc()); - auto opKind = isInc ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec; - mlir::Value incVal = builder.createUnaryOp(loc, opKind, inVal); + mlir::Value incVal = builder.createUnaryOp(loc, op, inVal); // Store the updated result through the lvalue. emitStoreOfComplex(loc, incVal, lv, /*isInit=*/false); @@ -467,6 +476,15 @@ mlir::Value CIRGenFunction::emitComplexPrePostIncDec(const UnaryOperator *e, return isPre ? incVal : inVal; } +void CIRGenFunction::emitComplexExprIntoLValue(const Expr *e, LValue dest, + bool isInit) { + assert(e && getComplexType(e->getType()) && + "Invalid complex expression to emit"); + ComplexExprEmitter emitter(*this); + mlir::Value value = emitter.Visit(const_cast<Expr *>(e)); + emitter.emitStoreOfComplex(getLoc(e->getExprLoc()), value, dest, isInit); +} + mlir::Value CIRGenFunction::emitLoadOfComplex(LValue src, SourceLocation loc) { return ComplexExprEmitter(*this).emitLoadOfLValue(src, loc); } diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp index 9e13b4c..eba6bff 100644 --- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp @@ -233,6 +233,10 @@ public: mlir::Value VisitMemberExpr(MemberExpr *e); + mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *e) { + return emitLoadOfLValue(e); + } + mlir::Value VisitInitListExpr(InitListExpr *e); mlir::Value VisitExplicitCastExpr(ExplicitCastExpr *e) { @@ -383,22 +387,22 @@ public: // Unary Operators. 
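// --- Illustrative aside (editor's sketch; not part of the patch) ------------
// C constructs exercised by the two additions above: compound literals now get
// lvalue emission, and ++/-- on _Complex values is routed through the explicit
// cir::UnaryOpKind inc/dec path. (Written as C; clang accepts compound literals
// in C++ only as an extension.)
typedef struct { int x, y; } Point;

int compound_demo(void) {
  const Point *p = &(Point){1, 2};  // address of a block-scope compound literal
  return p->x + ((Point){3, 4}).y;  // member access also goes through the lvalue path
}

float _Complex bump(float _Complex c) {
  return ++c;                       // complex pre-increment -> cir::UnaryOpKind::Inc
}
// ----------------------------------------------------------------------------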
mlir::Value VisitUnaryPostDec(const UnaryOperator *e) { LValue lv = cgf.emitLValue(e->getSubExpr()); - return emitScalarPrePostIncDec(e, lv, false, false); + return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Dec, false); } mlir::Value VisitUnaryPostInc(const UnaryOperator *e) { LValue lv = cgf.emitLValue(e->getSubExpr()); - return emitScalarPrePostIncDec(e, lv, true, false); + return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Inc, false); } mlir::Value VisitUnaryPreDec(const UnaryOperator *e) { LValue lv = cgf.emitLValue(e->getSubExpr()); - return emitScalarPrePostIncDec(e, lv, false, true); + return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Dec, true); } mlir::Value VisitUnaryPreInc(const UnaryOperator *e) { LValue lv = cgf.emitLValue(e->getSubExpr()); - return emitScalarPrePostIncDec(e, lv, true, true); + return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Inc, true); } mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv, - bool isInc, bool isPre) { + cir::UnaryOpKind kind, bool isPre) { if (cgf.getLangOpts().OpenMP) cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec OpenMP"); @@ -427,7 +431,7 @@ public: // -> bool = ((int)bool + 1 != 0) // An interesting aspect of this is that increment is always true. // Decrement does not have this property. - if (isInc && type->isBooleanType()) { + if (kind == cir::UnaryOpKind::Inc && type->isBooleanType()) { value = builder.getTrue(cgf.getLoc(e->getExprLoc())); } else if (type->isIntegerType()) { QualType promotedType; @@ -458,7 +462,7 @@ public: assert(!cir::MissingFeatures::sanitizers()); if (e->canOverflow() && type->isSignedIntegerOrEnumerationType()) { - value = emitIncDecConsiderOverflowBehavior(e, value, isInc); + value = emitIncDecConsiderOverflowBehavior(e, value, kind); } else { cir::UnaryOpKind kind = e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec; @@ -480,7 +484,7 @@ public: // For everything else, we can just do a simple increment. mlir::Location loc = cgf.getLoc(e->getSourceRange()); CIRGenBuilderTy &builder = cgf.getBuilder(); - int amount = (isInc ? 1 : -1); + int amount = kind == cir::UnaryOpKind::Inc ? 1 : -1; mlir::Value amt = builder.getSInt32(amount, loc); assert(!cir::MissingFeatures::sanitizers()); value = builder.createPtrStride(loc, value, amt); @@ -500,8 +504,8 @@ public: if (mlir::isa<cir::SingleType, cir::DoubleType>(value.getType())) { // Create the inc/dec operation. // NOTE(CIR): clang calls CreateAdd but folds this to a unary op - cir::UnaryOpKind kind = - (isInc ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec); + assert(kind == cir::UnaryOpKind::Inc || + kind == cir::UnaryOpKind::Dec && "Invalid UnaryOp kind"); value = emitUnaryOp(e, kind, value); } else { cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fp type"); @@ -532,9 +536,9 @@ public: mlir::Value emitIncDecConsiderOverflowBehavior(const UnaryOperator *e, mlir::Value inVal, - bool isInc) { - cir::UnaryOpKind kind = - e->isIncrementOp() ? 
cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec; + cir::UnaryOpKind kind) { + assert(kind == cir::UnaryOpKind::Inc || + kind == cir::UnaryOpKind::Dec && "Invalid UnaryOp kind"); switch (cgf.getLangOpts().getSignedOverflowBehavior()) { case LangOptions::SOB_Defined: return emitUnaryOp(e, kind, inVal, /*nsw=*/false); @@ -2147,8 +2151,9 @@ mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator( } mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *e, - LValue lv, bool isInc, + LValue lv, + cir::UnaryOpKind kind, bool isPre) { return ScalarExprEmitter(*this, builder) - .emitScalarPrePostIncDec(e, lv, isInc, isPre); + .emitScalarPrePostIncDec(e, lv, kind, isPre); } diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp index e532b9d..3e69e56 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp @@ -593,11 +593,12 @@ void CIRGenFunction::emitDestructorBody(FunctionArgList &args) { assert(!cir::MissingFeatures::dtorCleanups()); - // TODO(cir): A complete destructor is supposed to call the base destructor. - // Since we have to emit both dtor kinds we just fall through for now and. - // As long as we don't support virtual bases this should be functionally - // equivalent. - assert(!cir::MissingFeatures::completeDtors()); + if (!isTryBody) { + QualType thisTy = dtor->getFunctionObjectParameterType(); + emitCXXDestructorCall(dtor, Dtor_Base, /*forVirtualBase=*/false, + /*delegating=*/false, loadCXXThisAddress(), thisTy); + break; + } // Fallthrough: act like we're in the base variant. [[fallthrough]]; @@ -698,6 +699,8 @@ LValue CIRGenFunction::emitLValue(const Expr *e) { return emitStringLiteralLValue(cast<StringLiteral>(e)); case Expr::MemberExprClass: return emitMemberExpr(cast<MemberExpr>(e)); + case Expr::CompoundLiteralExprClass: + return emitCompoundLiteralLValue(cast<CompoundLiteralExpr>(e)); case Expr::BinaryOperatorClass: return emitBinaryOperatorLValue(cast<BinaryOperator>(e)); case Expr::CompoundAssignOperatorClass: { diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h index 9541f4f..2aceeef 100644 --- a/clang/lib/CIR/CodeGen/CIRGenFunction.h +++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h @@ -757,6 +757,11 @@ public: RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot = AggValueSlot::ignored()); + /// Emits the code necessary to evaluate an arbitrary expression into the + /// given memory location. + void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, + bool isInitializer); + /// Similarly to emitAnyExpr(), however, the result will always be accessible /// even if no aggregate location is provided. 
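// A hedged sketch of source constructs routed through the UnaryOpKind-based
// inc/dec paths above; names are illustrative. The ++ on _Complex is a
// GNU/Clang extension, and mapping -fwrapv / -ftrapv to SOB_Defined /
// SOB_Trapping is the usual clang behavior, assumed rather than shown here.
float _Complex bump(float _Complex c) { return ++c; } // complex path: unary inc on the complex value
_Bool set_flag(_Bool b) { return ++b; }               // boolean path: result is always true
int *post_inc(int *p) { return p++; }                 // pointer path: stride of +1
int next(int n) { return ++n; }                       // signed path: overflow per the SOB_* setting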
RValue emitAnyExprToTemp(const clang::Expr *e); @@ -828,6 +833,7 @@ public: mlir::Value emitCheckedArgForAssume(const Expr *e); LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e); + LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e); void emitConstructorBody(FunctionArgList &args); void emitDestructorBody(FunctionArgList &args); @@ -847,6 +853,15 @@ public: bool delegating, Address thisAddr, CallArgList &args, clang::SourceLocation loc); + void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type, + bool forVirtualBase, bool delegating, + Address thisAddr, QualType thisTy); + + RValue emitCXXDestructorCall(GlobalDecl dtor, const CIRGenCallee &callee, + mlir::Value thisVal, QualType thisTy, + mlir::Value implicitParam, + QualType implicitParamTy, const CallExpr *e); + mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s, llvm::ArrayRef<const Attr *> attrs); @@ -911,7 +926,7 @@ public: mlir::Value emitScalarExpr(const clang::Expr *e); mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv, - bool isInc, bool isPre); + cir::UnaryOpKind kind, bool isPre); /// Build a debug stoppoint if we are emitting debug info. void emitStopPoint(const Stmt *s); @@ -930,8 +945,10 @@ public: /// returning the result. mlir::Value emitComplexExpr(const Expr *e); + void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit); + mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv, - bool isInc, bool isPre); + cir::UnaryOpKind op, bool isPre); LValue emitComplexAssignmentLValue(const BinaryOperator *e); diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp index 1496d87..6577f5f 100644 --- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp +++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp @@ -46,6 +46,11 @@ public: void emitCXXDestructors(const clang::CXXDestructorDecl *d) override; void emitCXXStructor(clang::GlobalDecl gd) override; + void emitDestructorCall(CIRGenFunction &cgf, const CXXDestructorDecl *dd, + CXXDtorType type, bool forVirtualBase, + bool delegating, Address thisAddr, + QualType thisTy) override; + bool useThunkForDtorVariant(const CXXDestructorDecl *dtor, CXXDtorType dt) const override { // Itanium does not emit any destructor variant as an inline thunk. 
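// A minimal C++ sketch of what the destructor changes above affect: for a
// class with no virtual bases, the complete destructor (D1 in the Itanium
// mangling) emitted for S now explicitly calls the base variant (D2) through
// emitCXXDestructorCall instead of relying on the fall-through.
struct S {
  ~S();
};
S::~S() {}

void scoped_use() {
  S s;
} // end of scope: call the complete destructor, which delegates to the base one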
@@ -240,6 +245,25 @@ bool CIRGenItaniumCXXABI::needsVTTParameter(GlobalDecl gd) { return false; } +void CIRGenItaniumCXXABI::emitDestructorCall( + CIRGenFunction &cgf, const CXXDestructorDecl *dd, CXXDtorType type, + bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy) { + GlobalDecl gd(dd, type); + if (needsVTTParameter(gd)) { + cgm.errorNYI(dd->getSourceRange(), "emitDestructorCall: VTT"); + } + + mlir::Value vtt = nullptr; + ASTContext &astContext = cgm.getASTContext(); + QualType vttTy = astContext.getPointerType(astContext.VoidPtrTy); + assert(!cir::MissingFeatures::appleKext()); + CIRGenCallee callee = + CIRGenCallee::forDirect(cgm.getAddrOfCXXStructor(gd), gd); + + cgf.emitCXXDestructorCall(gd, callee, thisAddr.getPointer(), thisTy, vtt, + vttTy, nullptr); +} + CIRGenCXXABI *clang::CIRGen::CreateCIRGenItaniumCXXABI(CIRGenModule &cgm) { switch (cgm.getASTContext().getCXXABIKind()) { case TargetCXXABI::GenericItanium: diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp index 840e856..3cd7de0 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp +++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp @@ -460,6 +460,17 @@ mlir::LogicalResult CIRToLLVMAssumeOpLowering::matchAndRewrite( return mlir::success(); } +mlir::LogicalResult CIRToLLVMAssumeSepStorageOpLowering::matchAndRewrite( + cir::AssumeSepStorageOp op, OpAdaptor adaptor, + mlir::ConversionPatternRewriter &rewriter) const { + auto cond = rewriter.create<mlir::LLVM::ConstantOp>(op.getLoc(), + rewriter.getI1Type(), 1); + rewriter.replaceOpWithNewOp<mlir::LLVM::AssumeOp>( + op, cond, mlir::LLVM::AssumeSeparateStorageTag{}, adaptor.getPtr1(), + adaptor.getPtr2()); + return mlir::success(); +} + mlir::LogicalResult CIRToLLVMBitClrsbOpLowering::matchAndRewrite( cir::BitClrsbOp op, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const { @@ -2066,6 +2077,7 @@ void ConvertCIRToLLVMPass::runOnOperation() { patterns.add< // clang-format off CIRToLLVMAssumeOpLowering, + CIRToLLVMAssumeSepStorageOpLowering, CIRToLLVMBaseClassAddrOpLowering, CIRToLLVMBinOpLowering, CIRToLLVMBitClrsbOpLowering, @@ -2571,7 +2583,7 @@ mlir::LogicalResult CIRToLLVMSetBitfieldOpLowering::matchAndRewrite( assert(storageSize > size && "Invalid bitfield size."); mlir::Value val = rewriter.create<mlir::LLVM::LoadOp>( - op.getLoc(), intType, adaptor.getAddr(), /* alignment */ 0, + op.getLoc(), intType, adaptor.getAddr(), op.getAlignment(), op.getIsVolatile()); srcVal = @@ -2588,7 +2600,7 @@ mlir::LogicalResult CIRToLLVMSetBitfieldOpLowering::matchAndRewrite( } rewriter.create<mlir::LLVM::StoreOp>(op.getLoc(), srcVal, adaptor.getAddr(), - /* alignment */ 0, op.getIsVolatile()); + op.getAlignment(), op.getIsVolatile()); mlir::Type resultTy = getTypeConverter()->convertType(op.getType()); @@ -2662,7 +2674,8 @@ mlir::LogicalResult CIRToLLVMGetBitfieldOpLowering::matchAndRewrite( computeBitfieldIntType(storageType, context, storageSize); mlir::Value val = rewriter.create<mlir::LLVM::LoadOp>( - op.getLoc(), intType, adaptor.getAddr(), 0, op.getIsVolatile()); + op.getLoc(), intType, adaptor.getAddr(), op.getAlignment(), + op.getIsVolatile()); val = rewriter.create<mlir::LLVM::BitcastOp>(op.getLoc(), intType, val); if (info.getIsSigned()) { diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h index 3faf1e9..2911ced 100644 --- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h +++ 
b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h @@ -44,6 +44,16 @@ public: mlir::ConversionPatternRewriter &) const override; }; +class CIRToLLVMAssumeSepStorageOpLowering + : public mlir::OpConversionPattern<cir::AssumeSepStorageOp> { +public: + using mlir::OpConversionPattern<cir::AssumeSepStorageOp>::OpConversionPattern; + + mlir::LogicalResult + matchAndRewrite(cir::AssumeSepStorageOp op, OpAdaptor, + mlir::ConversionPatternRewriter &) const override; +}; + class CIRToLLVMBitClrsbOpLowering : public mlir::OpConversionPattern<cir::BitClrsbOp> { public: diff --git a/clang/lib/CodeGen/ABIInfo.cpp b/clang/lib/CodeGen/ABIInfo.cpp index d981d69..3ef430e1 100644 --- a/clang/lib/CodeGen/ABIInfo.cpp +++ b/clang/lib/CodeGen/ABIInfo.cpp @@ -218,8 +218,8 @@ void ABIInfo::appendAttributeMangling(StringRef AttrStr, // only have "+" prefixes here. assert(LHS.starts_with("+") && RHS.starts_with("+") && "Features should always have a prefix."); - return TI.getFMVPriority({LHS.substr(1)}) > - TI.getFMVPriority({RHS.substr(1)}); + return TI.getFMVPriority({LHS.substr(1)}) + .ugt(TI.getFMVPriority({RHS.substr(1)})); }); bool IsFirst = true; diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index 5f2eb76..3f784fc 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -4108,6 +4108,22 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, return RValue::get(Result); } + case Builtin::BI__builtin_elementwise_maximumnum: { + Value *Op0 = EmitScalarExpr(E->getArg(0)); + Value *Op1 = EmitScalarExpr(E->getArg(1)); + Value *Result = Builder.CreateBinaryIntrinsic( + Intrinsic::maximumnum, Op0, Op1, nullptr, "elt.maximumnum"); + return RValue::get(Result); + } + + case Builtin::BI__builtin_elementwise_minimumnum: { + Value *Op0 = EmitScalarExpr(E->getArg(0)); + Value *Op1 = EmitScalarExpr(E->getArg(1)); + Value *Result = Builder.CreateBinaryIntrinsic( + Intrinsic::minimumnum, Op0, Op1, nullptr, "elt.minimumnum"); + return RValue::get(Result); + } + case Builtin::BI__builtin_reduce_max: { auto GetIntrinsicID = [this](QualType QT) { if (auto *VecTy = QT->getAs<VectorType>()) diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp index 19d8ba2..0bceece 100644 --- a/clang/lib/CodeGen/CGCall.cpp +++ b/clang/lib/CodeGen/CGCall.cpp @@ -214,7 +214,7 @@ static void appendParameterTypes( for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) { prefix.push_back(FPT->getParamType(I)); if (ExtInfos[I].hasPassObjectSize()) - prefix.push_back(CGT.getContext().getSizeType()); + prefix.push_back(CGT.getContext().getCanonicalSizeType()); } addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize, diff --git a/clang/lib/CodeGen/CGCoroutine.cpp b/clang/lib/CodeGen/CGCoroutine.cpp index 117ef3d..5ee9089 100644 --- a/clang/lib/CodeGen/CGCoroutine.cpp +++ b/clang/lib/CodeGen/CGCoroutine.cpp @@ -1006,15 +1006,15 @@ RValue CodeGenFunction::EmitCoroutineIntrinsic(const CallExpr *E, } case llvm::Intrinsic::coro_size: { auto &Context = getContext(); - CanQualType SizeTy = Context.getSizeType(); - llvm::IntegerType *T = Builder.getIntNTy(Context.getTypeSize(SizeTy)); + llvm::IntegerType *T = + Builder.getIntNTy(Context.getTypeSize(Context.getSizeType())); llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::coro_size, T); return RValue::get(Builder.CreateCall(F)); } case llvm::Intrinsic::coro_align: { auto &Context = getContext(); - CanQualType SizeTy = Context.getSizeType(); - llvm::IntegerType *T = 
Builder.getIntNTy(Context.getTypeSize(SizeTy)); + llvm::IntegerType *T = + Builder.getIntNTy(Context.getTypeSize(Context.getSizeType())); llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::coro_align, T); return RValue::get(Builder.CreateCall(F)); } diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp index 446cf8d..a371b67 100644 --- a/clang/lib/CodeGen/CGDebugInfo.cpp +++ b/clang/lib/CodeGen/CGDebugInfo.cpp @@ -58,13 +58,6 @@ using namespace clang; using namespace clang::CodeGen; -// TODO: consider deprecating ClArrayBoundsPseudoFn; functionality is subsumed -// by -fsanitize-annotate-debug-info -static llvm::cl::opt<bool> ClArrayBoundsPseudoFn( - "array-bounds-pseudofn", llvm::cl::Hidden, llvm::cl::Optional, - llvm::cl::desc("Emit debug info that places array-bounds instrumentation " - "in an inline function called __ubsan_check_array_bounds.")); - static uint32_t getTypeAlignIfRequired(const Type *Ty, const ASTContext &Ctx) { auto TI = Ctx.getTypeInfo(Ty); if (TI.isAlignRequired()) @@ -4052,7 +4045,8 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) { return CreateType(cast<HLSLAttributedResourceType>(Ty), Unit); case Type::HLSLInlineSpirv: return CreateType(cast<HLSLInlineSpirvType>(Ty), Unit); - + case Type::PredefinedSugar: + return getOrCreateType(cast<PredefinedSugarType>(Ty)->desugar(), Unit); case Type::CountAttributed: case Type::Auto: case Type::Attributed: @@ -6068,11 +6062,10 @@ void CGDebugInfo::EmitPseudoVariable(CGBuilderTy &Builder, // ptr, in this case its debug info may not match the actual type of object // being used as in the next instruction, so we will need to emit a pseudo // variable for type-casted value. - auto DeclareTypeMatches = [&](auto *DbgDeclare) { + auto DeclareTypeMatches = [&](llvm::DbgVariableRecord *DbgDeclare) { return DbgDeclare->getVariable()->getType() == Type; }; - if (any_of(llvm::findDbgDeclares(Var), DeclareTypeMatches) || - any_of(llvm::findDVRDeclares(Var), DeclareTypeMatches)) + if (any_of(llvm::findDVRDeclares(Var), DeclareTypeMatches)) return; } @@ -6482,7 +6475,11 @@ llvm::DILocation *CodeGenFunction::SanitizerAnnotateDebugInfo( SanitizerHandler Handler) { llvm::DILocation *CheckDebugLoc = Builder.getCurrentDebugLocation(); auto *DI = getDebugInfo(); - if (!DI) + if (!DI || !CheckDebugLoc) + return CheckDebugLoc; + const auto &AnnotateDebugInfo = + CGM.getCodeGenOpts().SanitizeAnnotateDebugInfo; + if (AnnotateDebugInfo.empty()) return CheckDebugLoc; std::string Label; @@ -6491,14 +6488,8 @@ llvm::DILocation *CodeGenFunction::SanitizerAnnotateDebugInfo( else Label = SanitizerHandlerToCheckLabel(Handler); - for (auto Ord : Ordinals) { - // TODO: deprecate ClArrayBoundsPseudoFn - if (((ClArrayBoundsPseudoFn && Ord == SanitizerKind::SO_ArrayBounds) || - CGM.getCodeGenOpts().SanitizeAnnotateDebugInfo.has(Ord)) && - CheckDebugLoc) { - return DI->CreateSyntheticInlineAt(CheckDebugLoc, Label); - } - } + if (any_of(Ordinals, [&](auto Ord) { return AnnotateDebugInfo.has(Ord); })) + return DI->CreateSyntheticInlineAt(CheckDebugLoc, Label); return CheckDebugLoc; } diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp index 8e71a57..8c66176 100644 --- a/clang/lib/CodeGen/CGObjCMac.cpp +++ b/clang/lib/CodeGen/CGObjCMac.cpp @@ -285,7 +285,7 @@ public: SmallVector<CanQualType, 5> Params; Params.push_back(Ctx.VoidPtrTy); Params.push_back(Ctx.VoidPtrTy); - Params.push_back(Ctx.getSizeType()); + Params.push_back(Ctx.getCanonicalSizeType()); 
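// Hedged usage sketches for two user-level builtins whose code paths are
// touched above; the vector type and function names are illustrative.
// __builtin_elementwise_maximumnum/minimumnum lower to the llvm.maximumnum /
// llvm.minimumnum intrinsics (IEEE 754-2019 maximumNumber/minimumNumber, which
// prefer a number over a NaN), and __builtin_assume_separate_storage produces
// the cir.assume_separate_storage op that the new pattern lowers to llvm.assume
// with a separate_storage bundle.
typedef float float4 __attribute__((ext_vector_type(4)));

float4 lane_spread(float4 a, float4 b) {
  float4 hi = __builtin_elementwise_maximumnum(a, b); // per-lane maximumnum
  float4 lo = __builtin_elementwise_minimumnum(a, b); // per-lane minimumnum
  return hi - lo;
}

void add_disjoint(float *a, float *b, int n) {
  __builtin_assume_separate_storage(a, b); // a and b do not alias
  for (int i = 0; i < n; ++i)
    a[i] += b[i];
}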
Params.push_back(Ctx.BoolTy); Params.push_back(Ctx.BoolTy); llvm::FunctionType *FTy = Types.GetFunctionType( diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp index e065006..1a8c6f0 100644 --- a/clang/lib/CodeGen/CGStmt.cpp +++ b/clang/lib/CodeGen/CGStmt.cpp @@ -846,11 +846,13 @@ void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) { if (HaveInsertPoint()) EmitStopPoint(&S); + ApplyAtomGroup Grp(getDebugInfo()); EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel())); } void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) { + ApplyAtomGroup Grp(getDebugInfo()); if (const LabelDecl *Target = S.getConstantTarget()) { EmitBranchThroughCleanup(getJumpDestForLabel(Target)); return; @@ -869,6 +871,8 @@ void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) { cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB); EmitBranch(IndGotoBB); + if (CurBB && CurBB->getTerminator()) + addInstToCurrentSourceAtom(CurBB->getTerminator(), nullptr); } void CodeGenFunction::EmitIfStmt(const IfStmt &S) { @@ -2672,6 +2676,9 @@ static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect, llvm::ConstantAsMetadata::get(Loc))); } + // Make inline-asm calls Key for the debug info feature Key Instructions. + CGF.addInstToNewSourceAtom(&Result, nullptr); + if (!NoConvergent && CGF.getLangOpts().assumeFunctionsAreConvergent()) // Conservatively, mark all inline asm blocks in CUDA or OpenCL as // convergent (meaning, they may call an intrinsically convergent op, such @@ -2750,6 +2757,7 @@ EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S, } } + ApplyAtomGroup Grp(CGF.getDebugInfo()); LValue Dest = ResultRegDests[i]; // ResultTypeRequiresCast elements correspond to the first // ResultTypeRequiresCast.size() elements of RegResults. 
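// A small C sketch (x86 inline asm assumed) of the constructs the Key
// Instructions changes above annotate: the inline-asm call, the store of its
// output, and the branch emitted for the goto are grouped into source atoms
// for debug info.
int copy_with_asm(int in) {
  int out;
  __asm__ volatile("movl %1, %0" : "=r"(out) : "r"(in));
  if (out == 0)
    goto done;
  ++out;
done:
  return out;
}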
@@ -2757,7 +2765,8 @@ EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S, unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]); Address A = Dest.getAddress().withElementType(ResultRegTypes[i]); if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) { - Builder.CreateStore(Tmp, A); + llvm::StoreInst *S = Builder.CreateStore(Tmp, A); + CGF.addInstToCurrentSourceAtom(S, S->getValueOperand()); continue; } diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp index 0fda31c..ab345a5 100644 --- a/clang/lib/CodeGen/CodeGenFunction.cpp +++ b/clang/lib/CodeGen/CodeGenFunction.cpp @@ -720,7 +720,7 @@ static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) { (MD->getNumParams() != 1 && MD->getNumParams() != 2)) return false; - if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType()) + if (!Ctx.hasSameType(MD->parameters()[0]->getType(), Ctx.getSizeType())) return false; if (MD->getNumParams() == 2) { @@ -2491,6 +2491,7 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) { case Type::ObjCObjectPointer: case Type::BitInt: case Type::HLSLInlineSpirv: + case Type::PredefinedSugar: llvm_unreachable("type class is never variably-modified!"); case Type::Elaborated: diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp index 236cc3d..834b1c0 100644 --- a/clang/lib/CodeGen/CodeGenModule.cpp +++ b/clang/lib/CodeGen/CodeGenModule.cpp @@ -4418,8 +4418,9 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) { static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old, llvm::Function *NewFn); -static uint64_t getFMVPriority(const TargetInfo &TI, - const CodeGenFunction::FMVResolverOption &RO) { +static llvm::APInt +getFMVPriority(const TargetInfo &TI, + const CodeGenFunction::FMVResolverOption &RO) { llvm::SmallVector<StringRef, 8> Features{RO.Features}; if (RO.Architecture) Features.push_back(*RO.Architecture); @@ -4544,7 +4545,7 @@ void CodeGenModule::emitMultiVersionFunctions() { llvm::stable_sort( Options, [&TI](const CodeGenFunction::FMVResolverOption &LHS, const CodeGenFunction::FMVResolverOption &RHS) { - return getFMVPriority(TI, LHS) > getFMVPriority(TI, RHS); + return getFMVPriority(TI, LHS).ugt(getFMVPriority(TI, RHS)); }); CodeGenFunction CGF(*this); CGF.EmitMultiVersionResolver(ResolverFunc, Options); diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp index ee736a2..7dccf82 100644 --- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp +++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp @@ -855,6 +855,7 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID, case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x128_bf8_fp8: case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x128_bf8_bf8: case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x64_iu8: + case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x128_f8f6f4: case AMDGPU::BI__builtin_amdgcn_wmma_f32_32x16x128_f4: case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x64_f16: case AMDGPU::BI__builtin_amdgcn_swmmac_f32_16x16x64_bf16: @@ -1118,6 +1119,10 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID, ArgsForMatchingMatrixTypes = {4, 1}; BuiltinWMMAOp = Intrinsic::amdgcn_wmma_i32_16x16x64_iu8; break; + case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x128_f8f6f4: + ArgsForMatchingMatrixTypes = {5, 1, 3}; + BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x128_f8f6f4; + break; case 
AMDGPU::BI__builtin_amdgcn_wmma_f32_32x16x128_f4: ArgsForMatchingMatrixTypes = {3, 0, 1}; BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_32x16x128_f4; diff --git a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp index 7e6a47f..2e6b4b3 100644 --- a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp +++ b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp @@ -8112,7 +8112,7 @@ Value *CodeGenFunction::EmitAArch64CpuSupports(const CallExpr *E) { llvm::Value * CodeGenFunction::EmitAArch64CpuSupports(ArrayRef<StringRef> FeaturesStrs) { - uint64_t FeaturesMask = llvm::AArch64::getCpuSupportsMask(FeaturesStrs); + llvm::APInt FeaturesMask = llvm::AArch64::getCpuSupportsMask(FeaturesStrs); Value *Result = Builder.getTrue(); if (FeaturesMask != 0) { // Get features from structure in runtime library @@ -8128,7 +8128,7 @@ CodeGenFunction::EmitAArch64CpuSupports(ArrayRef<StringRef> FeaturesStrs) { {ConstantInt::get(Int32Ty, 0), ConstantInt::get(Int32Ty, 0)}); Value *Features = Builder.CreateAlignedLoad(Int64Ty, CpuFeatures, CharUnits::fromQuantity(8)); - Value *Mask = Builder.getInt64(FeaturesMask); + Value *Mask = Builder.getInt(FeaturesMask.trunc(64)); Value *Bitset = Builder.CreateAnd(Features, Mask); Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask); Result = Builder.CreateAnd(Result, Cmp); diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp index ec1135e..ef5af66 100644 --- a/clang/lib/Driver/Driver.cpp +++ b/clang/lib/Driver/Driver.cpp @@ -68,6 +68,7 @@ #include "clang/Driver/Types.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallSet.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/StringSet.h" @@ -83,6 +84,7 @@ #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ExitCodes.h" #include "llvm/Support/FileSystem.h" +#include "llvm/Support/FileUtilities.h" #include "llvm/Support/FormatVariadic.h" #include "llvm/Support/MD5.h" #include "llvm/Support/Path.h" @@ -109,65 +111,6 @@ using namespace clang::driver; using namespace clang; using namespace llvm::opt; -static std::optional<llvm::Triple> getOffloadTargetTriple(const Driver &D, - const ArgList &Args) { - auto OffloadTargets = Args.getAllArgValues(options::OPT_offload_EQ); - // Offload compilation flow does not support multiple targets for now. We - // need the HIPActionBuilder (and possibly the CudaActionBuilder{,Base}too) - // to support multiple tool chains first. - switch (OffloadTargets.size()) { - default: - D.Diag(diag::err_drv_only_one_offload_target_supported); - return std::nullopt; - case 0: - D.Diag(diag::err_drv_invalid_or_unsupported_offload_target) << ""; - return std::nullopt; - case 1: - break; - } - return llvm::Triple(OffloadTargets[0]); -} - -static std::optional<llvm::Triple> -getNVIDIAOffloadTargetTriple(const Driver &D, const ArgList &Args, - const llvm::Triple &HostTriple) { - if (!Args.hasArg(options::OPT_offload_EQ)) { - return llvm::Triple(HostTriple.isArch64Bit() ? 
"nvptx64-nvidia-cuda" - : "nvptx-nvidia-cuda"); - } - auto TT = getOffloadTargetTriple(D, Args); - if (TT && (TT->getArch() == llvm::Triple::spirv32 || - TT->getArch() == llvm::Triple::spirv64)) { - if (Args.hasArg(options::OPT_emit_llvm)) - return TT; - D.Diag(diag::err_drv_cuda_offload_only_emit_bc); - return std::nullopt; - } - D.Diag(diag::err_drv_invalid_or_unsupported_offload_target) << TT->str(); - return std::nullopt; -} - -static std::optional<llvm::Triple> -getHIPOffloadTargetTriple(const Driver &D, const ArgList &Args) { - if (!Args.hasArg(options::OPT_offload_EQ)) { - auto OffloadArchs = Args.getAllArgValues(options::OPT_offload_arch_EQ); - if (llvm::is_contained(OffloadArchs, "amdgcnspirv") && - OffloadArchs.size() == 1) - return llvm::Triple("spirv64-amd-amdhsa"); - return llvm::Triple("amdgcn-amd-amdhsa"); // Default HIP triple. - } - auto TT = getOffloadTargetTriple(D, Args); - if (!TT) - return std::nullopt; - if (TT->isAMDGCN() && TT->getVendor() == llvm::Triple::AMD && - TT->getOS() == llvm::Triple::AMDHSA) - return TT; - if (TT->getArch() == llvm::Triple::spirv64) - return TT; - D.Diag(diag::err_drv_invalid_or_unsupported_offload_target) << TT->str(); - return std::nullopt; -} - template <typename F> static bool usesInput(const ArgList &Args, F &&Fn) { return llvm::any_of(Args, [&](Arg *A) { return (A->getOption().matches(options::OPT_x) && @@ -458,6 +401,44 @@ phases::ID Driver::getFinalPhase(const DerivedArgList &DAL, return FinalPhase; } +llvm::Expected<std::unique_ptr<llvm::MemoryBuffer>> +Driver::executeProgram(llvm::ArrayRef<llvm::StringRef> Args) const { + llvm::SmallString<64> OutputFile; + llvm::sys::fs::createTemporaryFile("driver-program", "txt", OutputFile, + llvm::sys::fs::OF_Text); + llvm::FileRemover OutputRemover(OutputFile.c_str()); + std::optional<llvm::StringRef> Redirects[] = { + {""}, + OutputFile.str(), + {""}, + }; + + std::string ErrorMessage; + int SecondsToWait = 60; + if (std::optional<std::string> Str = + llvm::sys::Process::GetEnv("CLANG_TOOLCHAIN_PROGRAM_TIMEOUT")) { + if (!llvm::to_integer(*Str, SecondsToWait)) + return llvm::createStringError(std::error_code(), + "CLANG_TOOLCHAIN_PROGRAM_TIMEOUT expected " + "an integer, got '" + + *Str + "'"); + SecondsToWait = std::max(SecondsToWait, 0); // infinite + } + StringRef Executable = Args[0]; + if (llvm::sys::ExecuteAndWait(Executable, Args, {}, Redirects, SecondsToWait, + /*MemoryLimit=*/0, &ErrorMessage)) + return llvm::createStringError(std::error_code(), + Executable + ": " + ErrorMessage); + + llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> OutputBuf = + llvm::MemoryBuffer::getFile(OutputFile.c_str()); + if (!OutputBuf) + return llvm::createStringError(OutputBuf.getError(), + "Failed to read stdout of " + Executable + + ": " + OutputBuf.getError().message()); + return std::move(*OutputBuf); +} + static Arg *MakeInputArg(DerivedArgList &Args, const OptTable &Opts, StringRef Value, bool Claim = true) { Arg *A = new Arg(Opts.getOption(options::OPT_INPUT), Value, @@ -921,250 +902,266 @@ Driver::OpenMPRuntimeKind Driver::getOpenMPRuntime(const ArgList &Args) const { return RT; } -static llvm::Triple getSYCLDeviceTriple(StringRef TargetArch) { - SmallVector<StringRef, 5> SYCLAlias = {"spir", "spir64", "spirv", "spirv32", - "spirv64"}; - if (llvm::is_contained(SYCLAlias, TargetArch)) { - llvm::Triple TargetTriple; - TargetTriple.setArchName(TargetArch); - TargetTriple.setVendor(llvm::Triple::UnknownVendor); - TargetTriple.setOS(llvm::Triple::UnknownOS); - return TargetTriple; - } - return 
llvm::Triple(TargetArch); +// Handles `native` offload architectures by using the 'offload-arch' utility. +static llvm::SmallVector<std::string> +getSystemOffloadArchs(Compilation &C, Action::OffloadKind Kind) { + StringRef Program = C.getArgs().getLastArgValue( + options::OPT_offload_arch_tool_EQ, "offload-arch"); + + SmallVector<std::string> GPUArchs; + if (llvm::ErrorOr<std::string> Executable = + llvm::sys::findProgramByName(Program)) { + llvm::SmallVector<StringRef> Args{*Executable}; + if (Kind == Action::OFK_HIP) + Args.push_back("--only=amdgpu"); + else if (Kind == Action::OFK_Cuda) + Args.push_back("--only=nvptx"); + auto StdoutOrErr = C.getDriver().executeProgram(Args); + + if (!StdoutOrErr) { + C.getDriver().Diag(diag::err_drv_undetermined_gpu_arch) + << Action::GetOffloadKindName(Kind) << StdoutOrErr.takeError() + << "--offload-arch"; + return GPUArchs; + } + if ((*StdoutOrErr)->getBuffer().empty()) { + C.getDriver().Diag(diag::err_drv_undetermined_gpu_arch) + << Action::GetOffloadKindName(Kind) << "No GPU detected in the system" + << "--offload-arch"; + return GPUArchs; + } + + for (StringRef Arch : llvm::split((*StdoutOrErr)->getBuffer(), "\n")) + if (!Arch.empty()) + GPUArchs.push_back(Arch.str()); + } else { + C.getDriver().Diag(diag::err_drv_command_failure) << "offload-arch"; + } + return GPUArchs; } -static bool addSYCLDefaultTriple(Compilation &C, - SmallVectorImpl<llvm::Triple> &SYCLTriples) { - // Check current set of triples to see if the default has already been set. - for (const auto &SYCLTriple : SYCLTriples) { - if (SYCLTriple.getSubArch() == llvm::Triple::NoSubArch && - SYCLTriple.isSPIROrSPIRV()) - return false; +// Attempts to infer the correct offloading toolchain triple by looking at the +// requested offloading kind and architectures. +static llvm::DenseSet<llvm::StringRef> +inferOffloadToolchains(Compilation &C, Action::OffloadKind Kind) { + std::set<std::string> Archs; + for (Arg *A : C.getInputArgs()) { + for (StringRef Arch : A->getValues()) { + if (A->getOption().matches(options::OPT_offload_arch_EQ)) { + if (Arch == "native") { + for (StringRef Str : getSystemOffloadArchs(C, Kind)) + Archs.insert(Str.str()); + } else { + Archs.insert(Arch.str()); + } + } else if (A->getOption().matches(options::OPT_no_offload_arch_EQ)) { + if (Arch == "all") + Archs.clear(); + else + Archs.erase(Arch.str()); + } + } } - // Add the default triple as it was not found. - llvm::Triple DefaultTriple = getSYCLDeviceTriple( - C.getDefaultToolChain().getTriple().isArch32Bit() ? 
"spirv32" - : "spirv64"); - SYCLTriples.insert(SYCLTriples.begin(), DefaultTriple); - return true; + + llvm::DenseSet<llvm::StringRef> Triples; + for (llvm::StringRef Arch : Archs) { + OffloadArch ID = StringToOffloadArch(Arch); + if (ID == OffloadArch::UNKNOWN) + ID = StringToOffloadArch( + getProcessorFromTargetID(llvm::Triple("amdgcn-amd-amdhsa"), Arch)); + + if (Kind == Action::OFK_HIP && !IsAMDOffloadArch(ID)) { + C.getDriver().Diag(clang::diag::err_drv_offload_bad_gpu_arch) + << "HIP" << Arch; + return llvm::DenseSet<llvm::StringRef>(); + } + if (Kind == Action::OFK_Cuda && !IsNVIDIAOffloadArch(ID)) { + C.getDriver().Diag(clang::diag::err_drv_offload_bad_gpu_arch) + << "CUDA" << Arch; + return llvm::DenseSet<llvm::StringRef>(); + } + if (Kind == Action::OFK_OpenMP && + (ID == OffloadArch::UNKNOWN || ID == OffloadArch::UNUSED)) { + C.getDriver().Diag(clang::diag::err_drv_failed_to_deduce_target_from_arch) + << Arch; + return llvm::DenseSet<llvm::StringRef>(); + } + if (ID == OffloadArch::UNKNOWN || ID == OffloadArch::UNUSED) { + C.getDriver().Diag(clang::diag::err_drv_offload_bad_gpu_arch) + << "offload" << Arch; + return llvm::DenseSet<llvm::StringRef>(); + } + + StringRef Triple; + if (ID == OffloadArch::AMDGCNSPIRV) + Triple = "spirv64-amd-amdhsa"; + else if (IsNVIDIAOffloadArch(ID)) + Triple = C.getDefaultToolChain().getTriple().isArch64Bit() + ? "nvptx64-nvidia-cuda" + : "nvptx-nvidia-cuda"; + else if (IsAMDOffloadArch(ID)) + Triple = "amdgcn-amd-amdhsa"; + else + continue; + + // Make a new argument that dispatches this argument to the appropriate + // toolchain. This is required when we infer it and create potentially + // incompatible toolchains from the global option. + Option Opt = C.getDriver().getOpts().getOption(options::OPT_Xarch__); + unsigned Index = C.getArgs().getBaseArgs().MakeIndex("-Xarch_"); + Arg *A = new Arg(Opt, C.getArgs().getArgString(Index), Index, + C.getArgs().MakeArgString(Triple.split("-").first), + C.getArgs().MakeArgString("--offload-arch=" + Arch)); + C.getArgs().append(A); + C.getArgs().AddSynthesizedArg(A); + Triples.insert(Triple); + } + + // Infer the default target triple if no specific architectures are given. + if (Archs.empty() && Kind == Action::OFK_HIP) + Triples.insert("amdgcn-amd-amdhsa"); + else if (Archs.empty() && Kind == Action::OFK_Cuda) + Triples.insert(C.getDefaultToolChain().getTriple().isArch64Bit() + ? "nvptx64-nvidia-cuda" + : "nvptx-nvidia-cuda"); + else if (Archs.empty() && Kind == Action::OFK_SYCL) + Triples.insert(C.getDefaultToolChain().getTriple().isArch64Bit() + ? "spirv64-unknown-unknown" + : "spirv32-unknown-unknown"); + + // We need to dispatch these to the appropriate toolchain now. + C.getArgs().eraseArg(options::OPT_offload_arch_EQ); + C.getArgs().eraseArg(options::OPT_no_offload_arch_EQ); + + return Triples; } void Driver::CreateOffloadingDeviceToolChains(Compilation &C, InputList &Inputs) { - - // - // CUDA/HIP - // - // We need to generate a CUDA/HIP toolchain if any of the inputs has a CUDA - // or HIP type. However, mixed CUDA/HIP compilation is not supported. 
+ bool UseLLVMOffload = C.getInputArgs().hasArg( + options::OPT_foffload_via_llvm, options::OPT_fno_offload_via_llvm, false); bool IsCuda = - llvm::any_of(Inputs, [](std::pair<types::ID, const llvm::opt::Arg *> &I) { - return types::isCuda(I.first); - }); - bool IsHIP = llvm::any_of(Inputs, [](std::pair<types::ID, const llvm::opt::Arg *> &I) { - return types::isHIP(I.first); - }) || - C.getInputArgs().hasArg(options::OPT_hip_link) || - C.getInputArgs().hasArg(options::OPT_hipstdpar); - bool UseLLVMOffload = C.getInputArgs().hasArg( - options::OPT_foffload_via_llvm, options::OPT_fno_offload_via_llvm, false); - if (IsCuda && IsHIP) { - Diag(clang::diag::err_drv_mix_cuda_hip); + return types::isCuda(I.first); + }) && + !UseLLVMOffload; + bool IsHIP = + (llvm::any_of(Inputs, + [](std::pair<types::ID, const llvm::opt::Arg *> &I) { + return types::isHIP(I.first); + }) || + C.getInputArgs().hasArg(options::OPT_hip_link) || + C.getInputArgs().hasArg(options::OPT_hipstdpar)) && + !UseLLVMOffload; + bool IsSYCL = C.getInputArgs().hasFlag(options::OPT_fsycl, + options::OPT_fno_sycl, false); + bool IsOpenMPOffloading = + UseLLVMOffload || + (C.getInputArgs().hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ, + options::OPT_fno_openmp, false) && + (C.getInputArgs().hasArg(options::OPT_offload_targets_EQ) || + (C.getInputArgs().hasArg(options::OPT_offload_arch_EQ) && + !(IsCuda || IsHIP)))); + + llvm::SmallSet<Action::OffloadKind, 4> Kinds; + const std::pair<bool, Action::OffloadKind> ActiveKinds[] = { + {IsCuda, Action::OFK_Cuda}, + {IsHIP, Action::OFK_HIP}, + {IsOpenMPOffloading, Action::OFK_OpenMP}, + {IsSYCL, Action::OFK_SYCL}}; + for (const auto &[Active, Kind] : ActiveKinds) + if (Active) + Kinds.insert(Kind); + + // We currently don't support any kind of mixed offloading. + if (Kinds.size() > 1) { + Diag(clang::diag::err_drv_mix_offload) + << Action::GetOffloadKindName(*Kinds.begin()).upper() + << Action::GetOffloadKindName(*(++Kinds.begin())).upper(); return; } - if (IsCuda && !UseLLVMOffload) { - auto CudaTriple = getNVIDIAOffloadTargetTriple( - *this, C.getInputArgs(), C.getDefaultToolChain().getTriple()); - if (!CudaTriple) - return; - - auto &TC = - getOffloadToolChain(C.getInputArgs(), Action::OFK_Cuda, *CudaTriple, - C.getDefaultToolChain().getTriple()); - - // Emit a warning if the detected CUDA version is too new. - const CudaInstallationDetector &CudaInstallation = - static_cast<const toolchains::CudaToolChain &>(TC).CudaInstallation; - if (CudaInstallation.isValid()) - CudaInstallation.WarnIfUnsupportedVersion(); - C.addOffloadDeviceToolChain(&TC, Action::OFK_Cuda); - OffloadArchs[&TC] = getOffloadArchs(C, C.getArgs(), Action::OFK_Cuda, &TC, - /*SpecificToolchain=*/true); - } else if (IsHIP && !UseLLVMOffload) { - if (auto *OMPTargetArg = - C.getInputArgs().getLastArg(options::OPT_offload_targets_EQ)) { - Diag(clang::diag::err_drv_unsupported_opt_for_language_mode) - << OMPTargetArg->getSpelling() << "HIP"; - return; - } - - auto HIPTriple = getHIPOffloadTargetTriple(*this, C.getInputArgs()); - if (!HIPTriple) - return; - - auto &TC = - getOffloadToolChain(C.getInputArgs(), Action::OFK_HIP, *HIPTriple, - C.getDefaultToolChain().getTriple()); - C.addOffloadDeviceToolChain(&TC, Action::OFK_HIP); - - // TODO: Fix 'amdgcnspirv' handling with the new driver. 
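// Assumed illustration of the inferred-toolchain path above: compiling this
// translation unit with, e.g., `clang++ -fopenmp --offload-arch=gfx90a sum.cpp`
// (gfx90a is just an example arch) derives the amdgcn-amd-amdhsa device
// toolchain from the architecture alone, and an sm_* arch would select
// nvptx64-nvidia-cuda on a 64-bit host, with no explicit offload-targets list.
#include <cstdio>

int main() {
  double sum = 0.0;
#pragma omp target teams distribute parallel for reduction(+ : sum)
  for (int i = 0; i < 1000; ++i)
    sum += i;
  std::printf("sum = %f\n", sum);
  return 0;
}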
- if (C.getInputArgs().hasFlag(options::OPT_offload_new_driver, - options::OPT_no_offload_new_driver, false)) - OffloadArchs[&TC] = getOffloadArchs(C, C.getArgs(), Action::OFK_HIP, &TC, - /*SpecificToolchain=*/true); - } + // Initialize the compilation identifier used for unique CUDA / HIP names. if (IsCuda || IsHIP) CUIDOpts = CUIDOptions(C.getArgs(), *this); - // - // OpenMP - // - // We need to generate an OpenMP toolchain if the user specified targets with - // the -fopenmp-targets option or used --offload-arch with OpenMP enabled. - bool IsOpenMPOffloading = - ((IsCuda || IsHIP) && UseLLVMOffload) || - (C.getInputArgs().hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ, - options::OPT_fno_openmp, false) && - (C.getInputArgs().hasArg(options::OPT_offload_targets_EQ) || - C.getInputArgs().hasArg(options::OPT_offload_arch_EQ))); - if (IsOpenMPOffloading) { - // We expect that -fopenmp-targets is always used in conjunction with the - // option -fopenmp specifying a valid runtime with offloading support, i.e. - // libomp or libiomp. - OpenMPRuntimeKind RuntimeKind = getOpenMPRuntime(C.getInputArgs()); - if (RuntimeKind != OMPRT_OMP && RuntimeKind != OMPRT_IOMP5) { - Diag(clang::diag::err_drv_expecting_fopenmp_with_fopenmp_targets); - return; - } - - // If the user specified -fopenmp-targets= we create a toolchain for each - // valid triple. Otherwise, if only --offload-arch= was specified we instead - // attempt to derive the appropriate toolchains from the arguments. - if (Arg *OpenMPTargets = - C.getInputArgs().getLastArg(options::OPT_offload_targets_EQ)) { - if (OpenMPTargets && !OpenMPTargets->getNumValues()) { - Diag(clang::diag::warn_drv_empty_joined_argument) - << OpenMPTargets->getAsString(C.getInputArgs()); + // Get the list of requested offloading toolchains. If they were not + // explicitly specified we will infer them based on the offloading language + // and requested architectures. + std::multiset<llvm::StringRef> Triples; + if (C.getInputArgs().hasArg(options::OPT_offload_targets_EQ)) { + std::vector<std::string> ArgValues = + C.getInputArgs().getAllArgValues(options::OPT_offload_targets_EQ); + for (llvm::StringRef Target : ArgValues) + Triples.insert(C.getInputArgs().MakeArgString(Target)); + + if (ArgValues.empty()) + Diag(clang::diag::warn_drv_empty_joined_argument) + << C.getInputArgs() + .getLastArg(options::OPT_offload_targets_EQ) + ->getAsString(C.getInputArgs()); + } else if (Kinds.size() > 0) { + for (Action::OffloadKind Kind : Kinds) { + llvm::DenseSet<llvm::StringRef> Derived = inferOffloadToolchains(C, Kind); + Triples.insert(Derived.begin(), Derived.end()); + } + } + + // Build an offloading toolchain for every requested target and kind. + llvm::StringMap<StringRef> FoundNormalizedTriples; + for (StringRef Target : Triples) { + // OpenMP offloading requires a compatible libomp. + if (Kinds.contains(Action::OFK_OpenMP)) { + OpenMPRuntimeKind RuntimeKind = getOpenMPRuntime(C.getInputArgs()); + if (RuntimeKind != OMPRT_OMP && RuntimeKind != OMPRT_IOMP5) { + Diag(clang::diag::err_drv_expecting_fopenmp_with_fopenmp_targets); return; } + } - // Make sure these show up in a deterministic order. - std::multiset<StringRef> OpenMPTriples; - for (StringRef T : OpenMPTargets->getValues()) - OpenMPTriples.insert(T); - - llvm::StringMap<StringRef> FoundNormalizedTriples; - for (StringRef T : OpenMPTriples) { - llvm::Triple TT(ToolChain::getOpenMPTriple(T)); - std::string NormalizedName = TT.normalize(); - - // Make sure we don't have a duplicate triple. 
- auto [TripleIt, Inserted] = - FoundNormalizedTriples.try_emplace(NormalizedName, T); - if (!Inserted) { - Diag(clang::diag::warn_drv_omp_offload_target_duplicate) - << T << TripleIt->second; - continue; - } - - // If the specified target is invalid, emit a diagnostic. - if (TT.getArch() == llvm::Triple::UnknownArch) { - Diag(clang::diag::err_drv_invalid_omp_target) << T; - continue; - } + // Certain options are not allowed when combined with SYCL compilation. + if (Kinds.contains(Action::OFK_SYCL)) { + for (auto ID : + {options::OPT_static_libstdcxx, options::OPT_ffreestanding}) + if (Arg *IncompatArg = C.getInputArgs().getLastArg(ID)) + Diag(clang::diag::err_drv_argument_not_allowed_with) + << IncompatArg->getSpelling() << "-fsycl"; + } - auto &TC = getOffloadToolChain(C.getInputArgs(), Action::OFK_OpenMP, TT, - C.getDefaultToolChain().getTriple()); - C.addOffloadDeviceToolChain(&TC, Action::OFK_OpenMP); - OffloadArchs[&TC] = - getOffloadArchs(C, C.getArgs(), Action::OFK_OpenMP, &TC, - /*SpecificToolchain=*/true); - } - } else if (C.getInputArgs().hasArg(options::OPT_offload_arch_EQ) && - ((!IsHIP && !IsCuda) || UseLLVMOffload)) { - llvm::Triple AMDTriple("amdgcn-amd-amdhsa"); - llvm::Triple NVPTXTriple("nvptx64-nvidia-cuda"); - - for (StringRef Arch : - C.getInputArgs().getAllArgValues(options::OPT_offload_arch_EQ)) { - bool IsNVPTX = IsNVIDIAOffloadArch( - StringToOffloadArch(getProcessorFromTargetID(NVPTXTriple, Arch))); - bool IsAMDGPU = IsAMDOffloadArch( - StringToOffloadArch(getProcessorFromTargetID(AMDTriple, Arch))); - if (!IsNVPTX && !IsAMDGPU && !Arch.empty() && - !Arch.equals_insensitive("native")) { - Diag(clang::diag::err_drv_failed_to_deduce_target_from_arch) << Arch; - return; - } + // Create a device toolchain for every specified kind and triple. + for (Action::OffloadKind Kind : Kinds) { + llvm::Triple TT = Kind == Action::OFK_OpenMP + ? ToolChain::getOpenMPTriple(Target) + : llvm::Triple(Target); + if (TT.getArch() == llvm::Triple::ArchType::UnknownArch) { + Diag(diag::err_drv_invalid_or_unsupported_offload_target) << TT.str(); + continue; } - // Attempt to deduce the offloading triple from the set of architectures. - // We can only correctly deduce NVPTX / AMDGPU triples currently. - for (const llvm::Triple &TT : {AMDTriple, NVPTXTriple}) { - auto &TC = getOffloadToolChain(C.getInputArgs(), Action::OFK_OpenMP, TT, - C.getDefaultToolChain().getTriple()); - - llvm::SmallVector<StringRef> Archs = - getOffloadArchs(C, C.getArgs(), Action::OFK_OpenMP, &TC, - /*SpecificToolchain=*/false); - if (!Archs.empty()) { - C.addOffloadDeviceToolChain(&TC, Action::OFK_OpenMP); - OffloadArchs[&TC] = Archs; - } + std::string NormalizedName = TT.normalize(); + auto [TripleIt, Inserted] = + FoundNormalizedTriples.try_emplace(NormalizedName, Target); + if (!Inserted) { + Diag(clang::diag::warn_drv_omp_offload_target_duplicate) + << Target << TripleIt->second; + continue; } - // If the set is empty then we failed to find a native architecture. - auto TCRange = C.getOffloadToolChains(Action::OFK_OpenMP); - if (TCRange.first == TCRange.second) - Diag(clang::diag::err_drv_failed_to_deduce_target_from_arch) - << "native"; - } - } else if (C.getInputArgs().hasArg(options::OPT_offload_targets_EQ)) { - Diag(clang::diag::err_drv_expecting_fopenmp_with_fopenmp_targets); - return; - } + auto &TC = getOffloadToolChain(C.getInputArgs(), Kind, TT, + C.getDefaultToolChain().getTriple()); - // We need to generate a SYCL toolchain if the user specified -fsycl. 
- bool IsSYCL = C.getInputArgs().hasFlag(options::OPT_fsycl, - options::OPT_fno_sycl, false); - - auto argSYCLIncompatible = [&](OptSpecifier OptId) { - if (!IsSYCL) - return; - if (Arg *IncompatArg = C.getInputArgs().getLastArg(OptId)) - Diag(clang::diag::err_drv_argument_not_allowed_with) - << IncompatArg->getSpelling() << "-fsycl"; - }; - // -static-libstdc++ is not compatible with -fsycl. - argSYCLIncompatible(options::OPT_static_libstdcxx); - // -ffreestanding cannot be used with -fsycl - argSYCLIncompatible(options::OPT_ffreestanding); - - llvm::SmallVector<llvm::Triple, 4> UniqueSYCLTriplesVec; - - if (IsSYCL) { - addSYCLDefaultTriple(C, UniqueSYCLTriplesVec); + // Emit a warning if the detected CUDA version is too new. + if (Kind == Action::OFK_Cuda) { + auto &CudaInstallation = + static_cast<const toolchains::CudaToolChain &>(TC).CudaInstallation; + if (CudaInstallation.isValid()) + CudaInstallation.WarnIfUnsupportedVersion(); + } - // We'll need to use the SYCL and host triples as the key into - // getOffloadingDeviceToolChain, because the device toolchains we're - // going to create will depend on both. - const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>(); - for (const auto &TT : UniqueSYCLTriplesVec) { - auto &TC = getOffloadToolChain(C.getInputArgs(), Action::OFK_SYCL, TT, - HostTC->getTriple()); - C.addOffloadDeviceToolChain(&TC, Action::OFK_SYCL); - OffloadArchs[&TC] = getOffloadArchs(C, C.getArgs(), Action::OFK_SYCL, &TC, - /*SpecificToolchain=*/true); + C.addOffloadDeviceToolChain(&TC, Kind); } } - - // - // TODO: Add support for other offloading programming models here. - // } bool Driver::loadZOSCustomizationFile(llvm::cl::ExpansionContext &ExpCtx) { @@ -3306,9 +3303,6 @@ class OffloadingActionBuilder final { // architecture. If we are in host-only mode we return 'success' so that // the host uses the CUDA offload kind. if (auto *IA = dyn_cast<InputAction>(HostAction)) { - assert(!GpuArchList.empty() && - "We should have at least one GPU architecture."); - // If the host input is not CUDA or HIP, we don't need to bother about // this input. if (!(IA->getType() == types::TY_CUDA || @@ -3408,10 +3402,6 @@ class OffloadingActionBuilder final { CudaDeviceActions.clear(); } - /// Get canonicalized offload arch option. \returns empty StringRef if the - /// option is invalid. - virtual StringRef getCanonicalOffloadArch(StringRef Arch) = 0; - virtual std::optional<std::pair<llvm::StringRef, llvm::StringRef>> getConflictOffloadArchCombination(const std::set<StringRef> &GpuArchs) = 0; @@ -3440,91 +3430,25 @@ class OffloadingActionBuilder final { return true; } - ToolChains.push_back( - AssociatedOffloadKind == Action::OFK_Cuda - ? C.getSingleOffloadToolChain<Action::OFK_Cuda>() - : C.getSingleOffloadToolChain<Action::OFK_HIP>()); - - CompileHostOnly = C.getDriver().offloadHostOnly(); - EmitLLVM = Args.getLastArg(options::OPT_emit_llvm); - EmitAsm = Args.getLastArg(options::OPT_S); - - // --offload and --offload-arch options are mutually exclusive. - if (Args.hasArgNoClaim(options::OPT_offload_EQ) && - Args.hasArgNoClaim(options::OPT_offload_arch_EQ, - options::OPT_no_offload_arch_EQ)) { - C.getDriver().Diag(diag::err_opt_not_valid_with_opt) << "--offload-arch" - << "--offload"; - } - - // Collect all offload arch parameters, removing duplicates. 
std::set<StringRef> GpuArchs; - bool Error = false; - const ToolChain &TC = *ToolChains.front(); - for (Arg *A : C.getArgsForToolChain(&TC, /*BoundArch=*/"", - AssociatedOffloadKind)) { - if (!(A->getOption().matches(options::OPT_offload_arch_EQ) || - A->getOption().matches(options::OPT_no_offload_arch_EQ))) - continue; - A->claim(); - - for (StringRef ArchStr : llvm::split(A->getValue(), ",")) { - if (A->getOption().matches(options::OPT_no_offload_arch_EQ) && - ArchStr == "all") { - GpuArchs.clear(); - } else if (ArchStr == "native") { - auto GPUsOrErr = ToolChains.front()->getSystemGPUArchs(Args); - if (!GPUsOrErr) { - TC.getDriver().Diag(diag::err_drv_undetermined_gpu_arch) - << llvm::Triple::getArchTypeName(TC.getArch()) - << llvm::toString(GPUsOrErr.takeError()) << "--offload-arch"; - continue; - } + for (Action::OffloadKind Kind : {Action::OFK_Cuda, Action::OFK_HIP}) { + for (auto &I : llvm::make_range(C.getOffloadToolChains(Kind))) { + ToolChains.push_back(I.second); - for (auto GPU : *GPUsOrErr) { - GpuArchs.insert(Args.MakeArgString(GPU)); - } - } else { - ArchStr = getCanonicalOffloadArch(ArchStr); - if (ArchStr.empty()) { - Error = true; - } else if (A->getOption().matches(options::OPT_offload_arch_EQ)) - GpuArchs.insert(ArchStr); - else if (A->getOption().matches(options::OPT_no_offload_arch_EQ)) - GpuArchs.erase(ArchStr); - else - llvm_unreachable("Unexpected option."); - } + for (auto Arch : + C.getDriver().getOffloadArchs(C, C.getArgs(), Kind, *I.second)) + GpuArchs.insert(Arch); } } - auto &&ConflictingArchs = getConflictOffloadArchCombination(GpuArchs); - if (ConflictingArchs) { - C.getDriver().Diag(clang::diag::err_drv_bad_offload_arch_combo) - << ConflictingArchs->first << ConflictingArchs->second; - C.setContainsError(); - return true; - } - - // Collect list of GPUs remaining in the set. for (auto Arch : GpuArchs) GpuArchList.push_back(Arch.data()); - // Default to sm_20 which is the lowest common denominator for - // supported GPUs. sm_20 code should work correctly, if - // suboptimally, on all newer GPUs. - if (GpuArchList.empty()) { - if (ToolChains.front()->getTriple().isSPIRV()) { - if (ToolChains.front()->getTriple().getVendor() == llvm::Triple::AMD) - GpuArchList.push_back(OffloadArch::AMDGCNSPIRV); - else - GpuArchList.push_back(OffloadArch::Generic); - } else { - GpuArchList.push_back(DefaultOffloadArch); - } - } + CompileHostOnly = C.getDriver().offloadHostOnly(); + EmitLLVM = Args.getLastArg(options::OPT_emit_llvm); + EmitAsm = Args.getLastArg(options::OPT_S); - return Error; + return false; } }; @@ -3538,15 +3462,6 @@ class OffloadingActionBuilder final { DefaultOffloadArch = OffloadArch::CudaDefault; } - StringRef getCanonicalOffloadArch(StringRef ArchStr) override { - OffloadArch Arch = StringToOffloadArch(ArchStr); - if (Arch == OffloadArch::UNKNOWN || !IsNVIDIAOffloadArch(Arch)) { - C.getDriver().Diag(clang::diag::err_drv_cuda_bad_gpu_arch) << ArchStr; - return StringRef(); - } - return OffloadArchToString(Arch); - } - std::optional<std::pair<llvm::StringRef, llvm::StringRef>> getConflictOffloadArchCombination( const std::set<StringRef> &GpuArchs) override { @@ -3705,24 +3620,6 @@ class OffloadingActionBuilder final { bool canUseBundlerUnbundler() const override { return true; } - StringRef getCanonicalOffloadArch(StringRef IdStr) override { - llvm::StringMap<bool> Features; - // getHIPOffloadTargetTriple() is known to return valid value as it has - // been called successfully in the CreateOffloadingDeviceToolChains(). 
- auto T = - (IdStr == "amdgcnspirv") - ? llvm::Triple("spirv64-amd-amdhsa") - : *getHIPOffloadTargetTriple(C.getDriver(), C.getInputArgs()); - auto ArchStr = parseTargetID(T, IdStr, &Features); - if (!ArchStr) { - C.getDriver().Diag(clang::diag::err_drv_bad_target_id) << IdStr; - C.setContainsError(); - return StringRef(); - } - auto CanId = getCanonicalTargetID(*ArchStr, Features); - return Args.MakeArgStringRef(CanId); - }; - std::optional<std::pair<llvm::StringRef, llvm::StringRef>> getConflictOffloadArchCombination( const std::set<StringRef> &GpuArchs) override { @@ -4715,23 +4612,20 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args, static StringRef getCanonicalArchString(Compilation &C, const llvm::opt::DerivedArgList &Args, StringRef ArchStr, - const llvm::Triple &Triple, - bool SpecificToolchain) { + const llvm::Triple &Triple) { // Lookup the CUDA / HIP architecture string. Only report an error if we were // expecting the triple to be only NVPTX / AMDGPU. OffloadArch Arch = StringToOffloadArch(getProcessorFromTargetID(Triple, ArchStr)); if (Triple.isNVPTX() && (Arch == OffloadArch::UNKNOWN || !IsNVIDIAOffloadArch(Arch))) { - if (SpecificToolchain) - C.getDriver().Diag(clang::diag::err_drv_offload_bad_gpu_arch) - << "CUDA" << ArchStr; + C.getDriver().Diag(clang::diag::err_drv_offload_bad_gpu_arch) + << "CUDA" << ArchStr; return StringRef(); } else if (Triple.isAMDGPU() && (Arch == OffloadArch::UNKNOWN || !IsAMDOffloadArch(Arch))) { - if (SpecificToolchain) - C.getDriver().Diag(clang::diag::err_drv_offload_bad_gpu_arch) - << "HIP" << ArchStr; + C.getDriver().Diag(clang::diag::err_drv_offload_bad_gpu_arch) + << "HIP" << ArchStr; return StringRef(); } @@ -4767,11 +4661,7 @@ getConflictOffloadArchCombination(const llvm::DenseSet<StringRef> &Archs, llvm::SmallVector<StringRef> Driver::getOffloadArchs(Compilation &C, const llvm::opt::DerivedArgList &Args, - Action::OffloadKind Kind, const ToolChain *TC, - bool SpecificToolchain) const { - if (!TC) - TC = &C.getDefaultToolChain(); - + Action::OffloadKind Kind, const ToolChain &TC) const { // --offload and --offload-arch options are mutually exclusive. if (Args.hasArgNoClaim(options::OPT_offload_EQ) && Args.hasArgNoClaim(options::OPT_offload_arch_EQ, @@ -4784,48 +4674,44 @@ Driver::getOffloadArchs(Compilation &C, const llvm::opt::DerivedArgList &Args, } llvm::DenseSet<StringRef> Archs; - for (auto *Arg : C.getArgsForToolChain(TC, /*BoundArch=*/"", Kind)) { + for (auto *Arg : C.getArgsForToolChain(&TC, /*BoundArch=*/"", Kind)) { // Add or remove the seen architectures in order of appearance. If an // invalid architecture is given we simply exit. 
if (Arg->getOption().matches(options::OPT_offload_arch_EQ)) { for (StringRef Arch : Arg->getValues()) { if (Arch == "native" || Arch.empty()) { - auto GPUsOrErr = TC->getSystemGPUArchs(Args); + auto GPUsOrErr = TC.getSystemGPUArchs(Args); if (!GPUsOrErr) { - if (!SpecificToolchain) - llvm::consumeError(GPUsOrErr.takeError()); - else - TC->getDriver().Diag(diag::err_drv_undetermined_gpu_arch) - << llvm::Triple::getArchTypeName(TC->getArch()) - << llvm::toString(GPUsOrErr.takeError()) << "--offload-arch"; + TC.getDriver().Diag(diag::err_drv_undetermined_gpu_arch) + << llvm::Triple::getArchTypeName(TC.getArch()) + << llvm::toString(GPUsOrErr.takeError()) << "--offload-arch"; continue; } for (auto ArchStr : *GPUsOrErr) { - StringRef CanonicalStr = - getCanonicalArchString(C, Args, Args.MakeArgString(ArchStr), - TC->getTriple(), SpecificToolchain); + StringRef CanonicalStr = getCanonicalArchString( + C, Args, Args.MakeArgString(ArchStr), TC.getTriple()); if (!CanonicalStr.empty()) Archs.insert(CanonicalStr); - else if (SpecificToolchain) + else return llvm::SmallVector<StringRef>(); } } else { - StringRef CanonicalStr = getCanonicalArchString( - C, Args, Arch, TC->getTriple(), SpecificToolchain); + StringRef CanonicalStr = + getCanonicalArchString(C, Args, Arch, TC.getTriple()); if (!CanonicalStr.empty()) Archs.insert(CanonicalStr); - else if (SpecificToolchain) + else return llvm::SmallVector<StringRef>(); } } } else if (Arg->getOption().matches(options::OPT_no_offload_arch_EQ)) { - for (StringRef Arch : llvm::split(Arg->getValue(), ",")) { + for (StringRef Arch : Arg->getValues()) { if (Arch == "all") { Archs.clear(); } else { - StringRef ArchStr = getCanonicalArchString( - C, Args, Arch, TC->getTriple(), SpecificToolchain); + StringRef ArchStr = + getCanonicalArchString(C, Args, Arch, TC.getTriple()); Archs.erase(ArchStr); } } @@ -4833,28 +4719,30 @@ Driver::getOffloadArchs(Compilation &C, const llvm::opt::DerivedArgList &Args, } if (auto ConflictingArchs = - getConflictOffloadArchCombination(Archs, TC->getTriple())) + getConflictOffloadArchCombination(Archs, TC.getTriple())) C.getDriver().Diag(clang::diag::err_drv_bad_offload_arch_combo) << ConflictingArchs->first << ConflictingArchs->second; - // Skip filling defaults if we're just querying what is availible. - if (SpecificToolchain && Archs.empty()) { + // Fill in the default architectures if not provided explicitly. + if (Archs.empty()) { if (Kind == Action::OFK_Cuda) { Archs.insert(OffloadArchToString(OffloadArch::CudaDefault)); } else if (Kind == Action::OFK_HIP) { - Archs.insert(OffloadArchToString(OffloadArch::HIPDefault)); + Archs.insert(OffloadArchToString(TC.getTriple().isSPIRV() + ? OffloadArch::Generic + : OffloadArch::HIPDefault)); } else if (Kind == Action::OFK_SYCL) { Archs.insert(StringRef()); } else if (Kind == Action::OFK_OpenMP) { // Accept legacy `-march` device arguments for OpenMP. 
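// The legacy device-arch form still accepted above, with the usual (assumed)
// spelling that routes -march to the OpenMP device toolchain:
//   clang++ -fopenmp -fopenmp-targets=amdgcn-amd-amdhsa \
//           -Xopenmp-target=amdgcn-amd-amdhsa -march=gfx90a legacy.cpp
// The arch and file names are illustrative.
void fill(int *data, int n) {
#pragma omp target map(tofrom : data[0:n])
  for (int i = 0; i < n; ++i)
    data[i] = i;
}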
- if (auto *Arg = C.getArgsForToolChain(TC, /*BoundArch=*/"", Kind) + if (auto *Arg = C.getArgsForToolChain(&TC, /*BoundArch=*/"", Kind) .getLastArg(options::OPT_march_EQ)) { Archs.insert(Arg->getValue()); } else { - auto ArchsOrErr = TC->getSystemGPUArchs(Args); + auto ArchsOrErr = TC.getSystemGPUArchs(Args); if (!ArchsOrErr) { - TC->getDriver().Diag(diag::err_drv_undetermined_gpu_arch) - << llvm::Triple::getArchTypeName(TC->getArch()) + TC.getDriver().Diag(diag::err_drv_undetermined_gpu_arch) + << llvm::Triple::getArchTypeName(TC.getArch()) << llvm::toString(ArchsOrErr.takeError()) << "--offload-arch"; } else if (!ArchsOrErr->empty()) { for (auto Arch : *ArchsOrErr) @@ -4934,7 +4822,7 @@ Action *Driver::BuildOffloadingActions(Compilation &C, // Get the product of all bound architectures and toolchains. SmallVector<std::pair<const ToolChain *, StringRef>> TCAndArchs; for (const ToolChain *TC : ToolChains) { - for (StringRef Arch : OffloadArchs.lookup(TC)) { + for (StringRef Arch : getOffloadArchs(C, C.getArgs(), Kind, *TC)) { TCAndArchs.push_back(std::make_pair(TC, Arch)); DeviceActions.push_back( C.MakeAction<InputAction>(*InputArg, InputType, CUID)); @@ -4966,7 +4854,7 @@ Action *Driver::BuildOffloadingActions(Compilation &C, if (Kind == Action::OFK_SYCL && Phase == phases::Assemble) continue; - auto TCAndArch = TCAndArchs.begin(); + auto *TCAndArch = TCAndArchs.begin(); for (Action *&A : DeviceActions) { if (A->getType() == types::TY_Nothing) continue; @@ -4998,7 +4886,13 @@ Action *Driver::BuildOffloadingActions(Compilation &C, // Compiling HIP in device-only non-RDC mode requires linking each action // individually. for (Action *&A : DeviceActions) { - if ((A->getType() != types::TY_Object && + // Special handling for the HIP SPIR-V toolchain because it doesn't use + // the SPIR-V backend yet doesn't report the output as an object. + bool IsAMDGCNSPIRV = A->getOffloadingToolChain() && + A->getOffloadingToolChain()->getTriple().getOS() == + llvm::Triple::OSType::AMDHSA && + A->getOffloadingToolChain()->getTriple().isSPIRV(); + if ((A->getType() != types::TY_Object && !IsAMDGCNSPIRV && A->getType() != types::TY_LTO_BC) || !HIPNoRDC || !offloadDeviceOnly()) continue; @@ -5006,7 +4900,7 @@ Action *Driver::BuildOffloadingActions(Compilation &C, A = C.MakeAction<LinkJobAction>(LinkerInput, types::TY_Image); } - auto TCAndArch = TCAndArchs.begin(); + auto *TCAndArch = TCAndArchs.begin(); for (Action *A : DeviceActions) { DDeps.add(*A, *TCAndArch->first, TCAndArch->second.data(), Kind); OffloadAction::DeviceDependences DDep; @@ -5054,8 +4948,9 @@ Action *Driver::BuildOffloadingActions(Compilation &C, // fatbinary for each translation unit, linking each input individually. Action *FatbinAction = C.MakeAction<LinkJobAction>(OffloadActions, types::TY_HIP_FATBIN); - DDep.add(*FatbinAction, *C.getSingleOffloadToolChain<Action::OFK_HIP>(), - nullptr, Action::OFK_HIP); + DDep.add(*FatbinAction, + *C.getOffloadToolChains<Action::OFK_HIP>().first->second, nullptr, + Action::OFK_HIP); } else { // Package all the offloading actions into a single output that can be // embedded in the host and linked. @@ -5131,11 +5026,13 @@ Action *Driver::ConstructPhaseAction( if (Args.hasArg(options::OPT_extract_api)) return C.MakeAction<ExtractAPIJobAction>(Input, types::TY_API_INFO); - // With 'fexperimental-modules-reduced-bmi', we don't want to run the + // With 'fmodules-reduced-bmi', we don't want to run the // precompile phase unless the user specified '--precompile'. 
In the case // the '--precompile' flag is enabled, we will try to emit the reduced BMI // as a by product in GenerateModuleInterfaceAction. - if (Args.hasArg(options::OPT_modules_reduced_bmi) && + if (!Args.hasArg(options::OPT_fno_modules_reduced_bmi) && + (Input->getType() == driver::types::TY_CXXModule || + Input->getType() == driver::types::TY_PP_CXXModule) && !Args.getLastArg(options::OPT__precompile)) return Input; @@ -6323,7 +6220,7 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA, // `-fmodule-output`. if (!AtTopLevel && isa<PrecompileJobAction>(JA) && JA.getType() == types::TY_ModuleFile && SpecifiedModuleOutput) { - assert(!C.getArgs().hasArg(options::OPT_modules_reduced_bmi)); + assert(C.getArgs().hasArg(options::OPT_fno_modules_reduced_bmi)); return GetModuleOutputPath(C, JA, BaseInput); } diff --git a/clang/lib/Driver/ToolChain.cpp b/clang/lib/Driver/ToolChain.cpp index 481f575..47f93fa1 100644 --- a/clang/lib/Driver/ToolChain.cpp +++ b/clang/lib/Driver/ToolChain.cpp @@ -104,44 +104,6 @@ ToolChain::ToolChain(const Driver &D, const llvm::Triple &T, addIfExists(getFilePaths(), Path); } -llvm::Expected<std::unique_ptr<llvm::MemoryBuffer>> -ToolChain::executeToolChainProgram(StringRef Executable) const { - llvm::SmallString<64> OutputFile; - llvm::sys::fs::createTemporaryFile("toolchain-program", "txt", OutputFile, - llvm::sys::fs::OF_Text); - llvm::FileRemover OutputRemover(OutputFile.c_str()); - std::optional<llvm::StringRef> Redirects[] = { - {""}, - OutputFile.str(), - {""}, - }; - - std::string ErrorMessage; - int SecondsToWait = 60; - if (std::optional<std::string> Str = - llvm::sys::Process::GetEnv("CLANG_TOOLCHAIN_PROGRAM_TIMEOUT")) { - if (!llvm::to_integer(*Str, SecondsToWait)) - return llvm::createStringError(std::error_code(), - "CLANG_TOOLCHAIN_PROGRAM_TIMEOUT expected " - "an integer, got '" + - *Str + "'"); - SecondsToWait = std::max(SecondsToWait, 0); // infinite - } - if (llvm::sys::ExecuteAndWait(Executable, {Executable}, {}, Redirects, - SecondsToWait, - /*MemoryLimit=*/0, &ErrorMessage)) - return llvm::createStringError(std::error_code(), - Executable + ": " + ErrorMessage); - - llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> OutputBuf = - llvm::MemoryBuffer::getFile(OutputFile.c_str()); - if (!OutputBuf) - return llvm::createStringError(OutputBuf.getError(), - "Failed to read stdout of " + Executable + - ": " + OutputBuf.getError().message()); - return std::move(*OutputBuf); -} - void ToolChain::setTripleEnvironment(llvm::Triple::EnvironmentType Env) { Triple.setEnvironment(Env); if (EffectiveTriple != llvm::Triple()) @@ -255,6 +217,18 @@ static void getAArch64MultilibFlags(const Driver &D, Result.push_back(ABIArg->getAsString(Args)); } + if (const Arg *A = Args.getLastArg(options::OPT_O_Group); + A && A->getOption().matches(options::OPT_O)) { + switch (A->getValue()[0]) { + case 's': + Result.push_back("-Os"); + break; + case 'z': + Result.push_back("-Oz"); + break; + } + } + processMultilibCustomFlags(Result, Args); } @@ -332,6 +306,19 @@ static void getARMMultilibFlags(const Driver &D, const llvm::Triple &Triple, if (Endian->getOption().matches(options::OPT_mbig_endian)) Result.push_back(Endian->getAsString(Args)); } + + if (const Arg *A = Args.getLastArg(options::OPT_O_Group); + A && A->getOption().matches(options::OPT_O)) { + switch (A->getValue()[0]) { + case 's': + Result.push_back("-Os"); + break; + case 'z': + Result.push_back("-Oz"); + break; + } + } + processMultilibCustomFlags(Result, Args); } @@ -1644,7 +1631,8 @@ 
void ToolChain::addSYCLIncludeArgs(const ArgList &DriverArgs, ArgStringList &CC1Args) const {} llvm::SmallVector<ToolChain::BitCodeLibraryInfo, 12> -ToolChain::getDeviceLibs(const ArgList &DriverArgs) const { +ToolChain::getDeviceLibs(const ArgList &DriverArgs, + const Action::OffloadKind DeviceOffloadingKind) const { return {}; } diff --git a/clang/lib/Driver/ToolChains/AMDGPU.cpp b/clang/lib/Driver/ToolChains/AMDGPU.cpp index 7fc34f4..0781683 100644 --- a/clang/lib/Driver/ToolChains/AMDGPU.cpp +++ b/clang/lib/Driver/ToolChains/AMDGPU.cpp @@ -31,6 +31,68 @@ using namespace clang::driver::toolchains; using namespace clang; using namespace llvm::opt; +RocmInstallationDetector::CommonBitcodeLibsPreferences:: + CommonBitcodeLibsPreferences(const Driver &D, + const llvm::opt::ArgList &DriverArgs, + StringRef GPUArch, + const Action::OffloadKind DeviceOffloadingKind, + const bool NeedsASanRT) + : ABIVer(DeviceLibABIVersion::fromCodeObjectVersion( + tools::getAMDGPUCodeObjectVersion(D, DriverArgs))) { + const auto Kind = llvm::AMDGPU::parseArchAMDGCN(GPUArch); + const unsigned ArchAttr = llvm::AMDGPU::getArchAttrAMDGCN(Kind); + + IsOpenMP = DeviceOffloadingKind == Action::OFK_OpenMP; + + const bool HasWave32 = (ArchAttr & llvm::AMDGPU::FEATURE_WAVE32); + Wave64 = + !HasWave32 || DriverArgs.hasFlag(options::OPT_mwavefrontsize64, + options::OPT_mno_wavefrontsize64, false); + + const bool IsKnownOffloading = DeviceOffloadingKind == Action::OFK_OpenMP || + DeviceOffloadingKind == Action::OFK_HIP; + + // Default to enabling f32 denormals on subtargets where fma is fast with + // denormals + const bool DefaultDAZ = + (Kind == llvm::AMDGPU::GK_NONE) + ? false + : !((ArchAttr & llvm::AMDGPU::FEATURE_FAST_FMA_F32) && + (ArchAttr & llvm::AMDGPU::FEATURE_FAST_DENORMAL_F32)); + // TODO: There are way too many flags that change this. Do we need to + // check them all? + DAZ = IsKnownOffloading + ? DriverArgs.hasFlag(options::OPT_fgpu_flush_denormals_to_zero, + options::OPT_fno_gpu_flush_denormals_to_zero, + DefaultDAZ) + : DriverArgs.hasArg(options::OPT_cl_denorms_are_zero) || DefaultDAZ; + + FiniteOnly = DriverArgs.hasArg(options::OPT_cl_finite_math_only) || + DriverArgs.hasFlag(options::OPT_ffinite_math_only, + options::OPT_fno_finite_math_only, false); + + UnsafeMathOpt = + DriverArgs.hasArg(options::OPT_cl_unsafe_math_optimizations) || + DriverArgs.hasFlag(options::OPT_funsafe_math_optimizations, + options::OPT_fno_unsafe_math_optimizations, false); + + FastRelaxedMath = DriverArgs.hasArg(options::OPT_cl_fast_relaxed_math) || + DriverArgs.hasFlag(options::OPT_ffast_math, + options::OPT_fno_fast_math, false); + + const bool DefaultSqrt = IsKnownOffloading ? true : false; + CorrectSqrt = + DriverArgs.hasArg(options::OPT_cl_fp32_correctly_rounded_divide_sqrt) || + DriverArgs.hasFlag( + options::OPT_fhip_fp32_correctly_rounded_divide_sqrt, + options::OPT_fno_hip_fp32_correctly_rounded_divide_sqrt, DefaultSqrt); + // GPU Sanitizer currently only supports ASan and is enabled through host + // ASan. 
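// Aside, not part of the patch: two of the preferences computed above, pulled
// out as standalone predicates so the defaults are easier to see. HasWave32,
// FastFMA and FastDenorm stand in for the FEATURE_* bits of the parsed GPU
// arch; the "Sketch" names are hypothetical.
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"

static bool isWave64Sketch(const llvm::opt::ArgList &Args, bool HasWave32) {
  // Wave64 is forced when the target has no wave32 mode; otherwise it follows
  // -m[no-]wavefrontsize64 and defaults to off.
  return !HasWave32 ||
         Args.hasFlag(clang::driver::options::OPT_mwavefrontsize64,
                      clang::driver::options::OPT_mno_wavefrontsize64, false);
}

static bool defaultDAZSketch(bool KnownArch, bool FastFMA, bool FastDenorm) {
  // DAZ defaults to on for known targets unless FMA is fast with denormals;
  // unknown targets default to off.
  return KnownArch ? !(FastFMA && FastDenorm) : false;
}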
+ GPUSan = (DriverArgs.hasFlag(options::OPT_fgpu_sanitize, + options::OPT_fno_gpu_sanitize, true) && + NeedsASanRT); +} + void RocmInstallationDetector::scanLibDevicePath(llvm::StringRef Path) { assert(!Path.empty()); @@ -841,7 +903,7 @@ AMDGPUToolChain::getSystemGPUArchs(const ArgList &Args) const { else Program = GetProgramPath("amdgpu-arch"); - auto StdoutOrErr = executeToolChainProgram(Program); + auto StdoutOrErr = getDriver().executeProgram({Program}); if (!StdoutOrErr) return StdoutOrErr.takeError(); @@ -884,33 +946,14 @@ void ROCMToolChain::addClangTargetOptions( ABIVer)) return; - bool Wave64 = isWave64(DriverArgs, Kind); - // TODO: There are way too many flags that change this. Do we need to check - // them all? - bool DAZ = DriverArgs.hasArg(options::OPT_cl_denorms_are_zero) || - getDefaultDenormsAreZeroForTarget(Kind); - bool FiniteOnly = DriverArgs.hasArg(options::OPT_cl_finite_math_only); - - bool UnsafeMathOpt = - DriverArgs.hasArg(options::OPT_cl_unsafe_math_optimizations); - bool FastRelaxedMath = DriverArgs.hasArg(options::OPT_cl_fast_relaxed_math); - bool CorrectSqrt = - DriverArgs.hasArg(options::OPT_cl_fp32_correctly_rounded_divide_sqrt); - - // GPU Sanitizer currently only supports ASan and is enabled through host - // ASan. - bool GPUSan = DriverArgs.hasFlag(options::OPT_fgpu_sanitize, - options::OPT_fno_gpu_sanitize, true) && - getSanitizerArgs(DriverArgs).needsAsanRt(); - // Add the OpenCL specific bitcode library. llvm::SmallVector<BitCodeLibraryInfo, 12> BCLibs; BCLibs.emplace_back(RocmInstallation->getOpenCLPath().str()); // Add the generic set of libraries. BCLibs.append(RocmInstallation->getCommonBitcodeLibs( - DriverArgs, LibDeviceFile, Wave64, DAZ, FiniteOnly, UnsafeMathOpt, - FastRelaxedMath, CorrectSqrt, ABIVer, GPUSan, false)); + DriverArgs, LibDeviceFile, GpuArch, DeviceOffloadingKind, + getSanitizerArgs(DriverArgs).needsAsanRt())); for (auto [BCFile, Internalize] : BCLibs) { if (Internalize) @@ -947,35 +990,37 @@ bool RocmInstallationDetector::checkCommonBitcodeLibs( llvm::SmallVector<ToolChain::BitCodeLibraryInfo, 12> RocmInstallationDetector::getCommonBitcodeLibs( - const llvm::opt::ArgList &DriverArgs, StringRef LibDeviceFile, bool Wave64, - bool DAZ, bool FiniteOnly, bool UnsafeMathOpt, bool FastRelaxedMath, - bool CorrectSqrt, DeviceLibABIVersion ABIVer, bool GPUSan, - bool isOpenMP) const { + const llvm::opt::ArgList &DriverArgs, StringRef LibDeviceFile, + StringRef GPUArch, const Action::OffloadKind DeviceOffloadingKind, + const bool NeedsASanRT) const { llvm::SmallVector<ToolChain::BitCodeLibraryInfo, 12> BCLibs; + CommonBitcodeLibsPreferences Pref{D, DriverArgs, GPUArch, + DeviceOffloadingKind, NeedsASanRT}; + auto AddBCLib = [&](ToolChain::BitCodeLibraryInfo BCLib, bool Internalize = true) { BCLib.ShouldInternalize = Internalize; BCLibs.emplace_back(BCLib); }; auto AddSanBCLibs = [&]() { - if (GPUSan) + if (Pref.GPUSan) AddBCLib(getAsanRTLPath(), false); }; AddSanBCLibs(); AddBCLib(getOCMLPath()); - if (!isOpenMP) + if (!Pref.IsOpenMP) AddBCLib(getOCKLPath()); - else if (GPUSan && isOpenMP) + else if (Pref.GPUSan && Pref.IsOpenMP) AddBCLib(getOCKLPath(), false); - AddBCLib(getDenormalsAreZeroPath(DAZ)); - AddBCLib(getUnsafeMathPath(UnsafeMathOpt || FastRelaxedMath)); - AddBCLib(getFiniteOnlyPath(FiniteOnly || FastRelaxedMath)); - AddBCLib(getCorrectlyRoundedSqrtPath(CorrectSqrt)); - AddBCLib(getWavefrontSize64Path(Wave64)); + AddBCLib(getDenormalsAreZeroPath(Pref.DAZ)); + AddBCLib(getUnsafeMathPath(Pref.UnsafeMathOpt || Pref.FastRelaxedMath)); 
+ AddBCLib(getFiniteOnlyPath(Pref.FiniteOnly || Pref.FastRelaxedMath)); + AddBCLib(getCorrectlyRoundedSqrtPath(Pref.CorrectSqrt)); + AddBCLib(getWavefrontSize64Path(Pref.Wave64)); AddBCLib(LibDeviceFile); - auto ABIVerPath = getABIVersionPath(ABIVer); + auto ABIVerPath = getABIVersionPath(Pref.ABIVer); if (!ABIVerPath.empty()) AddBCLib(ABIVerPath); @@ -983,9 +1028,9 @@ RocmInstallationDetector::getCommonBitcodeLibs( } llvm::SmallVector<ToolChain::BitCodeLibraryInfo, 12> -ROCMToolChain::getCommonDeviceLibNames(const llvm::opt::ArgList &DriverArgs, - const std::string &GPUArch, - bool isOpenMP) const { +ROCMToolChain::getCommonDeviceLibNames( + const llvm::opt::ArgList &DriverArgs, const std::string &GPUArch, + Action::OffloadKind DeviceOffloadingKind) const { auto Kind = llvm::AMDGPU::parseArchAMDGCN(GPUArch); const StringRef CanonArch = llvm::AMDGPU::getArchNameAMDGCN(Kind); @@ -996,33 +1041,9 @@ ROCMToolChain::getCommonDeviceLibNames(const llvm::opt::ArgList &DriverArgs, ABIVer)) return {}; - // If --hip-device-lib is not set, add the default bitcode libraries. - // TODO: There are way too many flags that change this. Do we need to check - // them all? - bool DAZ = DriverArgs.hasFlag(options::OPT_fgpu_flush_denormals_to_zero, - options::OPT_fno_gpu_flush_denormals_to_zero, - getDefaultDenormsAreZeroForTarget(Kind)); - bool FiniteOnly = DriverArgs.hasFlag( - options::OPT_ffinite_math_only, options::OPT_fno_finite_math_only, false); - bool UnsafeMathOpt = - DriverArgs.hasFlag(options::OPT_funsafe_math_optimizations, - options::OPT_fno_unsafe_math_optimizations, false); - bool FastRelaxedMath = DriverArgs.hasFlag(options::OPT_ffast_math, - options::OPT_fno_fast_math, false); - bool CorrectSqrt = DriverArgs.hasFlag( - options::OPT_fhip_fp32_correctly_rounded_divide_sqrt, - options::OPT_fno_hip_fp32_correctly_rounded_divide_sqrt, true); - bool Wave64 = isWave64(DriverArgs, Kind); - - // GPU Sanitizer currently only supports ASan and is enabled through host - // ASan. - bool GPUSan = DriverArgs.hasFlag(options::OPT_fgpu_sanitize, - options::OPT_fno_gpu_sanitize, true) && - getSanitizerArgs(DriverArgs).needsAsanRt(); - return RocmInstallation->getCommonBitcodeLibs( - DriverArgs, LibDeviceFile, Wave64, DAZ, FiniteOnly, UnsafeMathOpt, - FastRelaxedMath, CorrectSqrt, ABIVer, GPUSan, isOpenMP); + DriverArgs, LibDeviceFile, GPUArch, DeviceOffloadingKind, + getSanitizerArgs(DriverArgs).needsAsanRt()); } bool AMDGPUToolChain::shouldSkipSanitizeOption( diff --git a/clang/lib/Driver/ToolChains/AMDGPU.h b/clang/lib/Driver/ToolChains/AMDGPU.h index 08bd4fa..513c77d 100644 --- a/clang/lib/Driver/ToolChains/AMDGPU.h +++ b/clang/lib/Driver/ToolChains/AMDGPU.h @@ -147,7 +147,7 @@ public: llvm::SmallVector<BitCodeLibraryInfo, 12> getCommonDeviceLibNames(const llvm::opt::ArgList &DriverArgs, const std::string &GPUArch, - bool isOpenMP = false) const; + Action::OffloadKind DeviceOffloadingKind) const; SanitizerMask getSupportedSanitizers() const override { return SanitizerKind::Address; diff --git a/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp b/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp index 7ffa3f0..2b41d54 100644 --- a/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp +++ b/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp @@ -44,7 +44,7 @@ void AMDGPUOpenMPToolChain::addClangTargetOptions( true)) return; - for (auto BCFile : getDeviceLibs(DriverArgs)) { + for (auto BCFile : getDeviceLibs(DriverArgs, DeviceOffloadingKind)) { CC1Args.push_back(BCFile.ShouldInternalize ? 
"-mlink-builtin-bitcode" : "-mlink-bitcode-file"); CC1Args.push_back(DriverArgs.MakeArgString(BCFile.Path)); @@ -132,7 +132,9 @@ AMDGPUOpenMPToolChain::computeMSVCVersion(const Driver *D, } llvm::SmallVector<ToolChain::BitCodeLibraryInfo, 12> -AMDGPUOpenMPToolChain::getDeviceLibs(const llvm::opt::ArgList &Args) const { +AMDGPUOpenMPToolChain::getDeviceLibs( + const llvm::opt::ArgList &Args, + const Action::OffloadKind DeviceOffloadingKind) const { if (!Args.hasFlag(options::OPT_offloadlib, options::OPT_no_offloadlib, true)) return {}; @@ -140,8 +142,8 @@ AMDGPUOpenMPToolChain::getDeviceLibs(const llvm::opt::ArgList &Args) const { getTriple(), Args.getLastArgValue(options::OPT_march_EQ)); SmallVector<BitCodeLibraryInfo, 12> BCLibs; - for (auto BCLib : getCommonDeviceLibNames(Args, GpuArch.str(), - /*IsOpenMP=*/true)) + for (auto BCLib : + getCommonDeviceLibNames(Args, GpuArch.str(), DeviceOffloadingKind)) BCLibs.emplace_back(BCLib); return BCLibs; diff --git a/clang/lib/Driver/ToolChains/AMDGPUOpenMP.h b/clang/lib/Driver/ToolChains/AMDGPUOpenMP.h index 0536c9f..cbafdf5 100644 --- a/clang/lib/Driver/ToolChains/AMDGPUOpenMP.h +++ b/clang/lib/Driver/ToolChains/AMDGPUOpenMP.h @@ -58,7 +58,8 @@ public: const llvm::opt::ArgList &Args) const override; llvm::SmallVector<BitCodeLibraryInfo, 12> - getDeviceLibs(const llvm::opt::ArgList &Args) const override; + getDeviceLibs(const llvm::opt::ArgList &Args, + const Action::OffloadKind DeviceOffloadKind) const override; const ToolChain &HostTC; }; diff --git a/clang/lib/Driver/ToolChains/Arch/Sparc.cpp b/clang/lib/Driver/ToolChains/Arch/Sparc.cpp index 9595ee8..3333135 100644 --- a/clang/lib/Driver/ToolChains/Arch/Sparc.cpp +++ b/clang/lib/Driver/ToolChains/Arch/Sparc.cpp @@ -23,7 +23,9 @@ const char *sparc::getSparcAsmModeForCPU(StringRef Name, if (Triple.getArch() == llvm::Triple::sparcv9) { const char *DefV9CPU; - if (Triple.isOSLinux() || Triple.isOSFreeBSD() || Triple.isOSOpenBSD()) + if (Triple.isOSSolaris()) + DefV9CPU = "-Av9b"; + else if (Triple.isOSLinux() || Triple.isOSFreeBSD() || Triple.isOSOpenBSD()) DefV9CPU = "-Av9a"; else DefV9CPU = "-Av9"; @@ -130,7 +132,8 @@ std::string sparc::getSparcTargetCPU(const Driver &D, const ArgList &Args, return ""; } -void sparc::getSparcTargetFeatures(const Driver &D, const ArgList &Args, +void sparc::getSparcTargetFeatures(const Driver &D, const llvm::Triple &Triple, + const ArgList &Args, std::vector<StringRef> &Features) { sparc::FloatABI FloatABI = sparc::getSparcFloatABI(D, Args); if (FloatABI == sparc::FloatABI::Soft) @@ -150,11 +153,20 @@ void sparc::getSparcTargetFeatures(const Driver &D, const ArgList &Args, Features.push_back("-popc"); } + // Those OSes default to enabling VIS on 64-bit SPARC. + // See also the corresponding code for external assemblers in + // sparc::getSparcAsmModeForCPU(). 
+ bool IsSparcV9ATarget = + (Triple.getArch() == llvm::Triple::sparcv9) && + (Triple.isOSLinux() || Triple.isOSFreeBSD() || Triple.isOSOpenBSD()); + bool IsSparcV9BTarget = Triple.isOSSolaris(); if (Arg *A = Args.getLastArg(options::OPT_mvis, options::OPT_mno_vis)) { if (A->getOption().matches(options::OPT_mvis)) Features.push_back("+vis"); else Features.push_back("-vis"); + } else if (IsSparcV9ATarget) { + Features.push_back("+vis"); } if (Arg *A = Args.getLastArg(options::OPT_mvis2, options::OPT_mno_vis2)) { @@ -162,6 +174,8 @@ void sparc::getSparcTargetFeatures(const Driver &D, const ArgList &Args, Features.push_back("+vis2"); else Features.push_back("-vis2"); + } else if (IsSparcV9BTarget) { + Features.push_back("+vis2"); } if (Arg *A = Args.getLastArg(options::OPT_mvis3, options::OPT_mno_vis3)) { diff --git a/clang/lib/Driver/ToolChains/Arch/Sparc.h b/clang/lib/Driver/ToolChains/Arch/Sparc.h index 2b178d9..fa25b49 100644 --- a/clang/lib/Driver/ToolChains/Arch/Sparc.h +++ b/clang/lib/Driver/ToolChains/Arch/Sparc.h @@ -31,7 +31,8 @@ FloatABI getSparcFloatABI(const Driver &D, const llvm::opt::ArgList &Args); std::string getSparcTargetCPU(const Driver &D, const llvm::opt::ArgList &Args, const llvm::Triple &Triple); -void getSparcTargetFeatures(const Driver &D, const llvm::opt::ArgList &Args, +void getSparcTargetFeatures(const Driver &D, const llvm::Triple &Triple, + const llvm::opt::ArgList &Args, std::vector<llvm::StringRef> &Features); const char *getSparcAsmModeForCPU(llvm::StringRef Name, const llvm::Triple &Triple); diff --git a/clang/lib/Driver/ToolChains/BareMetal.cpp b/clang/lib/Driver/ToolChains/BareMetal.cpp index e670696..497f333 100644 --- a/clang/lib/Driver/ToolChains/BareMetal.cpp +++ b/clang/lib/Driver/ToolChains/BareMetal.cpp @@ -694,9 +694,6 @@ void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA, NeedCRTs) CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath(CRTEnd))); - if (TC.getTriple().isRISCV()) - CmdArgs.push_back("-X"); - // The R_ARM_TARGET2 relocation must be treated as R_ARM_REL32 on arm*-*-elf // and arm*-*-eabi (the default is R_ARM_GOT_PREL, used on arm*-*-linux and // arm*-*-*bsd). diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp index 8880c93..7d0c142 100644 --- a/clang/lib/Driver/ToolChains/Clang.cpp +++ b/clang/lib/Driver/ToolChains/Clang.cpp @@ -97,32 +97,15 @@ forAllAssociatedToolChains(Compilation &C, const JobAction &JA, // Apply Work on all the offloading tool chains associated with the current // action. 
- if (JA.isHostOffloading(Action::OFK_Cuda)) - Work(*C.getSingleOffloadToolChain<Action::OFK_Cuda>()); - else if (JA.isDeviceOffloading(Action::OFK_Cuda)) - Work(*C.getSingleOffloadToolChain<Action::OFK_Host>()); - else if (JA.isHostOffloading(Action::OFK_HIP)) - Work(*C.getSingleOffloadToolChain<Action::OFK_HIP>()); - else if (JA.isDeviceOffloading(Action::OFK_HIP)) - Work(*C.getSingleOffloadToolChain<Action::OFK_Host>()); - - if (JA.isHostOffloading(Action::OFK_OpenMP)) { - auto TCs = C.getOffloadToolChains<Action::OFK_OpenMP>(); - for (auto II = TCs.first, IE = TCs.second; II != IE; ++II) - Work(*II->second); - } else if (JA.isDeviceOffloading(Action::OFK_OpenMP)) - Work(*C.getSingleOffloadToolChain<Action::OFK_Host>()); - - if (JA.isHostOffloading(Action::OFK_SYCL)) { - auto TCs = C.getOffloadToolChains<Action::OFK_SYCL>(); - for (auto II = TCs.first, IE = TCs.second; II != IE; ++II) - Work(*II->second); - } else if (JA.isDeviceOffloading(Action::OFK_SYCL)) - Work(*C.getSingleOffloadToolChain<Action::OFK_Host>()); - - // - // TODO: Add support for other offloading programming models here. - // + for (Action::OffloadKind Kind : {Action::OFK_Cuda, Action::OFK_OpenMP, + Action::OFK_HIP, Action::OFK_SYCL}) { + if (JA.isHostOffloading(Kind)) { + auto TCs = C.getOffloadToolChains(Kind); + for (auto II = TCs.first, IE = TCs.second; II != IE; ++II) + Work(*II->second); + } else if (JA.isDeviceOffloading(Kind)) + Work(*C.getSingleOffloadToolChain<Action::OFK_Host>()); + } } static bool @@ -2731,16 +2714,6 @@ static void CollectArgsForIntegratedAssembler(Compilation &C, CmdArgs.push_back(MipsTargetFeature); } - // Those OSes default to enabling VIS on 64-bit SPARC. - // See also the corresponding code for external assemblers in - // sparc::getSparcAsmModeForCPU(). - bool IsSparcV9ATarget = - (C.getDefaultToolChain().getArch() == llvm::Triple::sparcv9) && - (Triple.isOSLinux() || Triple.isOSFreeBSD() || Triple.isOSOpenBSD()); - if (IsSparcV9ATarget && SparcTargetFeatures.empty()) { - CmdArgs.push_back("-target-feature"); - CmdArgs.push_back("+vis"); - } for (const char *Feature : SparcTargetFeatures) { CmdArgs.push_back("-target-feature"); CmdArgs.push_back(Feature); @@ -4095,31 +4068,34 @@ static bool RenderModulesOptions(Compilation &C, const Driver &D, // module fragment. CmdArgs.push_back("-fskip-odr-check-in-gmf"); - if (Args.hasArg(options::OPT_modules_reduced_bmi) && + if (!Args.hasArg(options::OPT_fno_modules_reduced_bmi) && (Input.getType() == driver::types::TY_CXXModule || - Input.getType() == driver::types::TY_PP_CXXModule)) { + Input.getType() == driver::types::TY_PP_CXXModule) && + !Args.hasArg(options::OPT__precompile)) { CmdArgs.push_back("-fmodules-reduced-bmi"); if (Args.hasArg(options::OPT_fmodule_output_EQ)) Args.AddLastArg(CmdArgs, options::OPT_fmodule_output_EQ); - else { - if (Args.hasArg(options::OPT__precompile) && - (!Args.hasArg(options::OPT_o) || - Args.getLastArg(options::OPT_o)->getValue() == - getCXX20NamedModuleOutputPath(Args, Input.getBaseInput()))) { - D.Diag(diag::err_drv_reduced_module_output_overrided); - } - + else CmdArgs.push_back(Args.MakeArgString( "-fmodule-output=" + getCXX20NamedModuleOutputPath(Args, Input.getBaseInput()))); - } } - // Noop if we see '-fmodules-reduced-bmi' with other translation - // units than module units. This is more user friendly to allow end uers to - // enable this feature without asking for help from build systems. 
- Args.ClaimAllArgs(options::OPT_modules_reduced_bmi); + if (Args.hasArg(options::OPT_fmodules_reduced_bmi) && + Args.hasArg(options::OPT__precompile) && + (!Args.hasArg(options::OPT_o) || + Args.getLastArg(options::OPT_o)->getValue() == + getCXX20NamedModuleOutputPath(Args, Input.getBaseInput()))) { + D.Diag(diag::err_drv_reduced_module_output_overrided); + } + + // Noop if we see '-fmodules-reduced-bmi' or `-fno-modules-reduced-bmi` with + // other translation units than module units. This is more user friendly to + // allow end uers to enable this feature without asking for help from build + // systems. + Args.ClaimAllArgs(options::OPT_fmodules_reduced_bmi); + Args.ClaimAllArgs(options::OPT_fno_modules_reduced_bmi); // We need to include the case the input file is a module file here. // Since the default compilation model for C++ module interface unit will @@ -4992,8 +4968,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA, else { // Host-side compilation. NormalizedTriple = - (IsCuda ? C.getSingleOffloadToolChain<Action::OFK_Cuda>() - : C.getSingleOffloadToolChain<Action::OFK_HIP>()) + (IsCuda ? C.getOffloadToolChains(Action::OFK_Cuda).first->second + : C.getOffloadToolChains(Action::OFK_HIP).first->second) ->getTriple() .normalize(); if (IsCuda) { diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp index 651a39c..826e2ea 100644 --- a/clang/lib/Driver/ToolChains/CommonArgs.cpp +++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp @@ -856,7 +856,7 @@ void tools::getTargetFeatures(const Driver &D, const llvm::Triple &Triple, case llvm::Triple::sparc: case llvm::Triple::sparcel: case llvm::Triple::sparcv9: - sparc::getSparcTargetFeatures(D, Args, Features); + sparc::getSparcTargetFeatures(D, Triple, Args, Features); break; case llvm::Triple::r600: case llvm::Triple::amdgcn: diff --git a/clang/lib/Driver/ToolChains/Cuda.cpp b/clang/lib/Driver/ToolChains/Cuda.cpp index 2373d94..7d803be 100644 --- a/clang/lib/Driver/ToolChains/Cuda.cpp +++ b/clang/lib/Driver/ToolChains/Cuda.cpp @@ -815,7 +815,7 @@ NVPTXToolChain::getSystemGPUArchs(const ArgList &Args) const { else Program = GetProgramPath("nvptx-arch"); - auto StdoutOrErr = executeToolChainProgram(Program); + auto StdoutOrErr = getDriver().executeProgram({Program}); if (!StdoutOrErr) return StdoutOrErr.takeError(); diff --git a/clang/lib/Driver/ToolChains/Flang.cpp b/clang/lib/Driver/ToolChains/Flang.cpp index 1edb83f..7ab41e9 100644 --- a/clang/lib/Driver/ToolChains/Flang.cpp +++ b/clang/lib/Driver/ToolChains/Flang.cpp @@ -447,6 +447,7 @@ void Flang::addTargetOptions(const ArgList &Args, // Add the target features. switch (TC.getArch()) { default: + getTargetFeatures(D, Triple, Args, CmdArgs, /*ForAs*/ false); break; case llvm::Triple::aarch64: getTargetFeatures(D, Triple, Args, CmdArgs, /*ForAs*/ false); diff --git a/clang/lib/Driver/ToolChains/HIPAMD.cpp b/clang/lib/Driver/ToolChains/HIPAMD.cpp index 5fe0f85..b4c6da0 100644 --- a/clang/lib/Driver/ToolChains/HIPAMD.cpp +++ b/clang/lib/Driver/ToolChains/HIPAMD.cpp @@ -264,7 +264,7 @@ void HIPAMDToolChain::addClangTargetOptions( return; // No DeviceLibs for SPIR-V. } - for (auto BCFile : getDeviceLibs(DriverArgs)) { + for (auto BCFile : getDeviceLibs(DriverArgs, DeviceOffloadingKind)) { CC1Args.push_back(BCFile.ShouldInternalize ? 
"-mlink-builtin-bitcode" : "-mlink-bitcode-file"); CC1Args.push_back(DriverArgs.MakeArgString(BCFile.Path)); @@ -355,7 +355,8 @@ VersionTuple HIPAMDToolChain::computeMSVCVersion(const Driver *D, } llvm::SmallVector<ToolChain::BitCodeLibraryInfo, 12> -HIPAMDToolChain::getDeviceLibs(const llvm::opt::ArgList &DriverArgs) const { +HIPAMDToolChain::getDeviceLibs(const llvm::opt::ArgList &DriverArgs, + Action::OffloadKind DeviceOffloadingKind) const { llvm::SmallVector<BitCodeLibraryInfo, 12> BCLibs; if (!DriverArgs.hasFlag(options::OPT_offloadlib, options::OPT_no_offloadlib, true) || @@ -397,7 +398,8 @@ HIPAMDToolChain::getDeviceLibs(const llvm::opt::ArgList &DriverArgs) const { assert(!GpuArch.empty() && "Must have an explicit GPU arch."); // Add common device libraries like ocml etc. - for (auto N : getCommonDeviceLibNames(DriverArgs, GpuArch.str())) + for (auto N : getCommonDeviceLibNames(DriverArgs, GpuArch.str(), + DeviceOffloadingKind)) BCLibs.emplace_back(N); // Add instrument lib. diff --git a/clang/lib/Driver/ToolChains/HIPAMD.h b/clang/lib/Driver/ToolChains/HIPAMD.h index 3630b11..bcc3ebb 100644 --- a/clang/lib/Driver/ToolChains/HIPAMD.h +++ b/clang/lib/Driver/ToolChains/HIPAMD.h @@ -80,7 +80,8 @@ public: void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args) const override; llvm::SmallVector<BitCodeLibraryInfo, 12> - getDeviceLibs(const llvm::opt::ArgList &Args) const override; + getDeviceLibs(const llvm::opt::ArgList &Args, + Action::OffloadKind DeviceOffloadKind) const override; SanitizerMask getSupportedSanitizers() const override; diff --git a/clang/lib/Driver/ToolChains/HIPSPV.cpp b/clang/lib/Driver/ToolChains/HIPSPV.cpp index 53649ca..643a67f 100644 --- a/clang/lib/Driver/ToolChains/HIPSPV.cpp +++ b/clang/lib/Driver/ToolChains/HIPSPV.cpp @@ -149,7 +149,8 @@ void HIPSPVToolChain::addClangTargetOptions( CC1Args.append( {"-fvisibility=hidden", "-fapply-global-visibility-to-externs"}); - for (const BitCodeLibraryInfo &BCFile : getDeviceLibs(DriverArgs)) + for (const BitCodeLibraryInfo &BCFile : + getDeviceLibs(DriverArgs, DeviceOffloadingKind)) CC1Args.append( {"-mlink-builtin-bitcode", DriverArgs.MakeArgString(BCFile.Path)}); } @@ -200,7 +201,9 @@ void HIPSPVToolChain::AddHIPIncludeArgs(const ArgList &DriverArgs, } llvm::SmallVector<ToolChain::BitCodeLibraryInfo, 12> -HIPSPVToolChain::getDeviceLibs(const llvm::opt::ArgList &DriverArgs) const { +HIPSPVToolChain::getDeviceLibs( + const llvm::opt::ArgList &DriverArgs, + const Action::OffloadKind DeviceOffloadingKind) const { llvm::SmallVector<ToolChain::BitCodeLibraryInfo, 12> BCLibs; if (!DriverArgs.hasFlag(options::OPT_offloadlib, options::OPT_no_offloadlib, true)) diff --git a/clang/lib/Driver/ToolChains/HIPSPV.h b/clang/lib/Driver/ToolChains/HIPSPV.h index ecd82e7..caf6924 100644 --- a/clang/lib/Driver/ToolChains/HIPSPV.h +++ b/clang/lib/Driver/ToolChains/HIPSPV.h @@ -69,7 +69,8 @@ public: void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args) const override; llvm::SmallVector<BitCodeLibraryInfo, 12> - getDeviceLibs(const llvm::opt::ArgList &Args) const override; + getDeviceLibs(const llvm::opt::ArgList &Args, + const Action::OffloadKind DeviceOffloadKind) const override; SanitizerMask getSupportedSanitizers() const override; diff --git a/clang/lib/Driver/ToolChains/OpenBSD.cpp b/clang/lib/Driver/ToolChains/OpenBSD.cpp index 79b1b69..8f58918 100644 --- a/clang/lib/Driver/ToolChains/OpenBSD.cpp +++ b/clang/lib/Driver/ToolChains/OpenBSD.cpp @@ -161,7 
+161,7 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA, if (Nopie || Profiling) CmdArgs.push_back("-nopie"); - if (Triple.isRISCV64()) { + if (Triple.isLoongArch64() || Triple.isRISCV64()) { CmdArgs.push_back("-X"); if (Args.hasArg(options::OPT_mno_relax)) CmdArgs.push_back("--no-relax"); diff --git a/clang/lib/Driver/ToolChains/ROCm.h b/clang/lib/Driver/ToolChains/ROCm.h index 2a09da01..ebd5443 100644 --- a/clang/lib/Driver/ToolChains/ROCm.h +++ b/clang/lib/Driver/ToolChains/ROCm.h @@ -11,6 +11,7 @@ #include "clang/Basic/Cuda.h" #include "clang/Basic/LLVM.h" +#include "clang/Driver/CommonArgs.h" #include "clang/Driver/Driver.h" #include "clang/Driver/Options.h" #include "clang/Driver/SanitizerArgs.h" @@ -18,6 +19,7 @@ #include "llvm/ADT/StringMap.h" #include "llvm/Option/ArgList.h" #include "llvm/Support/VersionTuple.h" +#include "llvm/TargetParser/TargetParser.h" #include "llvm/TargetParser/Triple.h" namespace clang { @@ -77,6 +79,24 @@ private: SPACKReleaseStr(SPACKReleaseStr.str()) {} }; + struct CommonBitcodeLibsPreferences { + CommonBitcodeLibsPreferences(const Driver &D, + const llvm::opt::ArgList &DriverArgs, + StringRef GPUArch, + const Action::OffloadKind DeviceOffloadingKind, + const bool NeedsASanRT); + + DeviceLibABIVersion ABIVer; + bool IsOpenMP; + bool Wave64; + bool DAZ; + bool FiniteOnly; + bool UnsafeMathOpt; + bool FastRelaxedMath; + bool CorrectSqrt; + bool GPUSan; + }; + const Driver &D; bool HasHIPRuntime = false; bool HasDeviceLibrary = false; @@ -175,11 +195,11 @@ public: /// Get file paths of default bitcode libraries common to AMDGPU based /// toolchains. - llvm::SmallVector<ToolChain::BitCodeLibraryInfo, 12> getCommonBitcodeLibs( - const llvm::opt::ArgList &DriverArgs, StringRef LibDeviceFile, - bool Wave64, bool DAZ, bool FiniteOnly, bool UnsafeMathOpt, - bool FastRelaxedMath, bool CorrectSqrt, DeviceLibABIVersion ABIVer, - bool GPUSan, bool isOpenMP) const; + llvm::SmallVector<ToolChain::BitCodeLibraryInfo, 12> + getCommonBitcodeLibs(const llvm::opt::ArgList &DriverArgs, + StringRef LibDeviceFile, StringRef GPUArch, + const Action::OffloadKind DeviceOffloadingKind, + const bool NeedsASanRT) const; /// Check file paths of default bitcode libraries common to AMDGPU based /// toolchains. \returns false if there are invalid or missing files. bool checkCommonBitcodeLibs(StringRef GPUArch, StringRef LibDeviceFile, diff --git a/clang/lib/Format/BreakableToken.cpp b/clang/lib/Format/BreakableToken.cpp index c36cb74..29db200 100644 --- a/clang/lib/Format/BreakableToken.cpp +++ b/clang/lib/Format/BreakableToken.cpp @@ -25,7 +25,7 @@ namespace clang { namespace format { -static constexpr StringRef Blanks = " \t\v\f\r"; +static constexpr StringRef Blanks(" \t\v\f\r"); static StringRef getLineCommentIndentPrefix(StringRef Comment, const FormatStyle &Style) { @@ -513,7 +513,7 @@ BreakableBlockComment::BreakableBlockComment( Decoration = ""; } for (size_t i = 1, e = Content.size(); i < e && !Decoration.empty(); ++i) { - const StringRef &Text = Content[i]; + const StringRef Text(Content[i]); if (i + 1 == e) { // If the last line is empty, the closing "*/" will have a star. 
if (Text.empty()) diff --git a/clang/lib/Format/ContinuationIndenter.cpp b/clang/lib/Format/ContinuationIndenter.cpp index 4010f7f..bf67f9e 100644 --- a/clang/lib/Format/ContinuationIndenter.cpp +++ b/clang/lib/Format/ContinuationIndenter.cpp @@ -560,6 +560,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) { return true; } } else if (Current.is(TT_BinaryOperator) && Current.CanBreakBefore && + Current.getPrecedence() != prec::Assignment && CurrentState.BreakBeforeParameter) { return true; } diff --git a/clang/lib/Format/FormatTokenLexer.cpp b/clang/lib/Format/FormatTokenLexer.cpp index 49da316..3f4aa52 100644 --- a/clang/lib/Format/FormatTokenLexer.cpp +++ b/clang/lib/Format/FormatTokenLexer.cpp @@ -1198,7 +1198,7 @@ void FormatTokenLexer::truncateToken(size_t NewLen) { /// Count the length of leading whitespace in a token. static size_t countLeadingWhitespace(StringRef Text) { // Basically counting the length matched by this regex. - // "^([\n\r\f\v \t]|(\\\\|\\?\\?/)[\n\r])+" + // "^([\n\r\f\v \t]|\\\\[\n\r])+" // Directly using the regex turned out to be slow. With the regex // version formatting all files in this directory took about 1.25 // seconds. This version took about 0.5 seconds. @@ -1222,13 +1222,6 @@ static size_t countLeadingWhitespace(StringRef Text) { break; // Splice found, consume it. Cur = Lookahead + 1; - } else if (Cur[0] == '?' && Cur[1] == '?' && Cur[2] == '/' && - (Cur[3] == '\n' || Cur[3] == '\r')) { - // Newlines can also be escaped by a '?' '?' '/' trigraph. By the way, the - // characters are quoted individually in this comment because if we write - // them together some compilers warn that we have a trigraph in the code. - assert(End - Cur >= 4); - Cur += 4; } else { break; } @@ -1300,22 +1293,16 @@ FormatToken *FormatTokenLexer::getNextToken() { Style.TabWidth - (Style.TabWidth ? Column % Style.TabWidth : 0); break; case '\\': - case '?': - case '/': - // The text was entirely whitespace when this loop was entered. Thus - // this has to be an escape sequence. - assert(Text.substr(i, 4) == "\?\?/\r" || - Text.substr(i, 4) == "\?\?/\n" || - (i >= 1 && (Text.substr(i - 1, 4) == "\?\?/\r" || - Text.substr(i - 1, 4) == "\?\?/\n")) || - (i >= 2 && (Text.substr(i - 2, 4) == "\?\?/\r" || - Text.substr(i - 2, 4) == "\?\?/\n")) || - (Text[i] == '\\' && [&]() -> bool { - size_t j = i + 1; - while (j < Text.size() && isHorizontalWhitespace(Text[j])) - ++j; - return j < Text.size() && (Text[j] == '\n' || Text[j] == '\r'); - }())); + // The code preceding the loop and in the countLeadingWhitespace + // function guarantees that Text is entirely whitespace, not including + // comments but including escaped newlines. So the character shows up, + // then it has to be in an escape sequence. 
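// Aside, not part of the patch: a standalone approximation of the simplified
// leading-whitespace scan once the "??/" trigraph handling above is gone.
// Unlike the real countLeadingWhitespace(), it does not allow horizontal
// whitespace between the backslash and the newline; the name is hypothetical.
#include <cstddef>
#include <string_view>

static size_t leadingWhitespaceLenSketch(std::string_view Text) {
  size_t I = 0;
  while (I < Text.size()) {
    char C = Text[I];
    if (C == '\n' || C == '\r' || C == '\f' || C == '\v' || C == ' ' ||
        C == '\t')
      ++I;
    else if (C == '\\' && I + 1 < Text.size() &&
             (Text[I + 1] == '\n' || Text[I + 1] == '\r'))
      I += 2; // an escaped newline still counts as leading whitespace
    else
      break;  // anything else, including "??/", ends the run
  }
  return I;
}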
+ assert([&]() -> bool { + size_t j = i + 1; + while (j < Text.size() && isHorizontalWhitespace(Text[j])) + ++j; + return j < Text.size() && (Text[j] == '\n' || Text[j] == '\r'); + }()); InEscape = true; break; default: diff --git a/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp b/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp index 87823ae..80487fa 100644 --- a/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp +++ b/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp @@ -19,7 +19,7 @@ namespace format { enum class Base { Binary, Decimal, Hex, Other }; -static Base getBase(const StringRef IntegerLiteral) { +static Base getBase(StringRef IntegerLiteral) { assert(IntegerLiteral.size() > 1); if (IntegerLiteral[0] > '0') { @@ -164,8 +164,8 @@ IntegerLiteralSeparatorFixer::process(const Environment &Env, return {Result, 0}; } -bool IntegerLiteralSeparatorFixer::checkSeparator( - const StringRef IntegerLiteral, int DigitsPerGroup) const { +bool IntegerLiteralSeparatorFixer::checkSeparator(StringRef IntegerLiteral, + int DigitsPerGroup) const { assert(DigitsPerGroup > 0); int I = 0; @@ -184,7 +184,7 @@ bool IntegerLiteralSeparatorFixer::checkSeparator( return true; } -std::string IntegerLiteralSeparatorFixer::format(const StringRef IntegerLiteral, +std::string IntegerLiteralSeparatorFixer::format(StringRef IntegerLiteral, int DigitsPerGroup, int DigitCount, bool RemoveSeparator) const { diff --git a/clang/lib/Format/IntegerLiteralSeparatorFixer.h b/clang/lib/Format/IntegerLiteralSeparatorFixer.h index 2c158e4..e24af18 100644 --- a/clang/lib/Format/IntegerLiteralSeparatorFixer.h +++ b/clang/lib/Format/IntegerLiteralSeparatorFixer.h @@ -26,8 +26,8 @@ public: const FormatStyle &Style); private: - bool checkSeparator(const StringRef IntegerLiteral, int DigitsPerGroup) const; - std::string format(const StringRef IntegerLiteral, int DigitsPerGroup, + bool checkSeparator(StringRef IntegerLiteral, int DigitsPerGroup) const; + std::string format(StringRef IntegerLiteral, int DigitsPerGroup, int DigitCount, bool RemoveSeparator) const; char Separator; diff --git a/clang/lib/Format/ObjCPropertyAttributeOrderFixer.cpp b/clang/lib/Format/ObjCPropertyAttributeOrderFixer.cpp index 37a1807..b885942 100644 --- a/clang/lib/Format/ObjCPropertyAttributeOrderFixer.cpp +++ b/clang/lib/Format/ObjCPropertyAttributeOrderFixer.cpp @@ -66,7 +66,7 @@ void ObjCPropertyAttributeOrderFixer::sortPropertyAttributes( return; } - const StringRef Attribute{Tok->TokenText}; + const StringRef Attribute(Tok->TokenText); StringRef Value; // Also handle `getter=getFoo` attributes. diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index 3a36250..ab4384a 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -2013,8 +2013,8 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, : llvm::codegenoptions::DebugTemplateNamesKind::Mangled); } - if (const Arg *A = Args.getLastArg(OPT_ftime_report, OPT_ftime_report_EQ, - OPT_ftime_report_json)) { + if (Args.hasArg(OPT_ftime_report, OPT_ftime_report_EQ, OPT_ftime_report_json, + OPT_stats_file_timers)) { Opts.TimePasses = true; // -ftime-report= is only for new pass manager. 
@@ -2026,7 +2026,7 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, Opts.TimePassesPerRun = true; else Diags.Report(diag::err_drv_invalid_value) - << A->getAsString(Args) << A->getValue(); + << EQ->getAsString(Args) << EQ->getValue(); } if (Args.getLastArg(OPT_ftime_report_json)) diff --git a/clang/lib/Interpreter/CMakeLists.txt b/clang/lib/Interpreter/CMakeLists.txt index 38cf139..70de4a2 100644 --- a/clang/lib/Interpreter/CMakeLists.txt +++ b/clang/lib/Interpreter/CMakeLists.txt @@ -29,6 +29,7 @@ add_clang_library(clangInterpreter InterpreterUtils.cpp RemoteJITUtils.cpp Value.cpp + InterpreterValuePrinter.cpp ${WASM_SRC} PARTIAL_SOURCES_INTENDED diff --git a/clang/lib/Interpreter/Interpreter.cpp b/clang/lib/Interpreter/Interpreter.cpp index ed3bae5..9b71486 100644 --- a/clang/lib/Interpreter/Interpreter.cpp +++ b/clang/lib/Interpreter/Interpreter.cpp @@ -264,7 +264,7 @@ public: if (auto *TLSD = llvm::dyn_cast<TopLevelStmtDecl>(D)) if (TLSD && TLSD->isSemiMissing()) { auto ExprOrErr = - Interp.ExtractValueFromExpr(cast<Expr>(TLSD->getStmt())); + Interp.convertExprToValue(cast<Expr>(TLSD->getStmt())); if (llvm::Error E = ExprOrErr.takeError()) { llvm::logAllUnhandledErrors(std::move(E), llvm::errs(), "Value printing failed: "); @@ -440,11 +440,10 @@ const char *const Runtimes = R"( #define __CLANG_REPL__ 1 #ifdef __cplusplus #define EXTERN_C extern "C" - void *__clang_Interpreter_SetValueWithAlloc(void*, void*, void*); struct __clang_Interpreter_NewTag{} __ci_newtag; void* operator new(__SIZE_TYPE__, void* __p, __clang_Interpreter_NewTag) noexcept; template <class T, class = T (*)() /*disable for arrays*/> - void __clang_Interpreter_SetValueCopyArr(T* Src, void* Placement, unsigned long Size) { + void __clang_Interpreter_SetValueCopyArr(const T* Src, void* Placement, unsigned long Size) { for (auto Idx = 0; Idx < Size; ++Idx) new ((void*)(((T*)Placement) + Idx), __ci_newtag) T(Src[Idx]); } @@ -454,8 +453,12 @@ const char *const Runtimes = R"( } #else #define EXTERN_C extern + EXTERN_C void *memcpy(void *restrict dst, const void *restrict src, __SIZE_TYPE__ n); + EXTERN_C inline void __clang_Interpreter_SetValueCopyArr(const void* Src, void* Placement, unsigned long Size) { + memcpy(Placement, Src, Size); + } #endif // __cplusplus - + EXTERN_C void *__clang_Interpreter_SetValueWithAlloc(void*, void*, void*); EXTERN_C void __clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType, ...); )"; @@ -470,12 +473,12 @@ Interpreter::create(std::unique_ptr<CompilerInstance> CI, // Add runtime code and set a marker to hide it from user code. Undo will not // go through that. 
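// Aside, not part of the patch: a minimal embedder showing where the
// ParseAndExecute()/Undo() behaviour touched in this file is observable.
// Error handling is reduced to llvm::cantFail/consumeError and the setup is
// assumed to succeed; the function name is hypothetical.
#include "clang/Interpreter/Interpreter.h"
#include "llvm/Support/Error.h"

static void interpreterUndoSketch() {
  clang::IncrementalCompilerBuilder CB;
  auto CI = llvm::cantFail(CB.CreateCpp());
  auto Interp = llvm::cantFail(clang::Interpreter::create(std::move(CI)));
  llvm::cantFail(Interp->ParseAndExecute("int x = 42;"));
  llvm::cantFail(Interp->Undo(1));  // one user input to roll back: succeeds
  llvm::Error E = Interp->Undo(1);  // nothing left: "No input left to undo"
  llvm::consumeError(std::move(E));
}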
- auto PTU = Interp->Parse(Runtimes); - if (!PTU) - return PTU.takeError(); + Err = Interp->ParseAndExecute(Runtimes); + if (Err) + return std::move(Err); + Interp->markUserCodeStart(); - Interp->ValuePrintingInfo.resize(4); return std::move(Interp); } @@ -524,12 +527,11 @@ Interpreter::createWithCUDA(std::unique_ptr<CompilerInstance> CI, return std::move(Interp); } +CompilerInstance *Interpreter::getCompilerInstance() { return CI.get(); } const CompilerInstance *Interpreter::getCompilerInstance() const { - return CI.get(); + return const_cast<Interpreter *>(this)->getCompilerInstance(); } -CompilerInstance *Interpreter::getCompilerInstance() { return CI.get(); } - llvm::Expected<llvm::orc::LLJIT &> Interpreter::getExecutionEngine() { if (!IncrExecutor) { if (auto Err = CreateExecutor()) @@ -610,7 +612,14 @@ Interpreter::Parse(llvm::StringRef Code) { if (!TuOrErr) return TuOrErr.takeError(); - return RegisterPTU(*TuOrErr); + PTUs.emplace_back(PartialTranslationUnit()); + PartialTranslationUnit &LastPTU = PTUs.back(); + LastPTU.TUPart = *TuOrErr; + + if (std::unique_ptr<llvm::Module> M = GenModule()) + LastPTU.TheModule = std::move(M); + + return LastPTU; } static llvm::Expected<llvm::orc::JITTargetMachineBuilder> @@ -752,10 +761,18 @@ Interpreter::getSymbolAddressFromLinkerName(llvm::StringRef Name) const { llvm::Error Interpreter::Undo(unsigned N) { - if (N > getEffectivePTUSize()) + if (getEffectivePTUSize() == 0) { return llvm::make_error<llvm::StringError>("Operation failed. " - "Too many undos", + "No input left to undo", std::error_code()); + } else if (N > getEffectivePTUSize()) { + return llvm::make_error<llvm::StringError>( + llvm::formatv( + "Operation failed. Wanted to undo {0} inputs, only have {1}.", N, + getEffectivePTUSize()), + std::error_code()); + } + for (unsigned I = 0; I < N; I++) { if (IncrExecutor) { if (llvm::Error Err = IncrExecutor->removeModule(PTUs.back())) @@ -808,10 +825,10 @@ Interpreter::GenModule(IncrementalAction *Action) { // sure it always stays empty. 
assert(((!CachedInCodeGenModule || !getCompilerInstance()->getPreprocessorOpts().Includes.empty()) || - (CachedInCodeGenModule->empty() && - CachedInCodeGenModule->global_empty() && - CachedInCodeGenModule->alias_empty() && - CachedInCodeGenModule->ifunc_empty())) && + ((CachedInCodeGenModule->empty() && + CachedInCodeGenModule->global_empty() && + CachedInCodeGenModule->alias_empty() && + CachedInCodeGenModule->ifunc_empty()))) && "CodeGen wrote to a readonly module"); std::unique_ptr<llvm::Module> M(CG->ReleaseModule()); CG->StartModule("incr_module_" + std::to_string(ID++), M->getContext()); @@ -828,4 +845,4 @@ CodeGenerator *Interpreter::getCodeGen(IncrementalAction *Action) const { return nullptr; return static_cast<CodeGenAction *>(WrappedAct)->getCodeGenerator(); } -} // namespace clang +} // end namespace clang diff --git a/clang/lib/Interpreter/InterpreterUtils.cpp b/clang/lib/Interpreter/InterpreterUtils.cpp index 45f6322..a19f96c 100644 --- a/clang/lib/Interpreter/InterpreterUtils.cpp +++ b/clang/lib/Interpreter/InterpreterUtils.cpp @@ -11,6 +11,7 @@ //===----------------------------------------------------------------------===// #include "InterpreterUtils.h" +#include "clang/AST/QualTypeNames.h" namespace clang { @@ -81,7 +82,7 @@ NamedDecl *LookupNamed(Sema &S, llvm::StringRef Name, else { const DeclContext *PrimaryWithin = nullptr; if (const auto *TD = dyn_cast<TagDecl>(Within)) - PrimaryWithin = llvm::dyn_cast_or_null<DeclContext>(TD->getDefinition()); + PrimaryWithin = dyn_cast_if_present<DeclContext>(TD->getDefinition()); else PrimaryWithin = Within->getPrimaryContext(); @@ -97,15 +98,16 @@ NamedDecl *LookupNamed(Sema &S, llvm::StringRef Name, R.resolveKind(); if (R.isSingleResult()) - return llvm::dyn_cast<NamedDecl>(R.getFoundDecl()); + return dyn_cast<NamedDecl>(R.getFoundDecl()); return nullptr; } std::string GetFullTypeName(ASTContext &Ctx, QualType QT) { + QualType FQT = TypeName::getFullyQualifiedType(QT, Ctx); PrintingPolicy Policy(Ctx.getPrintingPolicy()); Policy.SuppressScope = false; Policy.AnonymousTagLocations = false; - return QT.getAsString(Policy); + return FQT.getAsString(Policy); } } // namespace clang diff --git a/clang/lib/Interpreter/InterpreterUtils.h b/clang/lib/Interpreter/InterpreterUtils.h index c7b405b..fbf9814 100644 --- a/clang/lib/Interpreter/InterpreterUtils.h +++ b/clang/lib/Interpreter/InterpreterUtils.h @@ -45,7 +45,7 @@ NamespaceDecl *LookupNamespace(Sema &S, llvm::StringRef Name, const DeclContext *Within = nullptr); NamedDecl *LookupNamed(Sema &S, llvm::StringRef Name, - const DeclContext *Within); + const DeclContext *Within = nullptr); std::string GetFullTypeName(ASTContext &Ctx, QualType QT); } // namespace clang diff --git a/clang/lib/Interpreter/InterpreterValuePrinter.cpp b/clang/lib/Interpreter/InterpreterValuePrinter.cpp index 3e7e32b..0ea6274 100644 --- a/clang/lib/Interpreter/InterpreterValuePrinter.cpp +++ b/clang/lib/Interpreter/InterpreterValuePrinter.cpp @@ -18,6 +18,7 @@ #include "clang/Frontend/CompilerInstance.h" #include "clang/Interpreter/Interpreter.h" #include "clang/Interpreter/Value.h" +#include "clang/Lex/Preprocessor.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/Sema.h" @@ -25,13 +26,335 @@ #include "llvm/Support/raw_ostream.h" #include <cassert> - +#include <cmath> #include <cstdarg> +#include <sstream> +#include <string> + +#define DEBUG_TYPE "interp-value" + +using namespace clang; + +static std::string DeclTypeToString(const QualType &QT, NamedDecl *D) { + std::string Str; + llvm::raw_string_ostream 
SS(Str); + if (QT.hasQualifiers()) + SS << QT.getQualifiers().getAsString() << " "; + SS << D->getQualifiedNameAsString(); + return Str; +} + +static std::string QualTypeToString(ASTContext &Ctx, QualType QT) { + PrintingPolicy Policy(Ctx.getPrintingPolicy()); + // Print the Allocator in STL containers, for instance. + Policy.SuppressDefaultTemplateArgs = false; + Policy.SuppressUnwrittenScope = true; + // Print 'a<b<c> >' rather than 'a<b<c>>'. + Policy.SplitTemplateClosers = true; + + struct LocalPrintingPolicyRAII { + ASTContext &Context; + PrintingPolicy Policy; + + LocalPrintingPolicyRAII(ASTContext &Ctx, PrintingPolicy &PP) + : Context(Ctx), Policy(Ctx.getPrintingPolicy()) { + Context.setPrintingPolicy(PP); + } + ~LocalPrintingPolicyRAII() { Context.setPrintingPolicy(Policy); } + } X(Ctx, Policy); + + const QualType NonRefTy = QT.getNonReferenceType(); + + if (const auto *TTy = llvm::dyn_cast<TagType>(NonRefTy)) + return DeclTypeToString(NonRefTy, TTy->getDecl()); + + if (const auto *TRy = dyn_cast<RecordType>(NonRefTy)) + return DeclTypeToString(NonRefTy, TRy->getDecl()); + + const QualType Canon = NonRefTy.getCanonicalType(); + + // FIXME: How a builtin type can be a function pointer type? + if (Canon->isBuiltinType() && !NonRefTy->isFunctionPointerType() && + !NonRefTy->isMemberPointerType()) + return Canon.getAsString(Ctx.getPrintingPolicy()); + + if (const auto *TDTy = dyn_cast<TypedefType>(NonRefTy)) { + // FIXME: TemplateSpecializationType & SubstTemplateTypeParmType checks + // are predominately to get STL containers to print nicer and might be + // better handled in GetFullyQualifiedName. + // + // std::vector<Type>::iterator is a TemplateSpecializationType + // std::vector<Type>::value_type is a SubstTemplateTypeParmType + // + QualType SSDesugar = TDTy->getLocallyUnqualifiedSingleStepDesugaredType(); + if (llvm::isa<SubstTemplateTypeParmType>(SSDesugar)) + return GetFullTypeName(Ctx, Canon); + else if (llvm::isa<TemplateSpecializationType>(SSDesugar)) + return GetFullTypeName(Ctx, NonRefTy); + return DeclTypeToString(NonRefTy, TDTy->getDecl()); + } + return GetFullTypeName(Ctx, NonRefTy); +} + +static std::string EnumToString(const Value &V) { + std::string Str; + llvm::raw_string_ostream SS(Str); + ASTContext &Ctx = const_cast<ASTContext &>(V.getASTContext()); + + QualType DesugaredTy = V.getType().getDesugaredType(Ctx); + const EnumType *EnumTy = DesugaredTy.getNonReferenceType()->getAs<EnumType>(); + assert(EnumTy && "Fail to cast to enum type"); + + EnumDecl *ED = EnumTy->getDecl(); + uint64_t Data = V.convertTo<uint64_t>(); + bool IsFirst = true; + llvm::APSInt AP = Ctx.MakeIntValue(Data, DesugaredTy); + + for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E; ++I) { + if (I->getInitVal() == AP) { + if (!IsFirst) + SS << " ? "; + SS << "(" + I->getQualifiedNameAsString() << ")"; + IsFirst = false; + } + } + llvm::SmallString<64> APStr; + AP.toString(APStr, /*Radix=*/10); + SS << " : " << QualTypeToString(Ctx, ED->getIntegerType()) << " " << APStr; + return Str; +} + +static std::string FunctionToString(const Value &V, const void *Ptr) { + std::string Str; + llvm::raw_string_ostream SS(Str); + SS << "Function @" << Ptr; + + const DeclContext *PTU = V.getASTContext().getTranslationUnitDecl(); + // Find the last top-level-stmt-decl. This is a forward iterator but the + // partial translation unit should not be large. 
+ const TopLevelStmtDecl *TLSD = nullptr; + for (const Decl *D : PTU->noload_decls()) + if (isa<TopLevelStmtDecl>(D)) + TLSD = cast<TopLevelStmtDecl>(D); + + // Get __clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void + // *OpaqueType, void *Val); + const FunctionDecl *FD = nullptr; + if (auto *InterfaceCall = llvm::dyn_cast<CallExpr>(TLSD->getStmt())) { + const auto *Arg = InterfaceCall->getArg(/*Val*/ 3); + // Get rid of cast nodes. + while (const CastExpr *CastE = llvm::dyn_cast<CastExpr>(Arg)) + Arg = CastE->getSubExpr(); + if (const DeclRefExpr *DeclRefExp = llvm::dyn_cast<DeclRefExpr>(Arg)) + FD = llvm::dyn_cast<FunctionDecl>(DeclRefExp->getDecl()); + + if (FD) { + SS << '\n'; + const clang::FunctionDecl *FDef; + if (FD->hasBody(FDef)) + FDef->print(SS); + } + } + return Str; +} + +static std::string VoidPtrToString(const void *Ptr) { + std::string Str; + llvm::raw_string_ostream SS(Str); + SS << Ptr; + return Str; +} + +static std::string CharPtrToString(const char *Ptr) { + if (!Ptr) + return "0"; + + std::string Result = "\""; + Result += Ptr; + Result += '"'; + return Result; +} namespace clang { +struct ValueRef : public Value { + ValueRef(const Interpreter *In, void *Ty) : Value(In, Ty) { + // Tell the base class to not try to deallocate if it manages the value. + IsManuallyAlloc = false; + } +}; + +std::string Interpreter::ValueDataToString(const Value &V) const { + Sema &S = getCompilerInstance()->getSema(); + ASTContext &Ctx = S.getASTContext(); + + QualType QT = V.getType(); + + if (const ConstantArrayType *CAT = Ctx.getAsConstantArrayType(QT)) { + QualType ElemTy = CAT->getElementType(); + size_t ElemCount = Ctx.getConstantArrayElementCount(CAT); + const Type *BaseTy = CAT->getBaseElementTypeUnsafe(); + size_t ElemSize = Ctx.getTypeSizeInChars(BaseTy).getQuantity(); + + // Treat null terminated char arrays as strings basically. + if (ElemTy->isCharType()) { + char last = *(char *)(((uintptr_t)V.getPtr()) + ElemCount * ElemSize - 1); + if (last == '\0') + return CharPtrToString((char *)V.getPtr()); + } + + std::string Result = "{ "; + for (unsigned Idx = 0, N = CAT->getZExtSize(); Idx < N; ++Idx) { + ValueRef InnerV = ValueRef(this, ElemTy.getAsOpaquePtr()); + if (ElemTy->isBuiltinType()) { + // Single dim arrays, advancing. + uintptr_t Offset = (uintptr_t)V.getPtr() + Idx * ElemSize; + InnerV.setRawBits((void *)Offset, ElemSize * 8); + } else { + // Multi dim arrays, position to the next dimension. + size_t Stride = ElemCount / N; + uintptr_t Offset = ((uintptr_t)V.getPtr()) + Idx * Stride * ElemSize; + InnerV.setPtr((void *)Offset); + } + + Result += ValueDataToString(InnerV); + + // Skip the \0 if the char types + if (Idx < N - 1) + Result += ", "; + } + Result += " }"; + return Result; + } + + QualType DesugaredTy = QT.getDesugaredType(Ctx); + QualType NonRefTy = DesugaredTy.getNonReferenceType(); + + // FIXME: Add support for user defined printers. + // LookupResult R = LookupUserDefined(S, QT); + // if (!R.empty()) + // return CallUserSpecifiedPrinter(R, V); + + // If it is a builtin type dispatch to the builtin overloads. 
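// Aside, not part of the patch: the floating-point formatting policy of the
// lambda below, approximated as a standalone helper for plain doubles (the
// 'f'/'L' suffix handling is dropped). The name is hypothetical.
#include <cmath>
#include <cstdio>
#include <string>

static std::string formatDoubleSketch(double Val) {
  char Buf[64];
  if (std::isnan(Val) || std::isinf(Val))
    std::snprintf(Buf, sizeof(Buf), "%g", Val);
  else if (Val == static_cast<double>(static_cast<long long>(Val)))
    std::snprintf(Buf, sizeof(Buf), "%.1f", Val);  // e.g. 1.0, -3.0
  else if (std::abs(Val) < 1e-4 || std::abs(Val) > 1e6)
    std::snprintf(Buf, sizeof(Buf), "%#.6g", Val); // very small or very large
  else
    std::snprintf(Buf, sizeof(Buf), "%#.8g", Val); // e.g. 3.1415900
  return Buf;
}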
+ if (auto *BT = DesugaredTy.getCanonicalType()->getAs<BuiltinType>()) { + + auto formatFloating = [](auto Val, char Suffix = '\0') -> std::string { + std::string Out; + llvm::raw_string_ostream SS(Out); + + if (std::isnan(Val) || std::isinf(Val)) { + SS << llvm::format("%g", Val); + return SS.str(); + } + if (Val == static_cast<decltype(Val)>(static_cast<int64_t>(Val))) + SS << llvm::format("%.1f", Val); + else if (std::abs(Val) < 1e-4 || std::abs(Val) > 1e6 || Suffix == 'f') + SS << llvm::format("%#.6g", Val); + else if (Suffix == 'L') + SS << llvm::format("%#.12Lg", Val); + else + SS << llvm::format("%#.8g", Val); + + if (Suffix != '\0') + SS << Suffix; + return SS.str(); + }; + + std::string Str; + llvm::raw_string_ostream SS(Str); + switch (BT->getKind()) { + default: + return "{ error: unknown builtin type '" + std::to_string(BT->getKind()) + + " '}"; + case clang::BuiltinType::Bool: + SS << ((V.getBool()) ? "true" : "false"); + return Str; + case clang::BuiltinType::Char_S: + SS << '\'' << V.getChar_S() << '\''; + return Str; + case clang::BuiltinType::SChar: + SS << '\'' << V.getSChar() << '\''; + return Str; + case clang::BuiltinType::Char_U: + SS << '\'' << V.getChar_U() << '\''; + return Str; + case clang::BuiltinType::UChar: + SS << '\'' << V.getUChar() << '\''; + return Str; + case clang::BuiltinType::Short: + SS << V.getShort(); + return Str; + case clang::BuiltinType::UShort: + SS << V.getUShort(); + return Str; + case clang::BuiltinType::Int: + SS << V.getInt(); + return Str; + case clang::BuiltinType::UInt: + SS << V.getUInt(); + return Str; + case clang::BuiltinType::Long: + SS << V.getLong(); + return Str; + case clang::BuiltinType::ULong: + SS << V.getULong(); + return Str; + case clang::BuiltinType::LongLong: + SS << V.getLongLong(); + return Str; + case clang::BuiltinType::ULongLong: + SS << V.getULongLong(); + return Str; + case clang::BuiltinType::Float: + return formatFloating(V.getFloat(), /*suffix=*/'f'); + + case clang::BuiltinType::Double: + return formatFloating(V.getDouble()); + + case clang::BuiltinType::LongDouble: + return formatFloating(V.getLongDouble(), /*suffix=*/'L'); + } + } + + if ((NonRefTy->isPointerType() || NonRefTy->isMemberPointerType()) && + NonRefTy->getPointeeType()->isFunctionProtoType()) + return FunctionToString(V, V.getPtr()); + + if (NonRefTy->isFunctionType()) + return FunctionToString(V, &V); + + if (NonRefTy->isEnumeralType()) + return EnumToString(V); + + if (NonRefTy->isNullPtrType()) + return "nullptr\n"; + + // FIXME: Add support for custom printers in C. + if (NonRefTy->isPointerType()) { + if (NonRefTy->getPointeeType()->isCharType()) + return CharPtrToString((char *)V.getPtr()); + + return VoidPtrToString(V.getPtr()); + } + + // Fall back to printing just the address of the unknown object. 
+ return "@" + VoidPtrToString(V.getPtr()); +} + +std::string Interpreter::ValueTypeToString(const Value &V) const { + ASTContext &Ctx = const_cast<ASTContext &>(V.getASTContext()); + QualType QT = V.getType(); + + std::string QTStr = QualTypeToString(Ctx, QT); + + if (QT->isReferenceType()) + QTStr += " &"; + + return QTStr; +} + llvm::Expected<llvm::orc::ExecutorAddr> -Interpreter::CompileDtorCall(CXXRecordDecl *CXXRD) { +Interpreter::CompileDtorCall(CXXRecordDecl *CXXRD) const { assert(CXXRD && "Cannot compile a destructor for a nullptr"); if (auto Dtor = Dtors.find(CXXRD); Dtor != Dtors.end()) return Dtor->getSecond(); @@ -81,7 +404,7 @@ public: return InterfaceKind::CopyArray; } - InterfaceKind VisitFunctionProtoType(const FunctionProtoType *Ty) { + InterfaceKind VisitFunctionType(const FunctionType *Ty) { HandlePtrType(Ty); return InterfaceKind::NoAlloc; } @@ -141,9 +464,14 @@ private: } }; +static constexpr llvm::StringRef VPName[] = { + "__clang_Interpreter_SetValueNoAlloc", + "__clang_Interpreter_SetValueWithAlloc", + "__clang_Interpreter_SetValueCopyArr", "__ci_newtag"}; + // This synthesizes a call expression to a speciall // function that is responsible for generating the Value. -// In general, we transform: +// In general, we transform c++: // clang-repl> x // To: // // 1. If x is a built-in type like int, float. @@ -154,7 +482,7 @@ private: // // 3. If x is a struct, but a rvalue. // new (__clang_Interpreter_SetValueWithAlloc(ThisInterp, OpaqueValue, // xQualType)) (x); -llvm::Expected<Expr *> Interpreter::ExtractValueFromExpr(Expr *E) { +llvm::Expected<Expr *> Interpreter::convertExprToValue(Expr *E) { Sema &S = getCompilerInstance()->getSema(); ASTContext &Ctx = S.getASTContext(); @@ -176,23 +504,21 @@ llvm::Expected<Expr *> Interpreter::ExtractValueFromExpr(Expr *E) { Interface = S.BuildDeclarationNameExpr(CSS, R, /*ADL=*/false).get(); return llvm::Error::success(); }; - static constexpr llvm::StringRef Builtin[] = { - "__clang_Interpreter_SetValueNoAlloc", - "__clang_Interpreter_SetValueWithAlloc", - "__clang_Interpreter_SetValueCopyArr", "__ci_newtag"}; if (llvm::Error Err = - LookupInterface(ValuePrintingInfo[NoAlloc], Builtin[NoAlloc])) + LookupInterface(ValuePrintingInfo[NoAlloc], VPName[NoAlloc])) + return std::move(Err); + + if (llvm::Error Err = + LookupInterface(ValuePrintingInfo[CopyArray], VPName[CopyArray])) + return std::move(Err); + + if (llvm::Error Err = + LookupInterface(ValuePrintingInfo[WithAlloc], VPName[WithAlloc])) return std::move(Err); if (Ctx.getLangOpts().CPlusPlus) { if (llvm::Error Err = - LookupInterface(ValuePrintingInfo[WithAlloc], Builtin[WithAlloc])) - return std::move(Err); - if (llvm::Error Err = - LookupInterface(ValuePrintingInfo[CopyArray], Builtin[CopyArray])) - return std::move(Err); - if (llvm::Error Err = - LookupInterface(ValuePrintingInfo[NewTag], Builtin[NewTag])) + LookupInterface(ValuePrintingInfo[NewTag], VPName[NewTag])) return std::move(Err); } } @@ -211,7 +537,7 @@ llvm::Expected<Expr *> Interpreter::ExtractValueFromExpr(Expr *E) { if (auto *EWC = llvm::dyn_cast_if_present<ExprWithCleanups>(E)) E = EWC->getSubExpr(); - QualType Ty = E->getType(); + QualType Ty = E->IgnoreImpCasts()->getType(); QualType DesugaredTy = Ty.getDesugaredType(Ctx); // For lvalue struct, we treat it as a reference. 
@@ -239,7 +565,10 @@ llvm::Expected<Expr *> Interpreter::ExtractValueFromExpr(Expr *E) { ExprResult AllocCall = S.ActOnCallExpr(Scope, ValuePrintingInfo[InterfaceKind::WithAlloc], E->getBeginLoc(), AdjustedArgs, E->getEndLoc()); - assert(!AllocCall.isInvalid() && "Can't create runtime interface call!"); + if (AllocCall.isInvalid()) + return llvm::make_error<llvm::StringError>( + "Cannot call to " + VPName[WithAlloc], + llvm::inconvertibleErrorCode()); TypeSourceInfo *TSI = Ctx.getTrivialTypeSourceInfo(Ty, SourceLocation()); @@ -253,14 +582,23 @@ llvm::Expected<Expr *> Interpreter::ExtractValueFromExpr(Expr *E) { // __clang_Interpreter_SetValueCopyArr. if (Kind == InterfaceKind::CopyArray) { - const auto *ConstantArrTy = - cast<ConstantArrayType>(DesugaredTy.getTypePtr()); - size_t ArrSize = Ctx.getConstantArrayElementCount(ConstantArrTy); + const auto *CATy = cast<ConstantArrayType>(DesugaredTy.getTypePtr()); + size_t ArrSize = Ctx.getConstantArrayElementCount(CATy); + + if (!Ctx.getLangOpts().CPlusPlus) + ArrSize *= Ctx.getTypeSizeInChars(CATy->getBaseElementTypeUnsafe()) + .getQuantity(); + Expr *ArrSizeExpr = IntegerLiteralExpr(Ctx, ArrSize); Expr *Args[] = {E, AllocCall.get(), ArrSizeExpr}; SetValueE = S.ActOnCallExpr(Scope, ValuePrintingInfo[InterfaceKind::CopyArray], SourceLocation(), Args, SourceLocation()); + if (SetValueE.isInvalid()) + return llvm::make_error<llvm::StringError>( + "Cannot call to " + VPName[CopyArray], + llvm::inconvertibleErrorCode()); + break; } Expr *Args[] = {AllocCall.get(), ValuePrintingInfo[InterfaceKind::NewTag]}; ExprResult CXXNewCall = S.BuildCXXNew( @@ -270,8 +608,10 @@ llvm::Expected<Expr *> Interpreter::ExtractValueFromExpr(Expr *E) { /*TypeIdParens=*/SourceRange(), TSI->getType(), TSI, std::nullopt, E->getSourceRange(), E); - assert(!CXXNewCall.isInvalid() && - "Can't create runtime placement new call!"); + if (CXXNewCall.isInvalid()) + return llvm::make_error<llvm::StringError>( + "Cannot build a call to placement new", + llvm::inconvertibleErrorCode()); SetValueE = S.ActOnFinishFullExpr(CXXNewCall.get(), /*DiscardedValue=*/false); @@ -300,6 +640,7 @@ llvm::Expected<Expr *> Interpreter::ExtractValueFromExpr(Expr *E) { using namespace clang; // Temporary rvalue struct that need special care. +extern "C" { REPL_EXTERNAL_VISIBILITY void * __clang_Interpreter_SetValueWithAlloc(void *This, void *OutVal, void *OpaqueType) { @@ -308,8 +649,9 @@ __clang_Interpreter_SetValueWithAlloc(void *This, void *OutVal, return VRef.getPtr(); } -extern "C" void REPL_EXTERNAL_VISIBILITY __clang_Interpreter_SetValueNoAlloc( - void *This, void *OutVal, void *OpaqueType, ...) { +REPL_EXTERNAL_VISIBILITY void +__clang_Interpreter_SetValueNoAlloc(void *This, void *OutVal, void *OpaqueType, + ...) { Value &VRef = *(Value *)OutVal; Interpreter *I = static_cast<Interpreter *>(This); VRef = Value(I, OpaqueType); @@ -384,6 +726,7 @@ extern "C" void REPL_EXTERNAL_VISIBILITY __clang_Interpreter_SetValueNoAlloc( } va_end(args); } +} // A trampoline to work around the fact that operator placement new cannot // really be forward declared due to libc++ and libstdc++ declaration mismatch. 
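A side note on the floating-point output in the ValueDataToString hunk above: the double case of the formatFloating lambda can be restated as a small standalone function. This is only a sketch using snprintf in place of llvm::format (formatDoubleLikeRepl is an illustrative name), and the magnitude guard in front of the int64_t round-trip is added here for safety; it is not in the lambda itself.

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <string>

// Restatement of the double case of formatFloating (no suffix handling).
static std::string formatDoubleLikeRepl(double Val) {
  char Buf[64];
  if (std::isnan(Val) || std::isinf(Val))
    std::snprintf(Buf, sizeof(Buf), "%g", Val);
  else if (std::abs(Val) < 9.0e18 &&  // keep the int64_t cast in range
           Val == static_cast<double>(static_cast<int64_t>(Val)))
    std::snprintf(Buf, sizeof(Buf), "%.1f", Val);   // integral values, e.g. "2.0"
  else if (std::abs(Val) < 1e-4 || std::abs(Val) > 1e6)
    std::snprintf(Buf, sizeof(Buf), "%#.6g", Val);  // tiny/huge, e.g. "1.00000e-05"
  else
    std::snprintf(Buf, sizeof(Buf), "%#.8g", Val);  // default, e.g. "3.1415927"
  return Buf;
}

For non-integral values the lambda additionally selects %#.6g when the suffix is 'f' and %#.12Lg when it is 'L', then appends the suffix character to the printed result.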
diff --git a/clang/lib/Interpreter/Value.cpp b/clang/lib/Interpreter/Value.cpp index afdf406..be2ab55 100644 --- a/clang/lib/Interpreter/Value.cpp +++ b/clang/lib/Interpreter/Value.cpp @@ -12,6 +12,7 @@ //===----------------------------------------------------------------------===// #include "clang/Interpreter/Value.h" +#include "InterpreterUtils.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Type.h" #include "clang/Interpreter/Interpreter.h" @@ -19,6 +20,8 @@ #include <cassert> #include <utility> +using namespace clang; + namespace { // This is internal buffer maintained by Value, used to hold temporaries. @@ -117,8 +120,9 @@ static Value::Kind ConvertQualTypeToKind(const ASTContext &Ctx, QualType QT) { } } -Value::Value(Interpreter *In, void *Ty) : Interp(In), OpaqueType(Ty) { - setKind(ConvertQualTypeToKind(getASTContext(), getType())); +Value::Value(const Interpreter *In, void *Ty) : Interp(In), OpaqueType(Ty) { + const ASTContext &C = getASTContext(); + setKind(ConvertQualTypeToKind(C, getType())); if (ValueKind == K_PtrOrObj) { QualType Canon = getType().getCanonicalType(); if ((Canon->isPointerType() || Canon->isObjectType() || @@ -127,7 +131,7 @@ Value::Value(Interpreter *In, void *Ty) : Interp(In), OpaqueType(Ty) { Canon->isMemberPointerType())) { IsManuallyAlloc = true; // Compile dtor function. - Interpreter &Interp = getInterpreter(); + const Interpreter &Interp = getInterpreter(); void *DtorF = nullptr; size_t ElementsSize = 1; QualType DtorTy = getType(); @@ -228,14 +232,13 @@ void *Value::getPtr() const { return Data.m_Ptr; } -QualType Value::getType() const { - return QualType::getFromOpaquePtr(OpaqueType); +void Value::setRawBits(void *Ptr, unsigned NBits /*= sizeof(Storage)*/) { + assert(NBits <= sizeof(Storage) && "Greater than the total size"); + memcpy(/*dest=*/Data.m_RawBits, /*src=*/Ptr, /*nbytes=*/NBits / 8); } -Interpreter &Value::getInterpreter() { - assert(Interp != nullptr && - "Can't get interpreter from a default constructed value"); - return *Interp; +QualType Value::getType() const { + return QualType::getFromOpaquePtr(OpaqueType); } const Interpreter &Value::getInterpreter() const { @@ -244,8 +247,6 @@ const Interpreter &Value::getInterpreter() const { return *Interp; } -ASTContext &Value::getASTContext() { return getInterpreter().getASTContext(); } - const ASTContext &Value::getASTContext() const { return getInterpreter().getASTContext(); } @@ -253,14 +254,32 @@ const ASTContext &Value::getASTContext() const { void Value::dump() const { print(llvm::outs()); } void Value::printType(llvm::raw_ostream &Out) const { - Out << "Not implement yet.\n"; + Out << Interp->ValueTypeToString(*this); } + void Value::printData(llvm::raw_ostream &Out) const { - Out << "Not implement yet.\n"; + Out << Interp->ValueDataToString(*this); } +// FIXME: We do not support the multiple inheritance case where one of the base +// classes has a pretty-printer and the other does not. void Value::print(llvm::raw_ostream &Out) const { assert(OpaqueType != nullptr && "Can't print default Value"); - Out << "Not implement yet.\n"; + + // Don't even try to print a void or an invalid type, it doesn't make sense. + if (getType()->isVoidType() || !isValid()) + return; + + // We need to get all the results together then print it, since `printType` is + // much faster than `printData`. 
+ std::string Str; + llvm::raw_string_ostream SS(Str); + + SS << "("; + printType(SS); + SS << ") "; + printData(SS); + SS << "\n"; + Out << Str; } } // namespace clang diff --git a/clang/lib/Lex/DependencyDirectivesScanner.cpp b/clang/lib/Lex/DependencyDirectivesScanner.cpp index 869c9ce..9ccff5e 100644 --- a/clang/lib/Lex/DependencyDirectivesScanner.cpp +++ b/clang/lib/Lex/DependencyDirectivesScanner.cpp @@ -560,15 +560,13 @@ bool Scanner::lexModuleDirectiveBody(DirectiveKind Kind, const char *&First, if (Tok.is(tok::semi)) break; } + + const auto &Tok = lexToken(First, End); pushDirective(Kind); - skipWhitespace(First, End); - if (First == End) + if (Tok.is(tok::eof) || Tok.is(tok::eod)) return false; - if (!isVerticalWhitespace(*First)) - return reportError( - DirectiveLoc, diag::err_dep_source_scanner_unexpected_tokens_at_import); - skipNewline(First, End); - return false; + return reportError(DirectiveLoc, + diag::err_dep_source_scanner_unexpected_tokens_at_import); } dependency_directives_scan::Token &Scanner::lexToken(const char *&First, @@ -735,6 +733,13 @@ bool Scanner::lexModule(const char *&First, const char *const End) { return false; break; } + case ';': { + // Handle the global module fragment `module;`. + if (Id == "module" && !Export) + break; + skipLine(First, End); + return false; + } case '<': case '"': break; @@ -905,14 +910,6 @@ bool Scanner::lexPPLine(const char *&First, const char *const End) { CurDirToks.clear(); }); - // Handle "@import". - if (*First == '@') - return lexAt(First, End); - - // Handle module directives for C++20 modules. - if (*First == 'i' || *First == 'e' || *First == 'm') - return lexModule(First, End); - if (*First == '_') { if (isNextIdentifierOrSkipLine("_Pragma", First, End)) return lex_Pragma(First, End); @@ -925,6 +922,14 @@ bool Scanner::lexPPLine(const char *&First, const char *const End) { auto ScEx2 = make_scope_exit( [&]() { TheLexer.setParsingPreprocessorDirective(false); }); + // Handle "@import". + if (*First == '@') + return lexAt(First, End); + + // Handle module directives for C++20 modules. + if (*First == 'i' || *First == 'e' || *First == 'm') + return lexModule(First, End); + // Lex '#'. const dependency_directives_scan::Token &HashTok = lexToken(First, End); if (HashTok.is(tok::hashhash)) { diff --git a/clang/lib/Lex/LiteralSupport.cpp b/clang/lib/Lex/LiteralSupport.cpp index a62508e..5b08d7f 100644 --- a/clang/lib/Lex/LiteralSupport.cpp +++ b/clang/lib/Lex/LiteralSupport.cpp @@ -1467,7 +1467,7 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) { if (s != PossibleNewDigitStart) DigitsBegin = PossibleNewDigitStart; else - IsSingleZero = (s == ThisTokEnd); // Is the only thing we've seen a 0? + IsSingleZero = (s == ThisTokBegin + 1); if (s == ThisTokEnd) return; // Done, simple octal number like 01234 diff --git a/clang/lib/Lex/Pragma.cpp b/clang/lib/Lex/Pragma.cpp index 01c85e6..bba3c89 100644 --- a/clang/lib/Lex/Pragma.cpp +++ b/clang/lib/Lex/Pragma.cpp @@ -591,7 +591,8 @@ IdentifierInfo *Preprocessor::ParsePragmaPushOrPopMacro(Token &Tok) { } // Remember the macro string. - std::string StrVal = getSpelling(Tok); + Token StrTok = Tok; + std::string StrVal = getSpelling(StrTok); // Read the ')'. 
Lex(Tok); @@ -604,6 +605,15 @@ IdentifierInfo *Preprocessor::ParsePragmaPushOrPopMacro(Token &Tok) { assert(StrVal[0] == '"' && StrVal[StrVal.size()-1] == '"' && "Invalid string token!"); + if (StrVal.size() <= 2) { + Diag(StrTok.getLocation(), diag::warn_pargma_push_pop_macro_empty_string) + << SourceRange( + StrTok.getLocation(), + StrTok.getLocation().getLocWithOffset(StrTok.getLength())) + << PragmaTok.getIdentifierInfo()->isStr("pop_macro"); + return nullptr; + } + // Create a Token from the string. Token MacroTok; MacroTok.startToken(); diff --git a/clang/lib/Lex/Preprocessor.cpp b/clang/lib/Lex/Preprocessor.cpp index bcd3ea6..e278846 100644 --- a/clang/lib/Lex/Preprocessor.cpp +++ b/clang/lib/Lex/Preprocessor.cpp @@ -950,6 +950,8 @@ void Preprocessor::Lex(Token &Result) { case tok::period: ModuleDeclState.handlePeriod(); break; + case tok::eod: + break; case tok::identifier: // Check "import" and "module" when there is no open bracket. The two // identifiers are not meaningful with open brackets. diff --git a/clang/lib/Parse/ParseStmt.cpp b/clang/lib/Parse/ParseStmt.cpp index 31b84b6..bf1978c 100644 --- a/clang/lib/Parse/ParseStmt.cpp +++ b/clang/lib/Parse/ParseStmt.cpp @@ -541,7 +541,8 @@ StmtResult Parser::ParseExprStatement(ParsedStmtContext StmtCtx) { } Token *CurTok = nullptr; - // Note we shouldn't eat the token since the callback needs it. + // If the semicolon is missing at the end of REPL input, we want to print + // the result. Note we shouldn't eat the token since the callback needs it. if (Tok.is(tok::annot_repl_input_end)) CurTok = &Tok; else diff --git a/clang/lib/Parse/Parser.cpp b/clang/lib/Parse/Parser.cpp index 8834bf8..ff50b3f 100644 --- a/clang/lib/Parse/Parser.cpp +++ b/clang/lib/Parse/Parser.cpp @@ -2519,6 +2519,7 @@ Decl *Parser::ParseModuleImport(SourceLocation AtLoc, break; } ExpectAndConsumeSemi(diag::err_module_expected_semi); + TryConsumeToken(tok::eod); if (SeenError) return nullptr; diff --git a/clang/lib/Sema/AnalysisBasedWarnings.cpp b/clang/lib/Sema/AnalysisBasedWarnings.cpp index d1400cb..829c81b 100644 --- a/clang/lib/Sema/AnalysisBasedWarnings.cpp +++ b/clang/lib/Sema/AnalysisBasedWarnings.cpp @@ -2901,8 +2901,7 @@ void clang::sema::AnalysisBasedWarnings::IssueWarnings( .setAlwaysAdd(Stmt::UnaryOperatorClass); } - bool EnableLifetimeSafetyAnalysis = !Diags.isIgnored( - diag::warn_experimental_lifetime_safety_dummy_warning, D->getBeginLoc()); + bool EnableLifetimeSafetyAnalysis = S.getLangOpts().EnableLifetimeSafety; // Install the logical handler. std::optional<LogicalErrorHandler> LEH; if (LogicalErrorHandler::hasActiveDiagnostics(Diags, D->getBeginLoc())) { @@ -3029,8 +3028,8 @@ void clang::sema::AnalysisBasedWarnings::IssueWarnings( // TODO: Enable lifetime safety analysis for other languages once it is // stable. if (EnableLifetimeSafetyAnalysis && S.getLangOpts().CPlusPlus) { - if (CFG *cfg = AC.getCFG()) - runLifetimeSafetyAnalysis(*cast<DeclContext>(D), *cfg, AC); + if (AC.getCFG()) + lifetimes::runLifetimeSafetyAnalysis(AC); } // Check for violations of "called once" parameter properties. 
if (S.getLangOpts().ObjC && !S.getLangOpts().CPlusPlus && diff --git a/clang/lib/Sema/SemaARM.cpp b/clang/lib/Sema/SemaARM.cpp index bd603a9..8e27fab 100644 --- a/clang/lib/Sema/SemaARM.cpp +++ b/clang/lib/Sema/SemaARM.cpp @@ -1535,4 +1535,95 @@ bool SemaARM::areLaxCompatibleSveTypes(QualType FirstType, IsLaxCompatible(SecondType, FirstType); } +bool SemaARM::checkTargetVersionAttr(const StringRef Param, + const SourceLocation Loc) { + using namespace DiagAttrParams; + + llvm::SmallVector<StringRef, 8> Features; + Param.split(Features, '+'); + for (StringRef Feat : Features) { + Feat = Feat.trim(); + if (Feat == "default") + continue; + if (!getASTContext().getTargetInfo().validateCpuSupports(Feat)) + return Diag(Loc, diag::warn_unsupported_target_attribute) + << Unsupported << None << Feat << TargetVersion; + } + return false; +} + +bool SemaARM::checkTargetClonesAttr( + SmallVectorImpl<StringRef> &Params, SmallVectorImpl<SourceLocation> &Locs, + SmallVectorImpl<SmallString<64>> &NewParams) { + using namespace DiagAttrParams; + + if (!getASTContext().getTargetInfo().hasFeature("fmv")) + return true; + + assert(Params.size() == Locs.size() && + "Mismatch between number of string parameters and locations"); + + bool HasDefault = false; + bool HasNonDefault = false; + for (unsigned I = 0, E = Params.size(); I < E; ++I) { + const StringRef Param = Params[I].trim(); + const SourceLocation &Loc = Locs[I]; + + if (Param.empty()) + return Diag(Loc, diag::warn_unsupported_target_attribute) + << Unsupported << None << "" << TargetClones; + + if (Param == "default") { + if (HasDefault) + Diag(Loc, diag::warn_target_clone_duplicate_options); + else { + NewParams.push_back(Param); + HasDefault = true; + } + continue; + } + + bool HasCodeGenImpact = false; + llvm::SmallVector<StringRef, 8> Features; + llvm::SmallVector<StringRef, 8> ValidFeatures; + Param.split(Features, '+'); + for (StringRef Feat : Features) { + Feat = Feat.trim(); + if (!getASTContext().getTargetInfo().validateCpuSupports(Feat)) { + Diag(Loc, diag::warn_unsupported_target_attribute) + << Unsupported << None << Feat << TargetClones; + continue; + } + if (getASTContext().getTargetInfo().doesFeatureAffectCodeGen(Feat)) + HasCodeGenImpact = true; + ValidFeatures.push_back(Feat); + } + + // Ignore features that don't impact code generation. + if (!HasCodeGenImpact) { + Diag(Loc, diag::warn_target_clone_no_impact_options); + continue; + } + + if (ValidFeatures.empty()) + continue; + + // Canonicalize attribute parameter. + llvm::sort(ValidFeatures); + SmallString<64> NewParam(llvm::join(ValidFeatures, "+")); + if (llvm::is_contained(NewParams, NewParam)) { + Diag(Loc, diag::warn_target_clone_duplicate_options); + continue; + } + + // Valid non-default argument. 
+ NewParams.push_back(NewParam); + HasNonDefault = true; + } + if (!HasNonDefault) + return true; + + return false; +} + } // namespace clang diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp index dd5b710..c74b671 100644 --- a/clang/lib/Sema/SemaChecking.cpp +++ b/clang/lib/Sema/SemaChecking.cpp @@ -3013,6 +3013,8 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, case Builtin::BI__builtin_elementwise_maxnum: case Builtin::BI__builtin_elementwise_minimum: case Builtin::BI__builtin_elementwise_maximum: + case Builtin::BI__builtin_elementwise_minimumnum: + case Builtin::BI__builtin_elementwise_maximumnum: case Builtin::BI__builtin_elementwise_atan2: case Builtin::BI__builtin_elementwise_fmod: case Builtin::BI__builtin_elementwise_pow: @@ -5239,7 +5241,9 @@ bool Sema::BuiltinVAStartARMMicrosoft(CallExpr *Call) { << 2 << Arg1->getType() << ConstCharPtrTy; const QualType SizeTy = Context.getSizeType(); - if (Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers() != SizeTy) + if (!Context.hasSameType( + Arg2Ty->getCanonicalTypeInternal().withoutLocalFastQualifiers(), + SizeTy)) Diag(Arg2->getBeginLoc(), diag::err_typecheck_convert_incompatible) << Arg2->getType() << SizeTy << 1 /* different class */ << 0 /* qualifier difference */ diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp index 5205ca0b..044cf5c 100644 --- a/clang/lib/Sema/SemaConcept.cpp +++ b/clang/lib/Sema/SemaConcept.cpp @@ -588,6 +588,9 @@ static bool CheckConstraintSatisfaction( return true; for (const AssociatedConstraint &AC : AssociatedConstraints) { + if (AC.isNull()) + return true; + Sema::ArgPackSubstIndexRAII _(S, AC.ArgPackSubstIndex); ExprResult Res = calculateConstraintSatisfaction( S, Template, TemplateIDRange.getBegin(), TemplateArgsLists, diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp index 14403e6..fd22e01 100644 --- a/clang/lib/Sema/SemaDecl.cpp +++ b/clang/lib/Sema/SemaDecl.cpp @@ -18476,6 +18476,10 @@ CreateNewDecl: // record. AddPushedVisibilityAttribute(New); + // If this is not a definition, process API notes for it now. 
+ if (TUK != TagUseKind::Definition) + ProcessAPINotes(New); + if (isMemberSpecialization && !New->isInvalidDecl()) CompleteMemberSpecialization(New, Previous); diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp index 78f4804..9a2950c 100644 --- a/clang/lib/Sema/SemaDeclAttr.cpp +++ b/clang/lib/Sema/SemaDeclAttr.cpp @@ -3254,9 +3254,8 @@ static void handleCodeSegAttr(Sema &S, Decl *D, const ParsedAttr &AL) { } bool Sema::checkTargetAttr(SourceLocation LiteralLoc, StringRef AttrStr) { - enum FirstParam { Unsupported, Duplicate, Unknown }; - enum SecondParam { None, CPU, Tune }; - enum ThirdParam { Target, TargetClones }; + using namespace DiagAttrParams; + if (AttrStr.contains("fpmath=")) return Diag(LiteralLoc, diag::warn_unsupported_target_attribute) << Unsupported << None << "fpmath=" << Target; @@ -3331,80 +3330,22 @@ bool Sema::checkTargetAttr(SourceLocation LiteralLoc, StringRef AttrStr) { return false; } -bool Sema::checkTargetVersionAttr(SourceLocation LiteralLoc, Decl *D, - StringRef AttrStr) { - enum FirstParam { Unsupported }; - enum SecondParam { None }; - enum ThirdParam { Target, TargetClones, TargetVersion }; - llvm::SmallVector<StringRef, 8> Features; - if (Context.getTargetInfo().getTriple().isRISCV()) { - llvm::SmallVector<StringRef, 8> AttrStrs; - AttrStr.split(AttrStrs, ';'); - - bool HasArch = false; - bool HasPriority = false; - bool HasDefault = false; - bool DuplicateAttr = false; - for (auto &AttrStr : AttrStrs) { - // Only support arch=+ext,... syntax. - if (AttrStr.starts_with("arch=+")) { - if (HasArch) - DuplicateAttr = true; - HasArch = true; - ParsedTargetAttr TargetAttr = - Context.getTargetInfo().parseTargetAttr(AttrStr); - - if (TargetAttr.Features.empty() || - llvm::any_of(TargetAttr.Features, [&](const StringRef Ext) { - return !RISCV().isValidFMVExtension(Ext); - })) - return Diag(LiteralLoc, diag::warn_unsupported_target_attribute) - << Unsupported << None << AttrStr << TargetVersion; - } else if (AttrStr.starts_with("default")) { - if (HasDefault) - DuplicateAttr = true; - HasDefault = true; - } else if (AttrStr.consume_front("priority=")) { - if (HasPriority) - DuplicateAttr = true; - HasPriority = true; - unsigned Digit; - if (AttrStr.getAsInteger(0, Digit)) - return Diag(LiteralLoc, diag::warn_unsupported_target_attribute) - << Unsupported << None << AttrStr << TargetVersion; - } else { - return Diag(LiteralLoc, diag::warn_unsupported_target_attribute) - << Unsupported << None << AttrStr << TargetVersion; - } - } - - if (((HasPriority || HasArch) && HasDefault) || DuplicateAttr || - (HasPriority && !HasArch)) - return Diag(LiteralLoc, diag::warn_unsupported_target_attribute) - << Unsupported << None << AttrStr << TargetVersion; +static void handleTargetVersionAttr(Sema &S, Decl *D, const ParsedAttr &AL) { + StringRef Param; + SourceLocation Loc; + if (!S.checkStringLiteralArgumentAttr(AL, 0, Param, &Loc)) + return; - return false; - } - AttrStr.split(Features, "+"); - for (auto &CurFeature : Features) { - CurFeature = CurFeature.trim(); - if (CurFeature == "default") - continue; - if (!Context.getTargetInfo().validateCpuSupports(CurFeature)) - return Diag(LiteralLoc, diag::warn_unsupported_target_attribute) - << Unsupported << None << CurFeature << TargetVersion; + if (S.Context.getTargetInfo().getTriple().isAArch64()) { + if (S.ARM().checkTargetVersionAttr(Param, Loc)) + return; + } else if (S.Context.getTargetInfo().getTriple().isRISCV()) { + if (S.RISCV().checkTargetVersionAttr(Param, Loc)) + return; } - return false; 
-} -static void handleTargetVersionAttr(Sema &S, Decl *D, const ParsedAttr &AL) { - StringRef Str; - SourceLocation LiteralLoc; - if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &LiteralLoc) || - S.checkTargetVersionAttr(LiteralLoc, D, Str)) - return; TargetVersionAttr *NewAttr = - ::new (S.Context) TargetVersionAttr(S.Context, AL, Str); + ::new (S.Context) TargetVersionAttr(S.Context, AL, Param); D->addAttr(NewAttr); } @@ -3419,158 +3360,7 @@ static void handleTargetAttr(Sema &S, Decl *D, const ParsedAttr &AL) { D->addAttr(NewAttr); } -bool Sema::checkTargetClonesAttrString( - SourceLocation LiteralLoc, StringRef Str, const StringLiteral *Literal, - Decl *D, bool &HasDefault, bool &HasCommas, bool &HasNotDefault, - SmallVectorImpl<SmallString<64>> &StringsBuffer) { - enum FirstParam { Unsupported, Duplicate, Unknown }; - enum SecondParam { None, CPU, Tune }; - enum ThirdParam { Target, TargetClones }; - HasCommas = HasCommas || Str.contains(','); - const TargetInfo &TInfo = Context.getTargetInfo(); - // Warn on empty at the beginning of a string. - if (Str.size() == 0) - return Diag(LiteralLoc, diag::warn_unsupported_target_attribute) - << Unsupported << None << "" << TargetClones; - - std::pair<StringRef, StringRef> Parts = {{}, Str}; - while (!Parts.second.empty()) { - Parts = Parts.second.split(','); - StringRef Cur = Parts.first.trim(); - SourceLocation CurLoc = - Literal->getLocationOfByte(Cur.data() - Literal->getString().data(), - getSourceManager(), getLangOpts(), TInfo); - - bool DefaultIsDupe = false; - bool HasCodeGenImpact = false; - if (Cur.empty()) - return Diag(CurLoc, diag::warn_unsupported_target_attribute) - << Unsupported << None << "" << TargetClones; - - if (TInfo.getTriple().isAArch64()) { - // AArch64 target clones specific - if (Cur == "default") { - DefaultIsDupe = HasDefault; - HasDefault = true; - if (llvm::is_contained(StringsBuffer, Cur) || DefaultIsDupe) - Diag(CurLoc, diag::warn_target_clone_duplicate_options); - else - StringsBuffer.push_back(Cur); - } else { - std::pair<StringRef, StringRef> CurParts = {{}, Cur}; - llvm::SmallVector<StringRef, 8> CurFeatures; - while (!CurParts.second.empty()) { - CurParts = CurParts.second.split('+'); - StringRef CurFeature = CurParts.first.trim(); - if (!TInfo.validateCpuSupports(CurFeature)) { - Diag(CurLoc, diag::warn_unsupported_target_attribute) - << Unsupported << None << CurFeature << TargetClones; - continue; - } - if (TInfo.doesFeatureAffectCodeGen(CurFeature)) - HasCodeGenImpact = true; - CurFeatures.push_back(CurFeature); - } - // Canonize TargetClones Attributes - llvm::sort(CurFeatures); - SmallString<64> Res; - for (auto &CurFeat : CurFeatures) { - if (!Res.empty()) - Res.append("+"); - Res.append(CurFeat); - } - if (llvm::is_contained(StringsBuffer, Res) || DefaultIsDupe) - Diag(CurLoc, diag::warn_target_clone_duplicate_options); - else if (!HasCodeGenImpact) - // Ignore features in target_clone attribute that don't impact - // code generation - Diag(CurLoc, diag::warn_target_clone_no_impact_options); - else if (!Res.empty()) { - StringsBuffer.push_back(Res); - HasNotDefault = true; - } - } - } else if (TInfo.getTriple().isRISCV()) { - // Suppress warn_target_clone_mixed_values - HasCommas = false; - - // Cur is split's parts of Str. RISC-V uses Str directly, - // so skip when encountered more than once. 
- if (!Str.starts_with(Cur)) - continue; - - llvm::SmallVector<StringRef, 8> AttrStrs; - Str.split(AttrStrs, ";"); - - bool IsPriority = false; - bool IsDefault = false; - for (auto &AttrStr : AttrStrs) { - // Only support arch=+ext,... syntax. - if (AttrStr.starts_with("arch=+")) { - ParsedTargetAttr TargetAttr = - Context.getTargetInfo().parseTargetAttr(AttrStr); - - if (TargetAttr.Features.empty() || - llvm::any_of(TargetAttr.Features, [&](const StringRef Ext) { - return !RISCV().isValidFMVExtension(Ext); - })) - return Diag(CurLoc, diag::warn_unsupported_target_attribute) - << Unsupported << None << Str << TargetClones; - } else if (AttrStr.starts_with("default")) { - IsDefault = true; - DefaultIsDupe = HasDefault; - HasDefault = true; - } else if (AttrStr.consume_front("priority=")) { - IsPriority = true; - unsigned Digit; - if (AttrStr.getAsInteger(0, Digit)) - return Diag(CurLoc, diag::warn_unsupported_target_attribute) - << Unsupported << None << Str << TargetClones; - } else { - return Diag(CurLoc, diag::warn_unsupported_target_attribute) - << Unsupported << None << Str << TargetClones; - } - } - - if (IsPriority && IsDefault) - return Diag(CurLoc, diag::warn_unsupported_target_attribute) - << Unsupported << None << Str << TargetClones; - - if (llvm::is_contained(StringsBuffer, Str) || DefaultIsDupe) - Diag(CurLoc, diag::warn_target_clone_duplicate_options); - StringsBuffer.push_back(Str); - } else { - // Other targets ( currently X86 ) - if (Cur.starts_with("arch=")) { - if (!Context.getTargetInfo().isValidCPUName( - Cur.drop_front(sizeof("arch=") - 1))) - return Diag(CurLoc, diag::warn_unsupported_target_attribute) - << Unsupported << CPU << Cur.drop_front(sizeof("arch=") - 1) - << TargetClones; - } else if (Cur == "default") { - DefaultIsDupe = HasDefault; - HasDefault = true; - } else if (!Context.getTargetInfo().isValidFeatureName(Cur) || - Context.getTargetInfo().getFMVPriority(Cur) == 0) - return Diag(CurLoc, diag::warn_unsupported_target_attribute) - << Unsupported << None << Cur << TargetClones; - if (llvm::is_contained(StringsBuffer, Cur) || DefaultIsDupe) - Diag(CurLoc, diag::warn_target_clone_duplicate_options); - // Note: Add even if there are duplicates, since it changes name mangling. - StringsBuffer.push_back(Cur); - } - } - if (Str.rtrim().ends_with(",")) - return Diag(LiteralLoc, diag::warn_unsupported_target_attribute) - << Unsupported << None << "" << TargetClones; - return false; -} - static void handleTargetClonesAttr(Sema &S, Decl *D, const ParsedAttr &AL) { - if (S.Context.getTargetInfo().getTriple().isAArch64() && - !S.Context.getTargetInfo().hasFeature("fmv")) - return; - // Ensure we don't combine these with themselves, since that causes some // confusing behavior. 
if (const auto *Other = D->getAttr<TargetClonesAttr>()) { @@ -3581,31 +3371,6 @@ static void handleTargetClonesAttr(Sema &S, Decl *D, const ParsedAttr &AL) { if (checkAttrMutualExclusion<TargetClonesAttr>(S, D, AL)) return; - SmallVector<StringRef, 2> Strings; - SmallVector<SmallString<64>, 2> StringsBuffer; - bool HasCommas = false, HasDefault = false, HasNotDefault = false; - - for (unsigned I = 0, E = AL.getNumArgs(); I != E; ++I) { - StringRef CurStr; - SourceLocation LiteralLoc; - if (!S.checkStringLiteralArgumentAttr(AL, I, CurStr, &LiteralLoc) || - S.checkTargetClonesAttrString( - LiteralLoc, CurStr, - cast<StringLiteral>(AL.getArgAsExpr(I)->IgnoreParenCasts()), D, - HasDefault, HasCommas, HasNotDefault, StringsBuffer)) - return; - } - for (auto &SmallStr : StringsBuffer) - Strings.push_back(SmallStr.str()); - - if (HasCommas && AL.getNumArgs() > 1) - S.Diag(AL.getLoc(), diag::warn_target_clone_mixed_values); - - if (!HasDefault && !S.Context.getTargetInfo().getTriple().isAArch64()) { - S.Diag(AL.getLoc(), diag::err_target_clone_must_have_default); - return; - } - // FIXME: We could probably figure out how to get this to work for lambdas // someday. if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) { @@ -3617,13 +3382,34 @@ static void handleTargetClonesAttr(Sema &S, Decl *D, const ParsedAttr &AL) { } } - // No multiversion if we have default version only. - if (S.Context.getTargetInfo().getTriple().isAArch64() && !HasNotDefault) - return; + SmallVector<StringRef, 2> Params; + SmallVector<SourceLocation, 2> Locations; + for (unsigned I = 0, E = AL.getNumArgs(); I != E; ++I) { + StringRef Param; + SourceLocation Loc; + if (!S.checkStringLiteralArgumentAttr(AL, I, Param, &Loc)) + return; + Params.push_back(Param); + Locations.push_back(Loc); + } + + SmallVector<SmallString<64>, 2> NewParams; + if (S.Context.getTargetInfo().getTriple().isAArch64()) { + if (S.ARM().checkTargetClonesAttr(Params, Locations, NewParams)) + return; + } else if (S.Context.getTargetInfo().getTriple().isRISCV()) { + if (S.RISCV().checkTargetClonesAttr(Params, Locations, NewParams)) + return; + } else if (S.Context.getTargetInfo().getTriple().isX86()) { + if (S.X86().checkTargetClonesAttr(Params, Locations, NewParams)) + return; + } + Params.clear(); + for (auto &SmallStr : NewParams) + Params.push_back(SmallStr.str()); - cast<FunctionDecl>(D)->setIsMultiVersion(); TargetClonesAttr *NewAttr = ::new (S.Context) - TargetClonesAttr(S.Context, AL, Strings.data(), Strings.size()); + TargetClonesAttr(S.Context, AL, Params.data(), Params.size()); D->addAttr(NewAttr); } diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp index 728ada3..45c7178 100644 --- a/clang/lib/Sema/SemaExpr.cpp +++ b/clang/lib/Sema/SemaExpr.cpp @@ -4564,6 +4564,9 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T, case Type::Atomic: T = cast<AtomicType>(Ty)->getValueType(); break; + case Type::PredefinedSugar: + T = cast<PredefinedSugarType>(Ty)->desugar(); + break; } } while (!T.isNull() && T->isVariablyModifiedType()); } diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp index fd95f4e..0edfd60 100644 --- a/clang/lib/Sema/SemaExprCXX.cpp +++ b/clang/lib/Sema/SemaExprCXX.cpp @@ -3461,11 +3461,11 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name, // non-templated allocation function we are trying to declare here. 
if (FunctionDecl *Func = dyn_cast<FunctionDecl>(*Alloc)) { if (Func->getNumParams() == Params.size()) { - llvm::SmallVector<QualType, 3> FuncParams; - for (auto *P : Func->parameters()) - FuncParams.push_back( - Context.getCanonicalType(P->getType().getUnqualifiedType())); - if (llvm::ArrayRef(FuncParams) == Params) { + if (std::equal(Func->param_begin(), Func->param_end(), Params.begin(), + Params.end(), [&](ParmVarDecl *D, QualType RT) { + return Context.hasSameUnqualifiedType(D->getType(), + RT); + })) { // Make the function visible to name lookup, even if we found it in // an unimported module. It either is an implicitly-declared global // allocation function, or is suppressing that function. diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp index 1b54628..5dd5b49 100644 --- a/clang/lib/Sema/SemaOverload.cpp +++ b/clang/lib/Sema/SemaOverload.cpp @@ -11354,55 +11354,18 @@ OverloadingResult OverloadCandidateSet::BestViableFunction(Sema &S, DeferredCandidatesCount != 0 && !ResolutionByPerfectCandidateIsDisabled; if (TwoPhaseResolution) { - - PerfectViableFunction(S, Loc, Best); - if (Best != end()) - return ResultForBestCandidate(Best); + OverloadingResult Res = BestViableFunctionImpl(S, Loc, Best); + if (Best != end() && Best->isPerfectMatch(S.Context)) { + if (!(HasDeferredTemplateConstructors && + isa_and_nonnull<CXXConversionDecl>(Best->Function))) + return Res; + } } InjectNonDeducedTemplateCandidates(S); return BestViableFunctionImpl(S, Loc, Best); } -void OverloadCandidateSet::PerfectViableFunction( - Sema &S, SourceLocation Loc, OverloadCandidateSet::iterator &Best) { - - Best = end(); - for (auto It = Candidates.begin(); It != Candidates.end(); ++It) { - - if (!It->isPerfectMatch(S.getASTContext())) - continue; - - // We found a suitable conversion function - // but if there is a template constructor in the target class - // we might prefer that instead. - if (HasDeferredTemplateConstructors && - isa_and_nonnull<CXXConversionDecl>(It->Function)) { - Best = end(); - break; - } - - if (Best == end()) { - Best = It; - continue; - } - if (Best->Function && It->Function) { - FunctionDecl *D = - S.getMoreConstrainedFunction(Best->Function, It->Function); - if (D == nullptr) { - Best = end(); - break; - } - if (D == It->Function) - Best = It; - continue; - } - // ambiguous - Best = end(); - break; - } -} - OverloadingResult OverloadCandidateSet::BestViableFunctionImpl( Sema &S, SourceLocation Loc, OverloadCandidateSet::iterator &Best) { diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp index 43f7992..994cd07 100644 --- a/clang/lib/Sema/SemaRISCV.cpp +++ b/clang/lib/Sema/SemaRISCV.cpp @@ -1635,6 +1635,116 @@ bool SemaRISCV::isValidFMVExtension(StringRef Ext) { return -1 != RISCVISAInfo::getRISCVFeaturesBitsInfo(Ext).second; } +bool SemaRISCV::checkTargetVersionAttr(const StringRef Param, + const SourceLocation Loc) { + using namespace DiagAttrParams; + + llvm::SmallVector<StringRef, 8> AttrStrs; + Param.split(AttrStrs, ';'); + + bool HasArch = false; + bool HasPriority = false; + bool HasDefault = false; + bool DuplicateAttr = false; + for (StringRef AttrStr : AttrStrs) { + AttrStr = AttrStr.trim(); + // Only support arch=+ext,... syntax. 
+ if (AttrStr.starts_with("arch=+")) { + DuplicateAttr = HasArch; + HasArch = true; + ParsedTargetAttr TargetAttr = + getASTContext().getTargetInfo().parseTargetAttr(AttrStr); + + if (TargetAttr.Features.empty() || + llvm::any_of(TargetAttr.Features, [&](const StringRef Ext) { + return !isValidFMVExtension(Ext); + })) + return Diag(Loc, diag::warn_unsupported_target_attribute) + << Unsupported << None << AttrStr << TargetVersion; + } else if (AttrStr == "default") { + DuplicateAttr = HasDefault; + HasDefault = true; + } else if (AttrStr.consume_front("priority=")) { + DuplicateAttr = HasPriority; + HasPriority = true; + unsigned Digit; + if (AttrStr.getAsInteger(0, Digit)) + return Diag(Loc, diag::warn_unsupported_target_attribute) + << Unsupported << None << AttrStr << TargetVersion; + } else { + return Diag(Loc, diag::warn_unsupported_target_attribute) + << Unsupported << None << AttrStr << TargetVersion; + } + } + + if (((HasPriority || HasArch) && HasDefault) || DuplicateAttr || + (HasPriority && !HasArch)) + return Diag(Loc, diag::warn_unsupported_target_attribute) + << Unsupported << None << Param << TargetVersion; + + return false; +} + +bool SemaRISCV::checkTargetClonesAttr( + SmallVectorImpl<StringRef> &Params, SmallVectorImpl<SourceLocation> &Locs, + SmallVectorImpl<SmallString<64>> &NewParams) { + using namespace DiagAttrParams; + + assert(Params.size() == Locs.size() && + "Mismatch between number of string parameters and locations"); + + bool HasDefault = false; + for (unsigned I = 0, E = Params.size(); I < E; ++I) { + const StringRef Param = Params[I].trim(); + const SourceLocation &Loc = Locs[I]; + + llvm::SmallVector<StringRef, 8> AttrStrs; + Param.split(AttrStrs, ';'); + + bool IsPriority = false; + bool IsDefault = false; + for (StringRef AttrStr : AttrStrs) { + AttrStr = AttrStr.trim(); + // Only support arch=+ext,... syntax. 
+ if (AttrStr.starts_with("arch=+")) { + ParsedTargetAttr TargetAttr = + getASTContext().getTargetInfo().parseTargetAttr(AttrStr); + + if (TargetAttr.Features.empty() || + llvm::any_of(TargetAttr.Features, [&](const StringRef Ext) { + return !isValidFMVExtension(Ext); + })) + return Diag(Loc, diag::warn_unsupported_target_attribute) + << Unsupported << None << Param << TargetClones; + } else if (AttrStr == "default") { + IsDefault = true; + HasDefault = true; + } else if (AttrStr.consume_front("priority=")) { + IsPriority = true; + unsigned Digit; + if (AttrStr.getAsInteger(0, Digit)) + return Diag(Loc, diag::warn_unsupported_target_attribute) + << Unsupported << None << Param << TargetClones; + } else { + return Diag(Loc, diag::warn_unsupported_target_attribute) + << Unsupported << None << Param << TargetClones; + } + } + + if (IsPriority && IsDefault) + return Diag(Loc, diag::warn_unsupported_target_attribute) + << Unsupported << None << Param << TargetClones; + + if (llvm::is_contained(NewParams, Param)) + Diag(Loc, diag::warn_target_clone_duplicate_options); + NewParams.push_back(Param); + } + if (!HasDefault) + return Diag(Locs[0], diag::err_target_clone_must_have_default); + + return false; +} + SemaRISCV::SemaRISCV(Sema &S) : SemaBase(S) {} } // namespace clang diff --git a/clang/lib/Sema/SemaX86.cpp b/clang/lib/Sema/SemaX86.cpp index 5c149bd..850bcb1 100644 --- a/clang/lib/Sema/SemaX86.cpp +++ b/clang/lib/Sema/SemaX86.cpp @@ -954,6 +954,11 @@ bool SemaX86::CheckBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, l = 0; u = 15; break; + case X86::BI__builtin_ia32_prefetchi: + i = 1; + l = 2; // _MM_HINT_T1 + u = 3; // _MM_HINT_T0 + break; } // Note that we don't force a hard error on the range check here, allowing @@ -1056,4 +1061,61 @@ void SemaX86::handleForceAlignArgPointerAttr(Decl *D, const ParsedAttr &AL) { X86ForceAlignArgPointerAttr(getASTContext(), AL)); } +bool SemaX86::checkTargetClonesAttr( + SmallVectorImpl<StringRef> &Params, SmallVectorImpl<SourceLocation> &Locs, + SmallVectorImpl<SmallString<64>> &NewParams) { + using namespace DiagAttrParams; + + assert(Params.size() == Locs.size() && + "Mismatch between number of string parameters and locations"); + + bool HasDefault = false; + bool HasComma = false; + for (unsigned I = 0, E = Params.size(); I < E; ++I) { + const StringRef Param = Params[I].trim(); + const SourceLocation &Loc = Locs[I]; + + if (Param.empty() || Param.ends_with(',')) + return Diag(Loc, diag::warn_unsupported_target_attribute) + << Unsupported << None << "" << TargetClones; + + if (Param.contains(',')) + HasComma = true; + + StringRef LHS; + StringRef RHS = Param; + do { + std::tie(LHS, RHS) = RHS.split(','); + LHS = LHS.trim(); + const SourceLocation &CurLoc = + Loc.getLocWithOffset(LHS.data() - Param.data()); + + if (LHS.starts_with("arch=")) { + if (!getASTContext().getTargetInfo().isValidCPUName( + LHS.drop_front(sizeof("arch=") - 1))) + return Diag(CurLoc, diag::warn_unsupported_target_attribute) + << Unsupported << CPU << LHS.drop_front(sizeof("arch=") - 1) + << TargetClones; + } else if (LHS == "default") + HasDefault = true; + else if (!getASTContext().getTargetInfo().isValidFeatureName(LHS) || + getASTContext().getTargetInfo().getFMVPriority(LHS) == 0) + return Diag(CurLoc, diag::warn_unsupported_target_attribute) + << Unsupported << None << LHS << TargetClones; + + if (llvm::is_contained(NewParams, LHS)) + Diag(CurLoc, diag::warn_target_clone_duplicate_options); + // Note: Add even if there are duplicates, since it changes name 
mangling. + NewParams.push_back(LHS); + } while (!RHS.empty()); + } + if (HasComma && Params.size() > 1) + Diag(Locs[0], diag::warn_target_clone_mixed_values); + + if (!HasDefault) + return Diag(Locs[0], diag::err_target_clone_must_have_default); + + return false; +} + } // namespace clang diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h index 286c2b4..c7428d1 100644 --- a/clang/lib/Sema/TreeTransform.h +++ b/clang/lib/Sema/TreeTransform.h @@ -7245,6 +7245,12 @@ QualType TreeTransform<Derived>::TransformDependentBitIntType( return Result; } +template <typename Derived> +QualType TreeTransform<Derived>::TransformPredefinedSugarType( + TypeLocBuilder &TLB, PredefinedSugarTypeLoc TL) { + llvm_unreachable("This type does not need to be transformed."); +} + /// Simple iterator that traverses the template arguments in a /// container that provides a \c getArgLoc() member function. /// diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp index 3596d224..10aedb6 100644 --- a/clang/lib/Serialization/ASTReader.cpp +++ b/clang/lib/Serialization/ASTReader.cpp @@ -7574,11 +7574,16 @@ void TypeLocReader::VisitPipeTypeLoc(PipeTypeLoc TL) { void TypeLocReader::VisitBitIntTypeLoc(clang::BitIntTypeLoc TL) { TL.setNameLoc(readSourceLocation()); } + void TypeLocReader::VisitDependentBitIntTypeLoc( clang::DependentBitIntTypeLoc TL) { TL.setNameLoc(readSourceLocation()); } +void TypeLocReader::VisitPredefinedSugarTypeLoc(PredefinedSugarTypeLoc TL) { + // Nothing to do. +} + void ASTRecordReader::readTypeLoc(TypeLoc TL) { TypeLocReader TLR(*this); for (; !TL.isNull(); TL = TL.getNextTypeLoc()) diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp index e868afe..a6957e5 100644 --- a/clang/lib/Serialization/ASTWriter.cpp +++ b/clang/lib/Serialization/ASTWriter.cpp @@ -692,7 +692,6 @@ void TypeLocWriter::VisitAtomicTypeLoc(AtomicTypeLoc TL) { void TypeLocWriter::VisitPipeTypeLoc(PipeTypeLoc TL) { addSourceLocation(TL.getKWLoc()); } - void TypeLocWriter::VisitBitIntTypeLoc(clang::BitIntTypeLoc TL) { addSourceLocation(TL.getNameLoc()); } @@ -701,6 +700,11 @@ void TypeLocWriter::VisitDependentBitIntTypeLoc( addSourceLocation(TL.getNameLoc()); } +void TypeLocWriter::VisitPredefinedSugarTypeLoc( + clang::PredefinedSugarTypeLoc TL) { + // Nothing to do. 
+} + void ASTWriter::WriteTypeAbbrevs() { using namespace llvm; diff --git a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp index 30a0497..68efdba 100644 --- a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp @@ -1281,7 +1281,7 @@ SVal MallocChecker::evalMulForBufferSize(CheckerContext &C, const Expr *Blocks, SVal BlockBytesVal = C.getSVal(BlockBytes); ProgramStateRef State = C.getState(); SVal TotalSize = SB.evalBinOp(State, BO_Mul, BlocksVal, BlockBytesVal, - SB.getContext().getSizeType()); + SB.getContext().getCanonicalSizeType()); return TotalSize; } @@ -1311,11 +1311,9 @@ static bool isStandardRealloc(const CallEvent &Call) { const FunctionDecl *FD = dyn_cast<FunctionDecl>(Call.getDecl()); assert(FD); ASTContext &AC = FD->getASTContext(); - - return FD->getDeclaredReturnType().getDesugaredType(AC) == AC.VoidPtrTy && - FD->getParamDecl(0)->getType().getDesugaredType(AC) == AC.VoidPtrTy && - FD->getParamDecl(1)->getType().getDesugaredType(AC) == - AC.getSizeType(); + return AC.hasSameType(FD->getDeclaredReturnType(), AC.VoidPtrTy) && + AC.hasSameType(FD->getParamDecl(0)->getType(), AC.VoidPtrTy) && + AC.hasSameType(FD->getParamDecl(1)->getType(), AC.getSizeType()); } static bool isGRealloc(const CallEvent &Call) { @@ -1323,10 +1321,9 @@ static bool isGRealloc(const CallEvent &Call) { assert(FD); ASTContext &AC = FD->getASTContext(); - return FD->getDeclaredReturnType().getDesugaredType(AC) == AC.VoidPtrTy && - FD->getParamDecl(0)->getType().getDesugaredType(AC) == AC.VoidPtrTy && - FD->getParamDecl(1)->getType().getDesugaredType(AC) == - AC.UnsignedLongTy; + return AC.hasSameType(FD->getDeclaredReturnType(), AC.VoidPtrTy) && + AC.hasSameType(FD->getParamDecl(0)->getType(), AC.VoidPtrTy) && + AC.hasSameType(FD->getParamDecl(1)->getType(), AC.UnsignedLongTy); } void MallocChecker::checkRealloc(ProgramStateRef State, const CallEvent &Call, @@ -2830,10 +2827,10 @@ MallocChecker::ReallocMemAux(CheckerContext &C, const CallEvent &Call, return nullptr; // Compare the size argument to 0. 
- DefinedOrUnknownSVal SizeZero = - svalBuilder.evalEQ(State, TotalSize.castAs<DefinedOrUnknownSVal>(), - svalBuilder.makeIntValWithWidth( - svalBuilder.getContext().getSizeType(), 0)); + DefinedOrUnknownSVal SizeZero = svalBuilder.evalEQ( + State, TotalSize.castAs<DefinedOrUnknownSVal>(), + svalBuilder.makeIntValWithWidth( + svalBuilder.getContext().getCanonicalSizeType(), 0)); ProgramStateRef StatePtrIsNull, StatePtrNotNull; std::tie(StatePtrIsNull, StatePtrNotNull) = State->assume(PtrEQ); diff --git a/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp index 1c748f9..52b3d1e 100644 --- a/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp @@ -1666,7 +1666,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries( const QualType IntTy = ACtx.IntTy; const QualType UnsignedIntTy = ACtx.UnsignedIntTy; const QualType LongTy = ACtx.LongTy; - const QualType SizeTy = ACtx.getSizeType(); + const QualType SizeTyCanonTy = ACtx.getCanonicalSizeType(); const QualType VoidPtrTy = getPointerTy(VoidTy); // void * const QualType IntPtrTy = getPointerTy(IntTy); // int * @@ -1684,14 +1684,14 @@ void StdLibraryFunctionsChecker::initFunctionSummaries( const QualType ConstWchar_tPtrTy = getPointerTy(getConstTy(WCharTy)); // const wchar_t * const QualType ConstVoidPtrRestrictTy = getRestrictTy(ConstVoidPtrTy); - const QualType SizePtrTy = getPointerTy(SizeTy); + const QualType SizePtrTy = getPointerTy(SizeTyCanonTy); const QualType SizePtrRestrictTy = getRestrictTy(SizePtrTy); const RangeInt IntMax = BVF.getMaxValue(IntTy)->getLimitedValue(); const RangeInt UnsignedIntMax = BVF.getMaxValue(UnsignedIntTy)->getLimitedValue(); const RangeInt LongMax = BVF.getMaxValue(LongTy)->getLimitedValue(); - const RangeInt SizeMax = BVF.getMaxValue(SizeTy)->getLimitedValue(); + const RangeInt SizeMax = BVF.getMaxValue(SizeTyCanonTy)->getLimitedValue(); // Set UCharRangeMax to min of int or uchar maximum value. // The C standard states that the arguments of functions like isalpha must @@ -2057,18 +2057,19 @@ void StdLibraryFunctionsChecker::initFunctionSummaries( // size_t fread(void *restrict ptr, size_t size, size_t nitems, // FILE *restrict stream); - addToFunctionSummaryMap( - "fread", - Signature(ArgTypes{VoidPtrRestrictTy, SizeTy, SizeTy, FilePtrRestrictTy}, - RetType{SizeTy}), - FreadSummary); + addToFunctionSummaryMap("fread", + Signature(ArgTypes{VoidPtrRestrictTy, SizeTyCanonTy, + SizeTyCanonTy, FilePtrRestrictTy}, + RetType{SizeTyCanonTy}), + FreadSummary); // size_t fwrite(const void *restrict ptr, size_t size, size_t nitems, // FILE *restrict stream); - addToFunctionSummaryMap("fwrite", - Signature(ArgTypes{ConstVoidPtrRestrictTy, SizeTy, - SizeTy, FilePtrRestrictTy}, - RetType{SizeTy}), - FreadSummary); + addToFunctionSummaryMap( + "fwrite", + Signature(ArgTypes{ConstVoidPtrRestrictTy, SizeTyCanonTy, SizeTyCanonTy, + FilePtrRestrictTy}, + RetType{SizeTyCanonTy}), + FreadSummary); std::optional<QualType> Ssize_tTy = lookupTy("ssize_t"); std::optional<RangeInt> Ssize_tMax = getMaxValue(Ssize_tTy); @@ -2083,12 +2084,14 @@ void StdLibraryFunctionsChecker::initFunctionSummaries( // should handle them together with the rest of the POSIX functions. 
// ssize_t read(int fildes, void *buf, size_t nbyte); addToFunctionSummaryMap( - "read", Signature(ArgTypes{IntTy, VoidPtrTy, SizeTy}, RetType{Ssize_tTy}), + "read", + Signature(ArgTypes{IntTy, VoidPtrTy, SizeTyCanonTy}, RetType{Ssize_tTy}), ReadSummary); // ssize_t write(int fildes, const void *buf, size_t nbyte); addToFunctionSummaryMap( "write", - Signature(ArgTypes{IntTy, ConstVoidPtrTy, SizeTy}, RetType{Ssize_tTy}), + Signature(ArgTypes{IntTy, ConstVoidPtrTy, SizeTyCanonTy}, + RetType{Ssize_tTy}), ReadSummary); auto GetLineSummary = @@ -2618,7 +2621,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries( // char *strndup(const char *s, size_t n); addToFunctionSummaryMap( "strndup", - Signature(ArgTypes{ConstCharPtrTy, SizeTy}, RetType{CharPtrTy}), + Signature(ArgTypes{ConstCharPtrTy, SizeTyCanonTy}, RetType{CharPtrTy}), Summary(NoEvalCall) .ArgConstraint(NotNull(ArgNo(0))) .ArgConstraint( @@ -2649,7 +2652,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries( // char *getcwd(char *buf, size_t size); addToFunctionSummaryMap( - "getcwd", Signature(ArgTypes{CharPtrTy, SizeTy}, RetType{CharPtrTy}), + "getcwd", + Signature(ArgTypes{CharPtrTy, SizeTyCanonTy}, RetType{CharPtrTy}), Summary(NoEvalCall) .Case({NotNull(0), ArgumentCondition(1, WithinRange, Range(1, SizeMax)), @@ -2957,8 +2961,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries( // FIXME: Improve for errno modeling. addToFunctionSummaryMap( "mmap", - Signature(ArgTypes{VoidPtrTy, SizeTy, IntTy, IntTy, IntTy, Off_tTy}, - RetType{VoidPtrTy}), + Signature( + ArgTypes{VoidPtrTy, SizeTyCanonTy, IntTy, IntTy, IntTy, Off_tTy}, + RetType{VoidPtrTy}), Summary(NoEvalCall) .ArgConstraint(ArgumentCondition(1, WithinRange, Range(1, SizeMax))) .ArgConstraint( @@ -2970,8 +2975,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries( // FIXME: Improve for errno modeling. 
addToFunctionSummaryMap( "mmap64", - Signature(ArgTypes{VoidPtrTy, SizeTy, IntTy, IntTy, IntTy, Off64_tTy}, - RetType{VoidPtrTy}), + Signature( + ArgTypes{VoidPtrTy, SizeTyCanonTy, IntTy, IntTy, IntTy, Off64_tTy}, + RetType{VoidPtrTy}), Summary(NoEvalCall) .ArgConstraint(ArgumentCondition(1, WithinRange, Range(1, SizeMax))) .ArgConstraint( @@ -3002,8 +3008,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries( // size_t bufsize); addToFunctionSummaryMap( "readlink", - Signature(ArgTypes{ConstCharPtrRestrictTy, CharPtrRestrictTy, SizeTy}, - RetType{Ssize_tTy}), + Signature( + ArgTypes{ConstCharPtrRestrictTy, CharPtrRestrictTy, SizeTyCanonTy}, + RetType{Ssize_tTy}), Summary(NoEvalCall) .Case({ArgumentCondition(2, WithinRange, Range(1, IntMax)), ReturnValueCondition(LessThanOrEq, ArgNo(2)), @@ -3025,9 +3032,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries( // char *restrict buf, size_t bufsize); addToFunctionSummaryMap( "readlinkat", - Signature( - ArgTypes{IntTy, ConstCharPtrRestrictTy, CharPtrRestrictTy, SizeTy}, - RetType{Ssize_tTy}), + Signature(ArgTypes{IntTy, ConstCharPtrRestrictTy, CharPtrRestrictTy, + SizeTyCanonTy}, + RetType{Ssize_tTy}), Summary(NoEvalCall) .Case({ArgumentCondition(3, WithinRange, Range(1, IntMax)), ReturnValueCondition(LessThanOrEq, ArgNo(3)), @@ -3268,14 +3275,14 @@ void StdLibraryFunctionsChecker::initFunctionSummaries( // size_t length, // int flags, struct sockaddr *restrict address, // socklen_t *restrict address_len); - Signature(ArgTypes{IntTy, VoidPtrRestrictTy, SizeTy, IntTy, + Signature(ArgTypes{IntTy, VoidPtrRestrictTy, SizeTyCanonTy, IntTy, StructSockaddrPtrRestrictTy, Socklen_tPtrRestrictTy}, RetType{Ssize_tTy}), Recvfrom)) addToFunctionSummaryMap( "recvfrom", - Signature(ArgTypes{IntTy, VoidPtrRestrictTy, SizeTy, IntTy, + Signature(ArgTypes{IntTy, VoidPtrRestrictTy, SizeTyCanonTy, IntTy, Irrelevant, Socklen_tPtrRestrictTy}, RetType{Ssize_tTy}), Recvfrom); @@ -3297,14 +3304,14 @@ void StdLibraryFunctionsChecker::initFunctionSummaries( // ssize_t sendto(int socket, const void *message, size_t length, // int flags, const struct sockaddr *dest_addr, // socklen_t dest_len); - Signature(ArgTypes{IntTy, ConstVoidPtrTy, SizeTy, IntTy, + Signature(ArgTypes{IntTy, ConstVoidPtrTy, SizeTyCanonTy, IntTy, ConstStructSockaddrPtrTy, Socklen_tTy}, RetType{Ssize_tTy}), Sendto)) addToFunctionSummaryMap( "sendto", - Signature(ArgTypes{IntTy, ConstVoidPtrTy, SizeTy, IntTy, Irrelevant, - Socklen_tTy}, + Signature(ArgTypes{IntTy, ConstVoidPtrTy, SizeTyCanonTy, IntTy, + Irrelevant, Socklen_tTy}, RetType{Ssize_tTy}), Sendto); @@ -3320,7 +3327,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries( // ssize_t recv(int sockfd, void *buf, size_t len, int flags); addToFunctionSummaryMap( "recv", - Signature(ArgTypes{IntTy, VoidPtrTy, SizeTy, IntTy}, + Signature(ArgTypes{IntTy, VoidPtrTy, SizeTyCanonTy, IntTy}, RetType{Ssize_tTy}), Summary(NoEvalCall) .Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)), @@ -3395,7 +3402,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries( // ssize_t send(int sockfd, const void *buf, size_t len, int flags); addToFunctionSummaryMap( "send", - Signature(ArgTypes{IntTy, ConstVoidPtrTy, SizeTy, IntTy}, + Signature(ArgTypes{IntTy, ConstVoidPtrTy, SizeTyCanonTy, IntTy}, RetType{Ssize_tTy}), Summary(NoEvalCall) .Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)), @@ -3683,7 +3690,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries( // int pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize); 
addToFunctionSummaryMap( {"pthread_attr_setstacksize", "pthread_attr_setguardsize"}, - Signature(ArgTypes{Pthread_attr_tPtrTy, SizeTy}, RetType{IntTy}), + Signature(ArgTypes{Pthread_attr_tPtrTy, SizeTyCanonTy}, RetType{IntTy}), Summary(NoEvalCall) .ArgConstraint(NotNull(ArgNo(0))) .ArgConstraint( @@ -3888,13 +3895,14 @@ void StdLibraryFunctionsChecker::initFunctionSummaries( .ArgConstraint(NotNull(ArgNo(1)))); addToFunctionSummaryMap( "__buf_size_arg_constraint", - Signature(ArgTypes{ConstVoidPtrTy, SizeTy}, RetType{IntTy}), + Signature(ArgTypes{ConstVoidPtrTy, SizeTyCanonTy}, RetType{IntTy}), Summary(EvalCallAsPure) .ArgConstraint( BufferSize(/*Buffer=*/ArgNo(0), /*BufSize=*/ArgNo(1)))); addToFunctionSummaryMap( "__buf_size_arg_constraint_mul", - Signature(ArgTypes{ConstVoidPtrTy, SizeTy, SizeTy}, RetType{IntTy}), + Signature(ArgTypes{ConstVoidPtrTy, SizeTyCanonTy, SizeTyCanonTy}, + RetType{IntTy}), Summary(EvalCallAsPure) .ArgConstraint(BufferSize(/*Buffer=*/ArgNo(0), /*BufSize=*/ArgNo(1), /*BufSizeMultiplier=*/ArgNo(2)))); diff --git a/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp index 1042b43..c97341f 100644 --- a/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp +++ b/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp @@ -92,7 +92,7 @@ ProgramStateRef VLASizeChecker::checkVLA(CheckerContext &C, ASTContext &Ctx = C.getASTContext(); SValBuilder &SVB = C.getSValBuilder(); - CanQualType SizeTy = Ctx.getSizeType(); + QualType SizeTy = Ctx.getSizeType(); uint64_t SizeMax = SVB.getBasicValueFactory().getMaxValue(SizeTy)->getZExtValue(); diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp index 9bd8547..8ce2706 100644 --- a/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp +++ b/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp @@ -24,7 +24,6 @@ #include "clang/Tooling/DependencyScanning/DependencyScanningService.h" #include "clang/Tooling/DependencyScanning/InProcessModuleCache.h" #include "clang/Tooling/DependencyScanning/ModuleDepCollector.h" -#include "clang/Tooling/Tooling.h" #include "llvm/ADT/IntrusiveRefCntPtr.h" #include "llvm/Support/Allocator.h" #include "llvm/Support/Error.h" @@ -376,25 +375,23 @@ public: /// A clang tool that runs the preprocessor in a mode that's optimized for /// dependency scanning for the given compiler invocation. -class DependencyScanningAction : public tooling::ToolAction { +class DependencyScanningAction { public: DependencyScanningAction( DependencyScanningService &Service, StringRef WorkingDirectory, DependencyConsumer &Consumer, DependencyActionController &Controller, llvm::IntrusiveRefCntPtr<DependencyScanningWorkerFilesystem> DepFS, - bool DisableFree, std::optional<StringRef> ModuleName = std::nullopt) + std::optional<StringRef> ModuleName = std::nullopt) : Service(Service), WorkingDirectory(WorkingDirectory), Consumer(Consumer), Controller(Controller), DepFS(std::move(DepFS)), - DisableFree(DisableFree), ModuleName(ModuleName) {} + ModuleName(ModuleName) {} bool runInvocation(std::shared_ptr<CompilerInvocation> Invocation, - FileManager *DriverFileMgr, + IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS, std::shared_ptr<PCHContainerOperations> PCHContainerOps, - DiagnosticConsumer *DiagConsumer) override { + DiagnosticConsumer *DiagConsumer) { // Make a deep copy of the original Clang invocation. 
     CompilerInvocation OriginalInvocation(*Invocation);
-    // Restore the value of DisableFree, which may be modified by Tooling.
-    OriginalInvocation.getFrontendOpts().DisableFree = DisableFree;
     if (any(Service.getOptimizeArgs() & ScanningOptimizations::Macros))
       canonicalizeDefines(OriginalInvocation.getPreprocessorOpts());
@@ -419,8 +416,8 @@ public:
     // Create the compiler's actual diagnostics engine.
     sanitizeDiagOpts(ScanInstance.getDiagnosticOpts());
     assert(!DiagConsumerFinished && "attempt to reuse finished consumer");
-    ScanInstance.createDiagnostics(DriverFileMgr->getVirtualFileSystem(),
-                                   DiagConsumer, /*ShouldOwnClient=*/false);
+    ScanInstance.createDiagnostics(*FS, DiagConsumer,
+                                   /*ShouldOwnClient=*/false);
     if (!ScanInstance.hasDiagnostics())
       return false;
@@ -431,6 +428,7 @@ public:
     ScanInstance.getHeaderSearchOpts().BuildSessionTimestamp =
         Service.getBuildSessionTimestamp();
 
+    ScanInstance.getFrontendOpts().DisableFree = false;
     ScanInstance.getFrontendOpts().GenerateGlobalModuleIndex = false;
     ScanInstance.getFrontendOpts().UseGlobalModuleIndex = false;
     // This will prevent us compiling individual modules asynchronously since
@@ -441,9 +439,9 @@ public:
         any(Service.getOptimizeArgs() & ScanningOptimizations::VFS);
 
     // Support for virtual file system overlays.
-    auto FS = createVFSFromCompilerInvocation(
-        ScanInstance.getInvocation(), ScanInstance.getDiagnostics(),
-        DriverFileMgr->getVirtualFileSystemPtr());
+    FS = createVFSFromCompilerInvocation(ScanInstance.getInvocation(),
+                                         ScanInstance.getDiagnostics(),
+                                         std::move(FS));
 
     // Create a new FileManager to match the invocation's FileSystemOptions.
     auto *FileMgr = ScanInstance.createFileManager(FS);
@@ -554,9 +552,6 @@ public:
     if (Result)
       setLastCC1Arguments(std::move(OriginalInvocation));
 
-    // Propagate the statistics to the parent FileManager.
-    DriverFileMgr->AddStats(ScanInstance.getFileManager());
-
     return Result;
   }
@@ -584,7 +579,6 @@ private:
   DependencyConsumer &Consumer;
   DependencyActionController &Controller;
   llvm::IntrusiveRefCntPtr<DependencyScanningWorkerFilesystem> DepFS;
-  bool DisableFree;
   std::optional<StringRef> ModuleName;
   std::optional<CompilerInstance> ScanInstanceStorage;
   std::shared_ptr<ModuleDepCollector> MDC;
@@ -669,15 +663,14 @@ llvm::Error DependencyScanningWorker::computeDependencies(
 }
 
 static bool forEachDriverJob(
-    ArrayRef<std::string> ArgStrs, DiagnosticsEngine &Diags, FileManager &FM,
+    ArrayRef<std::string> ArgStrs, DiagnosticsEngine &Diags,
+    IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
     llvm::function_ref<bool(const driver::Command &Cmd)> Callback) {
   SmallVector<const char *, 256> Argv;
   Argv.reserve(ArgStrs.size());
   for (const std::string &Arg : ArgStrs)
     Argv.push_back(Arg.c_str());
 
-  llvm::vfs::FileSystem *FS = &FM.getVirtualFileSystem();
-
   std::unique_ptr<driver::Driver> Driver = std::make_unique<driver::Driver>(
       Argv[0], llvm::sys::getDefaultTargetTriple(), Diags,
       "clang LLVM compiler", FS);
@@ -687,7 +680,8 @@ static bool forEachDriverJob(
   bool CLMode = driver::IsClangCL(
       driver::getDriverMode(Argv[0], ArrayRef(Argv).slice(1)));
 
-  if (llvm::Error E = driver::expandResponseFiles(Argv, CLMode, Alloc, FS)) {
+  if (llvm::Error E =
+          driver::expandResponseFiles(Argv, CLMode, Alloc, FS.get())) {
     Diags.Report(diag::err_drv_expand_response_file)
         << llvm::toString(std::move(E));
     return false;
@@ -710,17 +704,25 @@ static bool forEachDriverJob(
 
 static bool createAndRunToolInvocation(
     std::vector<std::string> CommandLine, DependencyScanningAction &Action,
-    FileManager &FM,
+    IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
     std::shared_ptr<clang::PCHContainerOperations> &PCHContainerOps,
     DiagnosticsEngine &Diags, DependencyConsumer &Consumer) {
   // Save executable path before providing CommandLine to ToolInvocation
   std::string Executable = CommandLine[0];
-  ToolInvocation Invocation(std::move(CommandLine), &Action, &FM,
-                            PCHContainerOps);
-  Invocation.setDiagnosticConsumer(Diags.getClient());
-  Invocation.setDiagnosticOptions(&Diags.getDiagnosticOptions());
-  if (!Invocation.run())
+
+  llvm::opt::ArgStringList Argv;
+  for (const std::string &Str : ArrayRef(CommandLine).drop_front())
+    Argv.push_back(Str.c_str());
+
+  auto Invocation = std::make_shared<CompilerInvocation>();
+  if (!CompilerInvocation::CreateFromArgs(*Invocation, Argv, Diags)) {
+    // FIXME: Should we just go on like cc1_main does?
+    return false;
+  }
+
+  if (!Action.runInvocation(std::move(Invocation), std::move(FS),
+                            PCHContainerOps, Diags.getClient()))
     return false;
 
   std::vector<std::string> Args = Action.takeLastCC1Arguments();
@@ -733,37 +735,24 @@ bool DependencyScanningWorker::scanDependencies(
     DependencyConsumer &Consumer, DependencyActionController &Controller,
     DiagnosticConsumer &DC, llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
     std::optional<StringRef> ModuleName) {
-  auto FileMgr =
-      llvm::makeIntrusiveRefCnt<FileManager>(FileSystemOptions{}, FS);
-
   std::vector<const char *> CCommandLine(CommandLine.size(), nullptr);
   llvm::transform(CommandLine, CCommandLine.begin(),
                   [](const std::string &Str) { return Str.c_str(); });
   auto DiagOpts = CreateAndPopulateDiagOpts(CCommandLine);
   sanitizeDiagOpts(*DiagOpts);
-  IntrusiveRefCntPtr<DiagnosticsEngine> Diags =
-      CompilerInstance::createDiagnostics(FileMgr->getVirtualFileSystem(),
-                                          *DiagOpts, &DC,
-                                          /*ShouldOwnClient=*/false);
-
-  // Although `Diagnostics` are used only for command-line parsing, the
-  // custom `DiagConsumer` might expect a `SourceManager` to be present.
-  SourceManager SrcMgr(*Diags, *FileMgr);
-  Diags->setSourceManager(&SrcMgr);
-  // DisableFree is modified by Tooling for running
-  // in-process; preserve the original value, which is
-  // always true for a driver invocation.
-  bool DisableFree = true;
+  auto Diags = CompilerInstance::createDiagnostics(*FS, *DiagOpts, &DC,
+                                                   /*ShouldOwnClient=*/false);
+
   DependencyScanningAction Action(Service, WorkingDirectory, Consumer,
-                                  Controller, DepFS, DisableFree, ModuleName);
+                                  Controller, DepFS, ModuleName);
   bool Success = false;
   if (CommandLine[1] == "-cc1") {
-    Success = createAndRunToolInvocation(CommandLine, Action, *FileMgr,
+    Success = createAndRunToolInvocation(CommandLine, Action, FS,
                                          PCHContainerOps, *Diags, Consumer);
   } else {
     Success = forEachDriverJob(
-        CommandLine, *Diags, *FileMgr, [&](const driver::Command &Cmd) {
+        CommandLine, *Diags, FS, [&](const driver::Command &Cmd) {
           if (StringRef(Cmd.getCreator().getName()) != "clang") {
             // Non-clang command. Just pass through to the dependency
            // consumer.
@@ -782,7 +771,7 @@ bool DependencyScanningWorker::scanDependencies(
           // system to ensure that any file system requests that
           // are made by the driver do not go through the
           // dependency scanning filesystem.
-          return createAndRunToolInvocation(std::move(Argv), Action, *FileMgr,
+          return createAndRunToolInvocation(std::move(Argv), Action, FS,
                                             PCHContainerOps, *Diags, Consumer);
         });
   }
diff --git a/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp b/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
index 9f10ee1..2b5a293 100644
--- a/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
+++ b/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
@@ -285,8 +285,7 @@ HeaderIncludes::HeaderIncludes(StringRef FileName, StringRef Code,
       MaxInsertOffset(MinInsertOffset +
                       getMaxHeaderInsertionOffset(
                           FileName, Code.drop_front(MinInsertOffset), Style)),
-      MainIncludeFound(false),
-      Categories(Style, FileName) {
+      MainIncludeFound(false), Categories(Style, FileName) {
   // Add 0 for main header and INT_MAX for headers that are not in any
   // category.
   Priorities = {0, INT_MAX};
diff --git a/clang/lib/Tooling/Inclusions/Stdlib/StandardLibrary.cpp b/clang/lib/Tooling/Inclusions/Stdlib/StandardLibrary.cpp
index b88e6db..807a8d8 100644
--- a/clang/lib/Tooling/Inclusions/Stdlib/StandardLibrary.cpp
+++ b/clang/lib/Tooling/Inclusions/Stdlib/StandardLibrary.cpp
@@ -131,7 +131,7 @@ static int initialize(Lang Language) {
     Mapping->SymbolNames[SymIndex] = {
         QName.data(), NSLen, static_cast<unsigned int>(QName.size() - NSLen)};
     if (!HeaderName.empty())
-       Mapping->SymbolHeaderIDs[SymIndex].push_back(AddHeader(HeaderName));
+      Mapping->SymbolHeaderIDs[SymIndex].push_back(AddHeader(HeaderName));
 
     NSSymbolMap &NSSymbols = AddNS(QName.take_front(NSLen));
     NSSymbols.try_emplace(QName.drop_front(NSLen), SymIndex);
@@ -236,7 +236,7 @@ std::optional<Symbol> Symbol::named(llvm::StringRef Scope, llvm::StringRef Name,
   return std::nullopt;
 }
 std::optional<Header> Symbol::header() const {
-  const auto& Headers = getMappingPerLang(Language)->SymbolHeaderIDs[ID];
+  const auto &Headers = getMappingPerLang(Language)->SymbolHeaderIDs[ID];
   if (Headers.empty())
     return std::nullopt;
   return Header(Headers.front(), Language);
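
Note on the DependencyScanningWorker.cpp hunks above: createAndRunToolInvocation no longer routes through tooling::ToolInvocation; it parses the argument vector directly into a CompilerInvocation via CompilerInvocation::CreateFromArgs and runs it against an explicitly passed virtual file system. The sketch below illustrates only that parsing pattern and is not part of the patch; parseAndRun and RunWithInvocation are hypothetical stand-ins for DependencyScanningAction::runInvocation and its caller.

// Minimal sketch (assumed helper, not from the patch): turn a command line
// such as {"clang", "-cc1", ...} into a CompilerInvocation and hand it,
// together with an explicit VFS, to a caller-provided callback.
#include "clang/Basic/Diagnostic.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/Support/VirtualFileSystem.h"
#include <memory>
#include <string>
#include <vector>

static bool parseAndRun(
    const std::vector<std::string> &CommandLine,
    llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem> FS,
    clang::DiagnosticsEngine &Diags,
    llvm::function_ref<bool(std::shared_ptr<clang::CompilerInvocation>,
                            llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem>)>
        RunWithInvocation) {
  // Drop the executable name; CreateFromArgs expects only the arguments.
  std::vector<const char *> Argv;
  for (const std::string &Str : llvm::ArrayRef(CommandLine).drop_front())
    Argv.push_back(Str.c_str());

  // Parse the arguments; any parse errors are reported through Diags.
  auto Invocation = std::make_shared<clang::CompilerInvocation>();
  if (!clang::CompilerInvocation::CreateFromArgs(*Invocation, Argv, Diags))
    return false;

  // Run the parsed invocation against the caller's file system.
  return RunWithInvocation(std::move(Invocation), std::move(FS));
}

Passing the VFS explicitly in this way lets a single IntrusiveRefCntPtr<llvm::vfs::FileSystem> flow from the driver jobs through CompilerInstance::createDiagnostics and createVFSFromCompilerInvocation without constructing an intermediate FileManager, which is the design the patch adopts.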