Diffstat (limited to 'clang/lib/AST')
-rw-r--r--  clang/lib/AST/ASTConcept.cpp             |   2
-rw-r--r--  clang/lib/AST/ASTContext.cpp             |   4
-rw-r--r--  clang/lib/AST/ByteCode/Compiler.cpp      | 291
-rw-r--r--  clang/lib/AST/ByteCode/Interp.cpp        |   9
-rw-r--r--  clang/lib/AST/ByteCode/Interp.h          |  76
-rw-r--r--  clang/lib/AST/ByteCode/InterpBuiltin.cpp | 191
-rw-r--r--  clang/lib/AST/ByteCode/Opcodes.td        |  23
-rw-r--r--  clang/lib/AST/CMakeLists.txt             |   1
-rw-r--r--  clang/lib/AST/ExprConstant.cpp           | 180
-rw-r--r--  clang/lib/AST/InferAlloc.cpp             | 201
-rw-r--r--  clang/lib/AST/StmtOpenACC.cpp            |  54
-rw-r--r--  clang/lib/AST/TypePrinter.cpp            |   3
12 files changed, 891 insertions, 144 deletions
diff --git a/clang/lib/AST/ASTConcept.cpp b/clang/lib/AST/ASTConcept.cpp index 9ea104c..fd12bc4 100644 --- a/clang/lib/AST/ASTConcept.cpp +++ b/clang/lib/AST/ASTConcept.cpp @@ -86,7 +86,7 @@ void ConstraintSatisfaction::Profile(llvm::FoldingSetNodeID &ID, ID.AddPointer(ConstraintOwner); ID.AddInteger(TemplateArgs.size()); for (auto &Arg : TemplateArgs) - C.getCanonicalTemplateArgument(Arg).Profile(ID, C); + Arg.Profile(ID, C); } ConceptReference * diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp index 32c8f62..687cd46 100644 --- a/clang/lib/AST/ASTContext.cpp +++ b/clang/lib/AST/ASTContext.cpp @@ -1648,6 +1648,9 @@ ASTContext::findPointerAuthContent(QualType T) const { if (!RD) return PointerAuthContent::None; + if (RD->isInvalidDecl()) + return PointerAuthContent::None; + if (auto Existing = RecordContainsAddressDiscriminatedPointerAuth.find(RD); Existing != RecordContainsAddressDiscriminatedPointerAuth.end()) return Existing->second; @@ -3517,7 +3520,6 @@ static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx, uint16_t ASTContext::getPointerAuthTypeDiscriminator(QualType T) { assert(!T->isDependentType() && "cannot compute type discriminator of a dependent type"); - SmallString<256> Str; llvm::raw_svector_ostream Out(Str); diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp index f7731f0..f4ddbf4 100644 --- a/clang/lib/AST/ByteCode/Compiler.cpp +++ b/clang/lib/AST/ByteCode/Compiler.cpp @@ -1842,7 +1842,6 @@ bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits, const Expr *Init, PrimType T, bool Activate = false) -> bool { InitStackScope<Emitter> ISS(this, isa<CXXDefaultInitExpr>(Init)); - InitLinkScope<Emitter> ILS(this, InitLink::Field(FieldToInit->Offset)); if (!this->visit(Init)) return false; @@ -3274,34 +3273,43 @@ bool Compiler<Emitter>::VisitCXXConstructExpr(const CXXConstructExpr *E) { } if (T->isArrayType()) { - const ConstantArrayType *CAT = - Ctx.getASTContext().getAsConstantArrayType(E->getType()); - if (!CAT) - return false; - - size_t NumElems = CAT->getZExtSize(); const Function *Func = getFunction(E->getConstructor()); if (!Func) return false; - // FIXME(perf): We're calling the constructor once per array element here, - // in the old intepreter we had a special-case for trivial constructors. - for (size_t I = 0; I != NumElems; ++I) { - if (!this->emitConstUint64(I, E)) - return false; - if (!this->emitArrayElemPtrUint64(E)) - return false; + if (!this->emitDupPtr(E)) + return false; - // Constructor arguments. - for (const auto *Arg : E->arguments()) { - if (!this->visit(Arg)) - return false; + std::function<bool(QualType)> initArrayDimension; + initArrayDimension = [&](QualType T) -> bool { + if (!T->isArrayType()) { + // Constructor arguments. 
+ for (const auto *Arg : E->arguments()) { + if (!this->visit(Arg)) + return false; + } + + return this->emitCall(Func, 0, E); } - if (!this->emitCall(Func, 0, E)) + const ConstantArrayType *CAT = + Ctx.getASTContext().getAsConstantArrayType(T); + if (!CAT) return false; - } - return true; + QualType ElemTy = CAT->getElementType(); + unsigned NumElems = CAT->getZExtSize(); + for (size_t I = 0; I != NumElems; ++I) { + if (!this->emitConstUint64(I, E)) + return false; + if (!this->emitArrayElemPtrUint64(E)) + return false; + if (!initArrayDimension(ElemTy)) + return false; + } + return this->emitPopPtr(E); + }; + + return initArrayDimension(E->getType()); } return false; @@ -3600,8 +3608,6 @@ bool Compiler<Emitter>::VisitCXXNewExpr(const CXXNewExpr *E) { if (PlacementDest) { if (!this->visit(PlacementDest)) return false; - if (!this->emitStartLifetime(E)) - return false; if (!this->emitGetLocal(SizeT, ArrayLen, E)) return false; if (!this->emitCheckNewTypeMismatchArray(SizeT, E, E)) @@ -3741,10 +3747,9 @@ bool Compiler<Emitter>::VisitCXXNewExpr(const CXXNewExpr *E) { if (PlacementDest) { if (!this->visit(PlacementDest)) return false; - if (!this->emitStartLifetime(E)) - return false; if (!this->emitCheckNewTypeMismatch(E, E)) return false; + } else { // Allocate just one element. if (!this->emitAlloc(Desc, E)) @@ -4841,46 +4846,39 @@ Compiler<Emitter>::visitVarDecl(const VarDecl *VD, const Expr *Init, return !NeedsOp || this->emitCheckDecl(VD, VD); }; - auto initGlobal = [&](unsigned GlobalIndex) -> bool { - assert(Init); - - if (VarT) { - if (!this->visit(Init)) - return checkDecl() && false; - - return checkDecl() && this->emitInitGlobal(*VarT, GlobalIndex, VD); - } - - if (!checkDecl()) - return false; - - if (!this->emitGetPtrGlobal(GlobalIndex, Init)) - return false; - - if (!visitInitializer(Init)) - return false; - - return this->emitFinishInitGlobal(Init); - }; - DeclScope<Emitter> LocalScope(this, VD); - // We've already seen and initialized this global. - if (UnsignedOrNone GlobalIndex = P.getGlobal(VD)) { + UnsignedOrNone GlobalIndex = P.getGlobal(VD); + if (GlobalIndex) { + // We've already seen and initialized this global. if (P.getPtrGlobal(*GlobalIndex).isInitialized()) return checkDecl(); - // The previous attempt at initialization might've been unsuccessful, // so let's try this one. - return !Init || (checkDecl() && initGlobal(*GlobalIndex)); + } else if ((GlobalIndex = P.createGlobal(VD, Init))) { + } else { + return false; } + if (!Init) + return true; - UnsignedOrNone GlobalIndex = P.createGlobal(VD, Init); + if (!checkDecl()) + return false; - if (!GlobalIndex) + if (VarT) { + if (!this->visit(Init)) + return false; + + return this->emitInitGlobal(*VarT, *GlobalIndex, VD); + } + + if (!this->emitGetPtrGlobal(*GlobalIndex, Init)) return false; - return !Init || (checkDecl() && initGlobal(*GlobalIndex)); + if (!visitInitializer(Init)) + return false; + + return this->emitFinishInitGlobal(Init); } // Local variables. InitLinkScope<Emitter> ILS(this, InitLink::Decl(VD)); @@ -4890,36 +4888,37 @@ Compiler<Emitter>::visitVarDecl(const VarDecl *VD, const Expr *Init, VD, *VarT, VD->getType().isConstQualified(), VD->getType().isVolatileQualified(), nullptr, ScopeKind::Block, IsConstexprUnknown); - if (Init) { - // If this is a toplevel declaration, create a scope for the - // initializer. 
- if (Toplevel) { - LocalScope<Emitter> Scope(this); - if (!this->visit(Init)) - return false; - return this->emitSetLocal(*VarT, Offset, VD) && Scope.destroyLocals(); - } - if (!this->visit(Init)) - return false; - return this->emitSetLocal(*VarT, Offset, VD); - } - } else { - if (UnsignedOrNone Offset = this->allocateLocal( - VD, VD->getType(), nullptr, ScopeKind::Block, IsConstexprUnknown)) { - if (!Init) - return true; - if (!this->emitGetPtrLocal(*Offset, Init)) - return false; + if (!Init) + return true; - if (!visitInitializer(Init)) + // If this is a toplevel declaration, create a scope for the + // initializer. + if (Toplevel) { + LocalScope<Emitter> Scope(this); + if (!this->visit(Init)) return false; - - return this->emitFinishInitPop(Init); + return this->emitSetLocal(*VarT, Offset, VD) && Scope.destroyLocals(); } - return false; + if (!this->visit(Init)) + return false; + return this->emitSetLocal(*VarT, Offset, VD); } - return true; + // Local composite variables. + if (UnsignedOrNone Offset = this->allocateLocal( + VD, VD->getType(), nullptr, ScopeKind::Block, IsConstexprUnknown)) { + if (!Init) + return true; + + if (!this->emitGetPtrLocal(*Offset, Init)) + return false; + + if (!visitInitializer(Init)) + return false; + + return this->emitFinishInitPop(Init); + } + return false; } template <class Emitter> @@ -5391,55 +5390,57 @@ bool Compiler<Emitter>::VisitCXXThisExpr(const CXXThisExpr *E) { // instance pointer of the current function frame, but e.g. to the declaration // currently being initialized. Here we emit the necessary instruction(s) for // this scenario. - if (!InitStackActive) + if (!InitStackActive || InitStack.empty()) return this->emitThis(E); - if (!InitStack.empty()) { - // If our init stack is, for example: - // 0 Stack: 3 (decl) - // 1 Stack: 6 (init list) - // 2 Stack: 1 (field) - // 3 Stack: 6 (init list) - // 4 Stack: 1 (field) - // - // We want to find the LAST element in it that's an init list, - // which is marked with the K_InitList marker. The index right - // before that points to an init list. We need to find the - // elements before the K_InitList element that point to a base - // (e.g. a decl or This), optionally followed by field, elem, etc. - // In the example above, we want to emit elements [0..2]. - unsigned StartIndex = 0; - unsigned EndIndex = 0; - // Find the init list. - for (StartIndex = InitStack.size() - 1; StartIndex > 0; --StartIndex) { - if (InitStack[StartIndex].Kind == InitLink::K_InitList || - InitStack[StartIndex].Kind == InitLink::K_This) { - EndIndex = StartIndex; - --StartIndex; - break; - } + // If our init stack is, for example: + // 0 Stack: 3 (decl) + // 1 Stack: 6 (init list) + // 2 Stack: 1 (field) + // 3 Stack: 6 (init list) + // 4 Stack: 1 (field) + // + // We want to find the LAST element in it that's an init list, + // which is marked with the K_InitList marker. The index right + // before that points to an init list. We need to find the + // elements before the K_InitList element that point to a base + // (e.g. a decl or This), optionally followed by field, elem, etc. + // In the example above, we want to emit elements [0..2]. + unsigned StartIndex = 0; + unsigned EndIndex = 0; + // Find the init list. + for (StartIndex = InitStack.size() - 1; StartIndex > 0; --StartIndex) { + if (InitStack[StartIndex].Kind == InitLink::K_InitList || + InitStack[StartIndex].Kind == InitLink::K_This) { + EndIndex = StartIndex; + --StartIndex; + break; } + } - // Walk backwards to find the base. 
- for (; StartIndex > 0; --StartIndex) { - if (InitStack[StartIndex].Kind == InitLink::K_InitList) - continue; + // Walk backwards to find the base. + for (; StartIndex > 0; --StartIndex) { + if (InitStack[StartIndex].Kind == InitLink::K_InitList) + continue; - if (InitStack[StartIndex].Kind != InitLink::K_Field && - InitStack[StartIndex].Kind != InitLink::K_Elem) - break; - } + if (InitStack[StartIndex].Kind != InitLink::K_Field && + InitStack[StartIndex].Kind != InitLink::K_Elem) + break; + } - // Emit the instructions. - for (unsigned I = StartIndex; I != EndIndex; ++I) { - if (InitStack[I].Kind == InitLink::K_InitList) - continue; - if (!InitStack[I].template emit<Emitter>(this, E)) - return false; - } - return true; + if (StartIndex == 0 && EndIndex == 0) + EndIndex = InitStack.size() - 1; + + assert(StartIndex < EndIndex); + + // Emit the instructions. + for (unsigned I = StartIndex; I != (EndIndex + 1); ++I) { + if (InitStack[I].Kind == InitLink::K_InitList) + continue; + if (!InitStack[I].template emit<Emitter>(this, E)) + return false; } - return this->emitThis(E); + return true; } template <class Emitter> bool Compiler<Emitter>::visitStmt(const Stmt *S) { @@ -6301,6 +6302,10 @@ bool Compiler<Emitter>::compileConstructor(const CXXConstructorDecl *Ctor) { } assert(NestedField); + unsigned FirstLinkOffset = + R->getField(cast<FieldDecl>(IFD->chain()[0]))->Offset; + InitStackScope<Emitter> ISS(this, isa<CXXDefaultInitExpr>(InitExpr)); + InitLinkScope<Emitter> ILS(this, InitLink::Field(FirstLinkOffset)); if (!emitFieldInitializer(NestedField, NestedFieldOffset, InitExpr, IsUnion)) return false; @@ -6438,6 +6443,13 @@ bool Compiler<Emitter>::visitFunc(const FunctionDecl *F) { return this->emitNoRet(SourceInfo{}); } +static uint32_t getBitWidth(const Expr *E) { + assert(E->refersToBitField()); + const auto *ME = cast<MemberExpr>(E); + const auto *FD = cast<FieldDecl>(ME->getMemberDecl()); + return FD->getBitWidthValue(); +} + template <class Emitter> bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) { const Expr *SubExpr = E->getSubExpr(); @@ -6466,10 +6478,15 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) { return DiscardResult ? this->emitPopPtr(E) : true; } - if (T == PT_Float) { + if (T == PT_Float) return DiscardResult ? this->emitIncfPop(getFPOptions(E), E) : this->emitIncf(getFPOptions(E), E); - } + + if (SubExpr->refersToBitField()) + return DiscardResult ? this->emitIncPopBitfield(*T, E->canOverflow(), + getBitWidth(SubExpr), E) + : this->emitIncBitfield(*T, E->canOverflow(), + getBitWidth(SubExpr), E); return DiscardResult ? this->emitIncPop(*T, E->canOverflow(), E) : this->emitInc(*T, E->canOverflow(), E); @@ -6490,9 +6507,15 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) { return DiscardResult ? this->emitPopPtr(E) : true; } - if (T == PT_Float) { + if (T == PT_Float) return DiscardResult ? this->emitDecfPop(getFPOptions(E), E) : this->emitDecf(getFPOptions(E), E); + + if (SubExpr->refersToBitField()) { + return DiscardResult ? this->emitDecPopBitfield(*T, E->canOverflow(), + getBitWidth(SubExpr), E) + : this->emitDecBitfield(*T, E->canOverflow(), + getBitWidth(SubExpr), E); } return DiscardResult ? this->emitDecPop(*T, E->canOverflow(), E) @@ -6521,6 +6544,11 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) { if (DiscardResult) { if (T == PT_Float) return this->emitIncfPop(getFPOptions(E), E); + if (SubExpr->refersToBitField()) + return DiscardResult ? 
this->emitIncPopBitfield(*T, E->canOverflow(), + getBitWidth(SubExpr), E) + : this->emitIncBitfield(*T, E->canOverflow(), + getBitWidth(SubExpr), E); return this->emitIncPop(*T, E->canOverflow(), E); } @@ -6536,6 +6564,11 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) { return false; if (!this->emitStoreFloat(E)) return false; + } else if (SubExpr->refersToBitField()) { + assert(isIntegralType(*T)); + if (!this->emitPreIncBitfield(*T, E->canOverflow(), getBitWidth(SubExpr), + E)) + return false; } else { assert(isIntegralType(*T)); if (!this->emitPreInc(*T, E->canOverflow(), E)) @@ -6566,6 +6599,11 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) { if (DiscardResult) { if (T == PT_Float) return this->emitDecfPop(getFPOptions(E), E); + if (SubExpr->refersToBitField()) + return DiscardResult ? this->emitDecPopBitfield(*T, E->canOverflow(), + getBitWidth(SubExpr), E) + : this->emitDecBitfield(*T, E->canOverflow(), + getBitWidth(SubExpr), E); return this->emitDecPop(*T, E->canOverflow(), E); } @@ -6581,6 +6619,11 @@ bool Compiler<Emitter>::VisitUnaryOperator(const UnaryOperator *E) { return false; if (!this->emitStoreFloat(E)) return false; + } else if (SubExpr->refersToBitField()) { + assert(isIntegralType(*T)); + if (!this->emitPreDecBitfield(*T, E->canOverflow(), getBitWidth(SubExpr), + E)) + return false; } else { assert(isIntegralType(*T)); if (!this->emitPreDec(*T, E->canOverflow(), E)) diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp index a72282c..169a9a2 100644 --- a/clang/lib/AST/ByteCode/Interp.cpp +++ b/clang/lib/AST/ByteCode/Interp.cpp @@ -1903,12 +1903,19 @@ bool CheckNewTypeMismatch(InterpState &S, CodePtr OpPC, const Expr *E, if (Ptr.inUnion() && Ptr.getBase().getRecord()->isUnion()) Ptr.activate(); + if (Ptr.isZero()) { + S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_null) + << AK_Construct; + return false; + } + if (!Ptr.isBlockPointer()) return false; + startLifetimeRecurse(Ptr); + // Similar to CheckStore(), but with the additional CheckTemporary() call and // the AccessKinds are different. 
- if (!Ptr.block()->isAccessible()) { if (!CheckExtern(S, OpPC, Ptr)) return false; diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h index d8529da..89f6fbe 100644 --- a/clang/lib/AST/ByteCode/Interp.h +++ b/clang/lib/AST/ByteCode/Interp.h @@ -702,7 +702,7 @@ enum class IncDecOp { template <typename T, IncDecOp Op, PushVal DoPush> bool IncDecHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr, - bool CanOverflow) { + bool CanOverflow, UnsignedOrNone BitWidth = std::nullopt) { assert(!Ptr.isDummy()); if (!S.inConstantContext()) { @@ -725,12 +725,18 @@ bool IncDecHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr, if constexpr (Op == IncDecOp::Inc) { if (!T::increment(Value, &Result) || !CanOverflow) { - Ptr.deref<T>() = Result; + if (BitWidth) + Ptr.deref<T>() = Result.truncate(*BitWidth); + else + Ptr.deref<T>() = Result; return true; } } else { if (!T::decrement(Value, &Result) || !CanOverflow) { - Ptr.deref<T>() = Result; + if (BitWidth) + Ptr.deref<T>() = Result.truncate(*BitWidth); + else + Ptr.deref<T>() = Result; return true; } } @@ -774,6 +780,17 @@ bool Inc(InterpState &S, CodePtr OpPC, bool CanOverflow) { CanOverflow); } +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool IncBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow, + unsigned BitWidth) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + if (!CheckLoad(S, OpPC, Ptr, AK_Increment)) + return false; + + return IncDecHelper<T, IncDecOp::Inc, PushVal::Yes>(S, OpPC, Ptr, CanOverflow, + BitWidth); +} + /// 1) Pops a pointer from the stack /// 2) Load the value from the pointer /// 3) Writes the value increased by one back to the pointer @@ -787,6 +804,17 @@ bool IncPop(InterpState &S, CodePtr OpPC, bool CanOverflow) { } template <PrimType Name, class T = typename PrimConv<Name>::T> +bool IncPopBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow, + uint32_t BitWidth) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + if (!CheckLoad(S, OpPC, Ptr, AK_Increment)) + return false; + + return IncDecHelper<T, IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr, CanOverflow, + BitWidth); +} + +template <PrimType Name, class T = typename PrimConv<Name>::T> bool PreInc(InterpState &S, CodePtr OpPC, bool CanOverflow) { const Pointer &Ptr = S.Stk.peek<Pointer>(); if (!CheckLoad(S, OpPC, Ptr, AK_Increment)) @@ -795,6 +823,17 @@ bool PreInc(InterpState &S, CodePtr OpPC, bool CanOverflow) { return IncDecHelper<T, IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr, CanOverflow); } +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool PreIncBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow, + uint32_t BitWidth) { + const Pointer &Ptr = S.Stk.peek<Pointer>(); + if (!CheckLoad(S, OpPC, Ptr, AK_Increment)) + return false; + + return IncDecHelper<T, IncDecOp::Inc, PushVal::No>(S, OpPC, Ptr, CanOverflow, + BitWidth); +} + /// 1) Pops a pointer from the stack /// 2) Load the value from the pointer /// 3) Writes the value decreased by one back to the pointer @@ -808,6 +847,16 @@ bool Dec(InterpState &S, CodePtr OpPC, bool CanOverflow) { return IncDecHelper<T, IncDecOp::Dec, PushVal::Yes>(S, OpPC, Ptr, CanOverflow); } +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool DecBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow, + uint32_t BitWidth) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + if (!CheckLoad(S, OpPC, Ptr, AK_Decrement)) + return false; + + return IncDecHelper<T, IncDecOp::Dec, PushVal::Yes>(S, OpPC, Ptr, CanOverflow, + BitWidth); +} /// 1) Pops a pointer 
from the stack /// 2) Load the value from the pointer @@ -822,6 +871,17 @@ bool DecPop(InterpState &S, CodePtr OpPC, bool CanOverflow) { } template <PrimType Name, class T = typename PrimConv<Name>::T> +bool DecPopBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow, + uint32_t BitWidth) { + const Pointer &Ptr = S.Stk.pop<Pointer>(); + if (!CheckLoad(S, OpPC, Ptr, AK_Decrement)) + return false; + + return IncDecHelper<T, IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr, CanOverflow, + BitWidth); +} + +template <PrimType Name, class T = typename PrimConv<Name>::T> bool PreDec(InterpState &S, CodePtr OpPC, bool CanOverflow) { const Pointer &Ptr = S.Stk.peek<Pointer>(); if (!CheckLoad(S, OpPC, Ptr, AK_Decrement)) @@ -829,6 +889,16 @@ bool PreDec(InterpState &S, CodePtr OpPC, bool CanOverflow) { return IncDecHelper<T, IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr, CanOverflow); } +template <PrimType Name, class T = typename PrimConv<Name>::T> +bool PreDecBitfield(InterpState &S, CodePtr OpPC, bool CanOverflow, + uint32_t BitWidth) { + const Pointer &Ptr = S.Stk.peek<Pointer>(); + if (!CheckLoad(S, OpPC, Ptr, AK_Decrement)) + return false; + return IncDecHelper<T, IncDecOp::Dec, PushVal::No>(S, OpPC, Ptr, CanOverflow, + BitWidth); +} + template <IncDecOp Op, PushVal DoPush> bool IncDecFloatHelper(InterpState &S, CodePtr OpPC, const Pointer &Ptr, uint32_t FPOI) { diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp index ff83c52..d0b97a1 100644 --- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp +++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp @@ -12,12 +12,14 @@ #include "InterpHelpers.h" #include "PrimType.h" #include "Program.h" +#include "clang/AST/InferAlloc.h" #include "clang/AST/OSLog.h" #include "clang/AST/RecordLayout.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/TargetBuiltins.h" #include "clang/Basic/TargetInfo.h" #include "llvm/ADT/StringExtras.h" +#include "llvm/Support/AllocToken.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/SipHash.h" @@ -1307,6 +1309,45 @@ interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC, return true; } +static bool interp__builtin_infer_alloc_token(InterpState &S, CodePtr OpPC, + const InterpFrame *Frame, + const CallExpr *Call) { + const ASTContext &ASTCtx = S.getASTContext(); + uint64_t BitWidth = ASTCtx.getTypeSize(ASTCtx.getSizeType()); + auto Mode = + ASTCtx.getLangOpts().AllocTokenMode.value_or(llvm::DefaultAllocTokenMode); + uint64_t MaxTokens = + ASTCtx.getLangOpts().AllocTokenMax.value_or(~0ULL >> (64 - BitWidth)); + + // We do not read any of the arguments; discard them. + for (int I = Call->getNumArgs() - 1; I >= 0; --I) + discard(S.Stk, *S.getContext().classify(Call->getArg(I))); + + // Note: Type inference from a surrounding cast is not supported in + // constexpr evaluation. 
+ QualType AllocType = infer_alloc::inferPossibleType(Call, ASTCtx, nullptr); + if (AllocType.isNull()) { + S.CCEDiag(Call, + diag::note_constexpr_infer_alloc_token_type_inference_failed); + return false; + } + + auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, ASTCtx); + if (!ATMD) { + S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_no_metadata); + return false; + } + + auto MaybeToken = llvm::getAllocToken(Mode, *ATMD, MaxTokens); + if (!MaybeToken) { + S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_stateful_mode); + return false; + } + + pushInteger(S, llvm::APInt(BitWidth, *MaybeToken), ASTCtx.getSizeType()); + return true; +} + static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call) { @@ -3279,6 +3320,65 @@ static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC, return true; } +static bool interp__builtin_x86_byteshift( + InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID, + llvm::function_ref<APInt(const Pointer &, unsigned Lane, unsigned I, + unsigned Shift)> + Fn) { + assert(Call->getNumArgs() == 2); + + APSInt ImmAPS = popToAPSInt(S, Call->getArg(1)); + uint64_t Shift = ImmAPS.getZExtValue() & 0xff; + + const Pointer &Src = S.Stk.pop<Pointer>(); + if (!Src.getFieldDesc()->isPrimitiveArray()) + return false; + + unsigned NumElems = Src.getNumElems(); + const Pointer &Dst = S.Stk.peek<Pointer>(); + PrimType ElemT = Src.getFieldDesc()->getPrimType(); + + for (unsigned Lane = 0; Lane != NumElems; Lane += 16) { + for (unsigned I = 0; I != 16; ++I) { + unsigned Base = Lane + I; + APSInt Result = APSInt(Fn(Src, Lane, I, Shift)); + INT_TYPE_SWITCH_NO_BOOL(ElemT, + { Dst.elem<T>(Base) = static_cast<T>(Result); }); + } + } + + Dst.initializeAllElements(); + + return true; +} + +static bool interp__builtin_ia32_shuffle_generic( + InterpState &S, CodePtr OpPC, const CallExpr *Call, + llvm::function_ref<std::pair<unsigned, unsigned>(unsigned, unsigned)> + GetSourceIndex) { + + assert(Call->getNumArgs() == 3); + unsigned ShuffleMask = popToAPSInt(S, Call->getArg(2)).getZExtValue(); + + QualType Arg0Type = Call->getArg(0)->getType(); + const auto *VecT = Arg0Type->castAs<VectorType>(); + PrimType ElemT = *S.getContext().classify(VecT->getElementType()); + unsigned NumElems = VecT->getNumElements(); + + const Pointer &B = S.Stk.pop<Pointer>(); + const Pointer &A = S.Stk.pop<Pointer>(); + const Pointer &Dst = S.Stk.peek<Pointer>(); + + for (unsigned DstIdx = 0; DstIdx != NumElems; ++DstIdx) { + auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask); + const Pointer &Src = (SrcVecIdx == 0) ? 
A : B; + TYPE_SWITCH(ElemT, { Dst.elem<T>(DstIdx) = Src.elem<T>(SrcIdx); }); + } + Dst.initializeAllElements(); + + return true; +} + bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, uint32_t BuiltinID) { if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID)) @@ -3471,7 +3571,7 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, case Builtin::BI_lrotl: case Builtin::BI_rotl64: return interp__builtin_elementwise_int_binop( - S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) -> APInt { + S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) { return Value.rotl(Amount); }); @@ -3485,7 +3585,7 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, case Builtin::BI_lrotr: case Builtin::BI_rotr64: return interp__builtin_elementwise_int_binop( - S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) -> APInt { + S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) { return Value.rotr(Amount); }); @@ -3694,6 +3794,9 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, case Builtin::BI__builtin_ptrauth_string_discriminator: return interp__builtin_ptrauth_string_discriminator(S, OpPC, Frame, Call); + case Builtin::BI__builtin_infer_alloc_token: + return interp__builtin_infer_alloc_token(S, OpPC, Frame, Call); + case Builtin::BI__noop: pushInteger(S, 0, Call->getType()); return true; @@ -3809,6 +3912,21 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, return interp__builtin_ia32_movmsk_op(S, OpPC, Call); } + case X86::BI__builtin_ia32_psignb128: + case X86::BI__builtin_ia32_psignb256: + case X86::BI__builtin_ia32_psignw128: + case X86::BI__builtin_ia32_psignw256: + case X86::BI__builtin_ia32_psignd128: + case X86::BI__builtin_ia32_psignd256: + return interp__builtin_elementwise_int_binop( + S, OpPC, Call, [](const APInt &AElem, const APInt &BElem) { + if (BElem.isZero()) + return APInt::getZero(AElem.getBitWidth()); + if (BElem.isNegative()) + return -AElem; + return AElem; + }); + case clang::X86::BI__builtin_ia32_pavgb128: case clang::X86::BI__builtin_ia32_pavgw128: case clang::X86::BI__builtin_ia32_pavgb256: @@ -4191,6 +4309,42 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, case X86::BI__builtin_ia32_selectpd_512: return interp__builtin_select(S, OpPC, Call); + case X86::BI__builtin_ia32_shufps: + case X86::BI__builtin_ia32_shufps256: + case X86::BI__builtin_ia32_shufps512: + return interp__builtin_ia32_shuffle_generic( + S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) { + unsigned NumElemPerLane = 4; + unsigned NumSelectableElems = NumElemPerLane / 2; + unsigned BitsPerElem = 2; + unsigned IndexMask = 0x3; + unsigned MaskBits = 8; + unsigned Lane = DstIdx / NumElemPerLane; + unsigned ElemInLane = DstIdx % NumElemPerLane; + unsigned LaneOffset = Lane * NumElemPerLane; + unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 
1 : 0; + unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits; + unsigned Index = (ShuffleMask >> BitIndex) & IndexMask; + return std::pair<unsigned, unsigned>{SrcIdx, LaneOffset + Index}; + }); + case X86::BI__builtin_ia32_shufpd: + case X86::BI__builtin_ia32_shufpd256: + case X86::BI__builtin_ia32_shufpd512: + return interp__builtin_ia32_shuffle_generic( + S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) { + unsigned NumElemPerLane = 2; + unsigned NumSelectableElems = NumElemPerLane / 2; + unsigned BitsPerElem = 1; + unsigned IndexMask = 0x1; + unsigned MaskBits = 8; + unsigned Lane = DstIdx / NumElemPerLane; + unsigned ElemInLane = DstIdx % NumElemPerLane; + unsigned LaneOffset = Lane * NumElemPerLane; + unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0; + unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits; + unsigned Index = (ShuffleMask >> BitIndex) & IndexMask; + return std::pair<unsigned, unsigned>{SrcIdx, LaneOffset + Index}; + }); case X86::BI__builtin_ia32_pshufb128: case X86::BI__builtin_ia32_pshufb256: case X86::BI__builtin_ia32_pshufb512: @@ -4331,6 +4485,39 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, case X86::BI__builtin_ia32_vec_set_v4di: return interp__builtin_vec_set(S, OpPC, Call, BuiltinID); + case X86::BI__builtin_ia32_pslldqi128_byteshift: + case X86::BI__builtin_ia32_pslldqi256_byteshift: + case X86::BI__builtin_ia32_pslldqi512_byteshift: + // These SLLDQ intrinsics always operate on byte elements (8 bits). + // The lane width is hardcoded to 16 to match the SIMD register size, + // but the algorithm processes one byte per iteration, + // so APInt(8, ...) is correct and intentional. + return interp__builtin_x86_byteshift( + S, OpPC, Call, BuiltinID, + [](const Pointer &Src, unsigned Lane, unsigned I, unsigned Shift) { + if (I < Shift) { + return APInt(8, 0); + } + return APInt(8, Src.elem<uint8_t>(Lane + I - Shift)); + }); + + case X86::BI__builtin_ia32_psrldqi128_byteshift: + case X86::BI__builtin_ia32_psrldqi256_byteshift: + case X86::BI__builtin_ia32_psrldqi512_byteshift: + // These SRLDQ intrinsics always operate on byte elements (8 bits). + // The lane width is hardcoded to 16 to match the SIMD register size, + // but the algorithm processes one byte per iteration, + // so APInt(8, ...) is correct and intentional. + return interp__builtin_x86_byteshift( + S, OpPC, Call, BuiltinID, + [](const Pointer &Src, unsigned Lane, unsigned I, unsigned Shift) { + if (I + Shift < 16) { + return APInt(8, Src.elem<uint8_t>(Lane + I + Shift)); + } + + return APInt(8, 0); + }); + default: S.FFDiag(S.Current->getLocation(OpPC), diag::note_invalid_subexpr_in_const_expr) diff --git a/clang/lib/AST/ByteCode/Opcodes.td b/clang/lib/AST/ByteCode/Opcodes.td index 532c444..1c17ad9e 100644 --- a/clang/lib/AST/ByteCode/Opcodes.td +++ b/clang/lib/AST/ByteCode/Opcodes.td @@ -612,12 +612,25 @@ class OverflowOpcode : Opcode { let HasGroup = 1; } +class OverflowBitfieldOpcode : Opcode { + let Types = [AluTypeClass]; + let Args = [ArgBool, ArgUint32]; + let HasGroup = 1; +} + def Inc : OverflowOpcode; +def IncBitfield : OverflowBitfieldOpcode; def IncPop : OverflowOpcode; +def IncPopBitfield : OverflowBitfieldOpcode; def PreInc : OverflowOpcode; +def PreIncBitfield : OverflowBitfieldOpcode; + def Dec : OverflowOpcode; +def DecBitfield : OverflowBitfieldOpcode; def DecPop : OverflowOpcode; +def DecPopBitfield : OverflowBitfieldOpcode; def PreDec : OverflowOpcode; +def PreDecBitfield : OverflowBitfieldOpcode; // Float increment and decrement. 
def Incf: FloatOpcode; @@ -853,19 +866,13 @@ def Free : Opcode { let Args = [ArgBool, ArgBool]; } -def CheckNewTypeMismatch : Opcode { - let Args = [ArgExpr]; -} - -def InvalidNewDeleteExpr : Opcode { - let Args = [ArgExpr]; -} - +def CheckNewTypeMismatch : Opcode { let Args = [ArgExpr]; } def CheckNewTypeMismatchArray : Opcode { let Types = [IntegerTypeClass]; let Args = [ArgExpr]; let HasGroup = 1; } +def InvalidNewDeleteExpr : Opcode { let Args = [ArgExpr]; } def IsConstantContext: Opcode; def CheckAllocations : Opcode; diff --git a/clang/lib/AST/CMakeLists.txt b/clang/lib/AST/CMakeLists.txt index d4fd7a7..fd50e95 100644 --- a/clang/lib/AST/CMakeLists.txt +++ b/clang/lib/AST/CMakeLists.txt @@ -66,6 +66,7 @@ add_clang_library(clangAST ExternalASTMerger.cpp ExternalASTSource.cpp FormatString.cpp + InferAlloc.cpp InheritViz.cpp ByteCode/BitcastBuffer.cpp ByteCode/ByteCodeEmitter.cpp diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp index 00aaaab..29ee089 100644 --- a/clang/lib/AST/ExprConstant.cpp +++ b/clang/lib/AST/ExprConstant.cpp @@ -44,6 +44,7 @@ #include "clang/AST/CharUnits.h" #include "clang/AST/CurrentSourceLocExprScope.h" #include "clang/AST/Expr.h" +#include "clang/AST/InferAlloc.h" #include "clang/AST/OSLog.h" #include "clang/AST/OptionalDiagnostic.h" #include "clang/AST/RecordLayout.h" @@ -11618,6 +11619,39 @@ static bool evalPackBuiltin(const CallExpr *E, EvalInfo &Info, APValue &Result, return true; } +static bool evalShuffleGeneric( + EvalInfo &Info, const CallExpr *Call, APValue &Out, + llvm::function_ref<std::pair<unsigned, unsigned>(unsigned, unsigned)> + GetSourceIndex) { + + const auto *VT = Call->getType()->getAs<VectorType>(); + if (!VT) + return false; + + APSInt MaskImm; + if (!EvaluateInteger(Call->getArg(2), MaskImm, Info)) + return false; + unsigned ShuffleMask = static_cast<unsigned>(MaskImm.getZExtValue()); + + APValue A, B; + if (!EvaluateAsRValue(Info, Call->getArg(0), A) || + !EvaluateAsRValue(Info, Call->getArg(1), B)) + return false; + + unsigned NumElts = VT->getNumElements(); + SmallVector<APValue, 16> ResultElements; + ResultElements.reserve(NumElts); + + for (unsigned DstIdx = 0; DstIdx != NumElts; ++DstIdx) { + auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask); + const APValue &Src = (SrcVecIdx == 0) ? 
A : B; + ResultElements.push_back(Src.getVectorElt(SrcIdx)); + } + + Out = APValue(ResultElements.data(), ResultElements.size()); + return true; +} + static bool evalPshufbBuiltin(EvalInfo &Info, const CallExpr *Call, APValue &Out) { APValue SrcVec, ControlVec; @@ -12312,6 +12346,20 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) { return Success(APValue(ResultElements.data(), ResultElements.size()), E); } + case X86::BI__builtin_ia32_psignb128: + case X86::BI__builtin_ia32_psignb256: + case X86::BI__builtin_ia32_psignw128: + case X86::BI__builtin_ia32_psignw256: + case X86::BI__builtin_ia32_psignd128: + case X86::BI__builtin_ia32_psignd256: + return EvaluateBinOpExpr([](const APInt &AElem, const APInt &BElem) { + if (BElem.isZero()) + return APInt::getZero(AElem.getBitWidth()); + if (BElem.isNegative()) + return -AElem; + return AElem; + }); + case X86::BI__builtin_ia32_blendvpd: case X86::BI__builtin_ia32_blendvpd256: case X86::BI__builtin_ia32_blendvps: @@ -12383,7 +12431,56 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) { return Success(APValue(ResultElements.data(), ResultElements.size()), E); } - + case X86::BI__builtin_ia32_shufps: + case X86::BI__builtin_ia32_shufps256: + case X86::BI__builtin_ia32_shufps512: { + APValue R; + if (!evalShuffleGeneric( + Info, E, R, + [](unsigned DstIdx, + unsigned ShuffleMask) -> std::pair<unsigned, unsigned> { + constexpr unsigned LaneBits = 128u; + unsigned NumElemPerLane = LaneBits / 32; + unsigned NumSelectableElems = NumElemPerLane / 2; + unsigned BitsPerElem = 2; + unsigned IndexMask = (1u << BitsPerElem) - 1; + unsigned MaskBits = 8; + unsigned Lane = DstIdx / NumElemPerLane; + unsigned ElemInLane = DstIdx % NumElemPerLane; + unsigned LaneOffset = Lane * NumElemPerLane; + unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits; + unsigned SrcIdx = (ElemInLane < NumSelectableElems) ? 0 : 1; + unsigned Index = (ShuffleMask >> BitIndex) & IndexMask; + return {SrcIdx, LaneOffset + Index}; + })) + return false; + return Success(R, E); + } + case X86::BI__builtin_ia32_shufpd: + case X86::BI__builtin_ia32_shufpd256: + case X86::BI__builtin_ia32_shufpd512: { + APValue R; + if (!evalShuffleGeneric( + Info, E, R, + [](unsigned DstIdx, + unsigned ShuffleMask) -> std::pair<unsigned, unsigned> { + constexpr unsigned LaneBits = 128u; + unsigned NumElemPerLane = LaneBits / 64; + unsigned NumSelectableElems = NumElemPerLane / 2; + unsigned BitsPerElem = 1; + unsigned IndexMask = (1u << BitsPerElem) - 1; + unsigned MaskBits = 8; + unsigned Lane = DstIdx / NumElemPerLane; + unsigned ElemInLane = DstIdx % NumElemPerLane; + unsigned LaneOffset = Lane * NumElemPerLane; + unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits; + unsigned SrcIdx = (ElemInLane < NumSelectableElems) ? 
0 : 1; + unsigned Index = (ShuffleMask >> BitIndex) & IndexMask; + return {SrcIdx, LaneOffset + Index}; + })) + return false; + return Success(R, E); + } case X86::BI__builtin_ia32_pshufb128: case X86::BI__builtin_ia32_pshufb256: case X86::BI__builtin_ia32_pshufb512: { @@ -12891,6 +12988,66 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) { return Success(APValue(Elems.data(), NumElems), E); } + + case X86::BI__builtin_ia32_pslldqi128_byteshift: + case X86::BI__builtin_ia32_pslldqi256_byteshift: + case X86::BI__builtin_ia32_pslldqi512_byteshift: { + assert(E->getNumArgs() == 2); + + APValue Src; + APSInt Imm; + if (!EvaluateAsRValue(Info, E->getArg(0), Src) || + !EvaluateInteger(E->getArg(1), Imm, Info)) + return false; + + unsigned VecLen = Src.getVectorLength(); + unsigned Shift = Imm.getZExtValue() & 0xff; + + SmallVector<APValue> ResultElements; + for (unsigned Lane = 0; Lane != VecLen; Lane += 16) { + for (unsigned I = 0; I != 16; ++I) { + if (I < Shift) { + APSInt Zero(8, /*isUnsigned=*/true); + Zero = 0; + ResultElements.push_back(APValue(Zero)); + } else { + ResultElements.push_back(Src.getVectorElt(Lane + I - Shift)); + } + } + } + + return Success(APValue(ResultElements.data(), ResultElements.size()), E); + } + + case X86::BI__builtin_ia32_psrldqi128_byteshift: + case X86::BI__builtin_ia32_psrldqi256_byteshift: + case X86::BI__builtin_ia32_psrldqi512_byteshift: { + assert(E->getNumArgs() == 2); + + APValue Src; + APSInt Imm; + if (!EvaluateAsRValue(Info, E->getArg(0), Src) || + !EvaluateInteger(E->getArg(1), Imm, Info)) + return false; + + unsigned VecLen = Src.getVectorLength(); + unsigned Shift = Imm.getZExtValue() & 0xff; + + SmallVector<APValue> ResultElements; + for (unsigned Lane = 0; Lane != VecLen; Lane += 16) { + for (unsigned I = 0; I != 16; ++I) { + if (I + Shift < 16) { + ResultElements.push_back(Src.getVectorElt(Lane + I + Shift)); + } else { + APSInt Zero(8, /*isUnsigned=*/true); + Zero = 0; + ResultElements.push_back(APValue(Zero)); + } + } + } + + return Success(APValue(ResultElements.data(), ResultElements.size()), E); + } } } @@ -14649,6 +14806,27 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E, return Success(Result, E); } + case Builtin::BI__builtin_infer_alloc_token: { + // If we fail to infer a type, this fails to be a constant expression; this + // can be checked with __builtin_constant_p(...). 
+ QualType AllocType = infer_alloc::inferPossibleType(E, Info.Ctx, nullptr); + if (AllocType.isNull()) + return Error( + E, diag::note_constexpr_infer_alloc_token_type_inference_failed); + auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, Info.Ctx); + if (!ATMD) + return Error(E, diag::note_constexpr_infer_alloc_token_no_metadata); + auto Mode = + Info.getLangOpts().AllocTokenMode.value_or(llvm::DefaultAllocTokenMode); + uint64_t BitWidth = Info.Ctx.getTypeSize(Info.Ctx.getSizeType()); + uint64_t MaxTokens = + Info.getLangOpts().AllocTokenMax.value_or(~0ULL >> (64 - BitWidth)); + auto MaybeToken = llvm::getAllocToken(Mode, *ATMD, MaxTokens); + if (!MaybeToken) + return Error(E, diag::note_constexpr_infer_alloc_token_stateful_mode); + return Success(llvm::APInt(BitWidth, *MaybeToken), E); + } + case Builtin::BI__builtin_ffs: case Builtin::BI__builtin_ffsl: case Builtin::BI__builtin_ffsll: { diff --git a/clang/lib/AST/InferAlloc.cpp b/clang/lib/AST/InferAlloc.cpp new file mode 100644 index 0000000..e439ed4 --- /dev/null +++ b/clang/lib/AST/InferAlloc.cpp @@ -0,0 +1,201 @@ +//===--- InferAlloc.cpp - Allocation type inference -----------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements allocation-related type inference. +// +//===----------------------------------------------------------------------===// + +#include "clang/AST/InferAlloc.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclCXX.h" +#include "clang/AST/Expr.h" +#include "clang/AST/Type.h" +#include "clang/Basic/IdentifierTable.h" +#include "llvm/ADT/SmallPtrSet.h" + +using namespace clang; +using namespace infer_alloc; + +static bool +typeContainsPointer(QualType T, + llvm::SmallPtrSet<const RecordDecl *, 4> &VisitedRD, + bool &IncompleteType) { + QualType CanonicalType = T.getCanonicalType(); + if (CanonicalType->isPointerType()) + return true; // base case + + // Look through typedef chain to check for special types. + for (QualType CurrentT = T; const auto *TT = CurrentT->getAs<TypedefType>(); + CurrentT = TT->getDecl()->getUnderlyingType()) { + const IdentifierInfo *II = TT->getDecl()->getIdentifier(); + // Special Case: Syntactically uintptr_t is not a pointer; semantically, + // however, very likely used as such. Therefore, classify uintptr_t as a + // pointer, too. + if (II && II->isStr("uintptr_t")) + return true; + } + + // The type is an array; check the element type. + if (const ArrayType *AT = dyn_cast<ArrayType>(CanonicalType)) + return typeContainsPointer(AT->getElementType(), VisitedRD, IncompleteType); + // The type is a struct, class, or union. + if (const RecordDecl *RD = CanonicalType->getAsRecordDecl()) { + if (!RD->isCompleteDefinition()) { + IncompleteType = true; + return false; + } + if (!VisitedRD.insert(RD).second) + return false; // already visited + // Check all fields. + for (const FieldDecl *Field : RD->fields()) { + if (typeContainsPointer(Field->getType(), VisitedRD, IncompleteType)) + return true; + } + // For C++ classes, also check base classes. + if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { + // Polymorphic types require a vptr. 
+ if (CXXRD->isDynamicClass()) + return true; + for (const CXXBaseSpecifier &Base : CXXRD->bases()) { + if (typeContainsPointer(Base.getType(), VisitedRD, IncompleteType)) + return true; + } + } + } + return false; +} + +/// Infer type from a simple sizeof expression. +static QualType inferTypeFromSizeofExpr(const Expr *E) { + const Expr *Arg = E->IgnoreParenImpCasts(); + if (const auto *UET = dyn_cast<UnaryExprOrTypeTraitExpr>(Arg)) { + if (UET->getKind() == UETT_SizeOf) { + if (UET->isArgumentType()) + return UET->getArgumentTypeInfo()->getType(); + else + return UET->getArgumentExpr()->getType(); + } + } + return QualType(); +} + +/// Infer type from an arithmetic expression involving a sizeof. For example: +/// +/// malloc(sizeof(MyType) + padding); // infers 'MyType' +/// malloc(sizeof(MyType) * 32); // infers 'MyType' +/// malloc(32 * sizeof(MyType)); // infers 'MyType' +/// malloc(sizeof(MyType) << 1); // infers 'MyType' +/// ... +/// +/// More complex arithmetic expressions are supported, but are a heuristic, e.g. +/// when considering allocations for structs with flexible array members: +/// +/// malloc(sizeof(HasFlexArray) + sizeof(int) * 32); // infers 'HasFlexArray' +/// +static QualType inferPossibleTypeFromArithSizeofExpr(const Expr *E) { + const Expr *Arg = E->IgnoreParenImpCasts(); + // The argument is a lone sizeof expression. + if (QualType T = inferTypeFromSizeofExpr(Arg); !T.isNull()) + return T; + if (const auto *BO = dyn_cast<BinaryOperator>(Arg)) { + // Argument is an arithmetic expression. Cover common arithmetic patterns + // involving sizeof. + switch (BO->getOpcode()) { + case BO_Add: + case BO_Div: + case BO_Mul: + case BO_Shl: + case BO_Shr: + case BO_Sub: + if (QualType T = inferPossibleTypeFromArithSizeofExpr(BO->getLHS()); + !T.isNull()) + return T; + if (QualType T = inferPossibleTypeFromArithSizeofExpr(BO->getRHS()); + !T.isNull()) + return T; + break; + default: + break; + } + } + return QualType(); +} + +/// If the expression E is a reference to a variable, infer the type from a +/// variable's initializer if it contains a sizeof. Beware, this is a heuristic +/// and ignores if a variable is later reassigned. For example: +/// +/// size_t my_size = sizeof(MyType); +/// void *x = malloc(my_size); // infers 'MyType' +/// +static QualType inferPossibleTypeFromVarInitSizeofExpr(const Expr *E) { + const Expr *Arg = E->IgnoreParenImpCasts(); + if (const auto *DRE = dyn_cast<DeclRefExpr>(Arg)) { + if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) { + if (const Expr *Init = VD->getInit()) + return inferPossibleTypeFromArithSizeofExpr(Init); + } + } + return QualType(); +} + +/// Deduces the allocated type by checking if the allocation call's result +/// is immediately used in a cast expression. For example: +/// +/// MyType *x = (MyType *)malloc(4096); // infers 'MyType' +/// +static QualType inferPossibleTypeFromCastExpr(const CallExpr *CallE, + const CastExpr *CastE) { + if (!CastE) + return QualType(); + QualType PtrType = CastE->getType(); + if (PtrType->isPointerType()) + return PtrType->getPointeeType(); + return QualType(); +} + +QualType infer_alloc::inferPossibleType(const CallExpr *E, + const ASTContext &Ctx, + const CastExpr *CastE) { + QualType AllocType; + // First check arguments. + for (const Expr *Arg : E->arguments()) { + AllocType = inferPossibleTypeFromArithSizeofExpr(Arg); + if (AllocType.isNull()) + AllocType = inferPossibleTypeFromVarInitSizeofExpr(Arg); + if (!AllocType.isNull()) + break; + } + // Then check later casts. 
+ if (AllocType.isNull()) + AllocType = inferPossibleTypeFromCastExpr(E, CastE); + return AllocType; +} + +std::optional<llvm::AllocTokenMetadata> +infer_alloc::getAllocTokenMetadata(QualType T, const ASTContext &Ctx) { + llvm::AllocTokenMetadata ATMD; + + // Get unique type name. + PrintingPolicy Policy(Ctx.getLangOpts()); + Policy.SuppressTagKeyword = true; + Policy.FullyQualifiedName = true; + llvm::raw_svector_ostream TypeNameOS(ATMD.TypeName); + T.getCanonicalType().print(TypeNameOS, Policy); + + // Check if QualType contains a pointer. Implements a simple DFS to + // recursively check if a type contains a pointer type. + llvm::SmallPtrSet<const RecordDecl *, 4> VisitedRD; + bool IncompleteType = false; + ATMD.ContainsPointer = typeContainsPointer(T, VisitedRD, IncompleteType); + if (!ATMD.ContainsPointer && IncompleteType) + return std::nullopt; + + return ATMD; +} diff --git a/clang/lib/AST/StmtOpenACC.cpp b/clang/lib/AST/StmtOpenACC.cpp index 07e3de8..462a10d 100644 --- a/clang/lib/AST/StmtOpenACC.cpp +++ b/clang/lib/AST/StmtOpenACC.cpp @@ -12,7 +12,9 @@ #include "clang/AST/StmtOpenACC.h" #include "clang/AST/ASTContext.h" +#include "clang/AST/ExprCXX.h" #include "clang/AST/StmtCXX.h" + using namespace clang; OpenACCComputeConstruct * @@ -322,6 +324,58 @@ OpenACCAtomicConstruct *OpenACCAtomicConstruct::Create( return Inst; } +static std::pair<const Expr *, const Expr *> getBinaryOpArgs(const Expr *Op) { + if (const auto *BO = dyn_cast<BinaryOperator>(Op)) { + assert(BO->getOpcode() == BO_Assign); + return {BO->getLHS(), BO->getRHS()}; + } + + const auto *OO = cast<CXXOperatorCallExpr>(Op); + assert(OO->getOperator() == OO_Equal); + + return {OO->getArg(0), OO->getArg(1)}; +} + +const OpenACCAtomicConstruct::StmtInfo +OpenACCAtomicConstruct::getAssociatedStmtInfo() const { + // This ends up being a vastly simplified version of SemaOpenACCAtomic, since + // it doesn't have to worry about erroring out, but we should do a lot of + // asserts to ensure we don't get off into the weeds. + assert(getAssociatedStmt() && "invalid associated stmt?"); + + switch (AtomicKind) { + case OpenACCAtomicKind::None: + case OpenACCAtomicKind::Update: + case OpenACCAtomicKind::Capture: + assert(false && "Only 'read'/'write' have been implemented here"); + return {}; + case OpenACCAtomicKind::Read: { + // Read only supports the format 'v = x'; where both sides are a scalar + // expression. This can come in 2 forms; BinaryOperator or + // CXXOperatorCallExpr (rarely). + std::pair<const Expr *, const Expr *> BinaryArgs = + getBinaryOpArgs(cast<const Expr>(getAssociatedStmt())); + // We want the L-value for each side, so we ignore implicit casts. + return {BinaryArgs.first->IgnoreImpCasts(), + BinaryArgs.second->IgnoreImpCasts(), /*expr=*/nullptr}; + } + case OpenACCAtomicKind::Write: { + // Write supports only the format 'x = expr', where the expression is scalar + // type, and 'x' is a scalar l value. As above, this can come in 2 forms; + // Binary Operator or CXXOperatorCallExpr. + std::pair<const Expr *, const Expr *> BinaryArgs = + getBinaryOpArgs(cast<const Expr>(getAssociatedStmt())); + // We want the L-value for ONLY the X side, so we ignore implicit casts. For + // the right side (the expr), we emit it as an r-value so we need to + // maintain implicit casts. 
+    return {/*v=*/nullptr, BinaryArgs.first->IgnoreImpCasts(),
+            BinaryArgs.second};
+  }
+  }
+
+  llvm_unreachable("unknown OpenACC atomic kind");
+}
+
 OpenACCCacheConstruct *OpenACCCacheConstruct::CreateEmpty(const ASTContext &C,
                                                           unsigned NumVars) {
   void *Mem =
diff --git a/clang/lib/AST/TypePrinter.cpp b/clang/lib/AST/TypePrinter.cpp
index 2da7789..c18b2ea 100644
--- a/clang/lib/AST/TypePrinter.cpp
+++ b/clang/lib/AST/TypePrinter.cpp
@@ -2147,9 +2147,6 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
   }
   case attr::AArch64VectorPcs: OS << "aarch64_vector_pcs"; break;
   case attr::AArch64SVEPcs: OS << "aarch64_sve_pcs"; break;
-  case attr::DeviceKernel:
-    OS << T->getAttr()->getSpelling();
-    break;
   case attr::IntelOclBicc: OS << "inteloclbicc"; break;
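The new Inc*/Dec*Bitfield opcodes added to Interp.h and Opcodes.td truncate the value written back by ++/-- to the bit-field's declared width. A minimal sketch of the behavior this makes constant-evaluable; the test itself is hypothetical, not taken from the patch:

    struct S {
      unsigned u : 3; // holds 0..7
    };

    constexpr unsigned wrapAround() {
      S s{7};
      ++s.u; // 7 + 1 == 8 is truncated to the 3-bit width, leaving 0
      return s.u;
    }
    static_assert(wrapAround() == 0,
                  "bit-field increment wraps at the declared width");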
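Both evaluators now route SHUFPS/SHUFPD through the same per-lane index computation (interp__builtin_ia32_shuffle_generic and evalShuffleGeneric). A self-contained sketch of that rule for the four-element case, assuming the Intel semantics the helpers mirror: within each 128-bit lane, the low half of the result selects from operand A and the high half from operand B, with two immediate bits per element:

    #include <utility>

    constexpr std::pair<unsigned, unsigned> shufpsSource(unsigned DstIdx,
                                                         unsigned Mask) {
      constexpr unsigned NumElemPerLane = 4; // one 128-bit lane of 32-bit elements
      unsigned Lane = DstIdx / NumElemPerLane;
      unsigned ElemInLane = DstIdx % NumElemPerLane;
      unsigned SrcVec = ElemInLane < 2 ? 0u : 1u; // A for the low half, B for the high
      unsigned BitIndex = (DstIdx * 2) % 8;       // two mask bits per element
      unsigned Index = (Mask >> BitIndex) & 0x3;
      return {SrcVec, Lane * NumElemPerLane + Index};
    }

    // shufps(a, b, 0x4E) yields {a[2], a[3], b[0], b[1]}:
    static_assert(shufpsSource(0, 0x4E) == std::pair<unsigned, unsigned>(0, 2),
                  "element 0 comes from a[2]");
    static_assert(shufpsSource(3, 0x4E) == std::pair<unsigned, unsigned>(1, 1),
                  "element 3 comes from b[1]");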
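The PSLLDQ/PSRLDQ handlers shift each 16-byte lane by whole bytes, zero-filling the vacated positions; the InterpBuiltin.cpp and ExprConstant.cpp loops implement the same rule. A sketch of the left-shift case under that reading, using a plain aggregate in place of the interpreter's Pointer machinery:

    struct Bytes16 {
      unsigned char B[16];
    };

    constexpr Bytes16 pslldq(const Bytes16 &Src, unsigned Shift) {
      Bytes16 Dst = {};
      for (unsigned I = 0; I != 16; ++I)
        Dst.B[I] = I < Shift ? 0 : Src.B[I - Shift]; // zero-fill the low bytes
      return Dst;
    }

    constexpr Bytes16 Shifted = pslldq({{1, 2, 3}}, 2);
    static_assert(Shifted.B[2] == 1, "byte 0 moved up by two positions");
    static_assert(Shifted.B[0] == 0, "vacated low bytes are zero-filled");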
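__builtin_infer_alloc_token now constant-evaluates in both interpreters, deriving the token from a type inferred out of sizeof expressions in the call's arguments (per the note in InterpBuiltin.cpp, inference from a surrounding cast is not available in constant evaluation). A hypothetical usage sketch; the call shape is assumed from the inference logic in InferAlloc.cpp, and the token value depends on the configured mode, so none is asserted:

    struct WithPtr {
      int *P;
    };

    // Inferred allocation type: 'WithPtr', taken from the sizeof operand;
    // evaluates to a constant token unless a stateful token mode is in effect.
    constexpr __SIZE_TYPE__ Tok = __builtin_infer_alloc_token(sizeof(WithPtr));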
