Diffstat (limited to 'clang/lib/AST')
-rw-r--r-- | clang/lib/AST/ByteCode/Interp.h          |   7
-rw-r--r-- | clang/lib/AST/ByteCode/InterpBuiltin.cpp | 128
-rw-r--r-- | clang/lib/AST/ExprConstant.cpp           |  71
-rw-r--r-- | clang/lib/AST/OpenACCClause.cpp          |  14
-rw-r--r-- | clang/lib/AST/OpenMPClause.cpp           |  13
-rw-r--r-- | clang/lib/AST/StmtProfile.cpp            |   5
-rw-r--r-- | clang/lib/AST/TypeLoc.cpp                |  33
7 files changed, 194 insertions, 77 deletions
diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h
index bb0c458..57cc705 100644
--- a/clang/lib/AST/ByteCode/Interp.h
+++ b/clang/lib/AST/ByteCode/Interp.h
@@ -3183,9 +3183,8 @@ inline bool CopyArray(InterpState &S, CodePtr OpPC, uint32_t SrcIndex,
     if (!CheckLoad(S, OpPC, SP))
       return false;
 
-    const Pointer &DP = DestPtr.atIndex(DestIndex + I);
-    DP.deref<T>() = SP.deref<T>();
-    DP.initialize();
+    DestPtr.elem<T>(DestIndex + I) = SrcPtr.elem<T>(SrcIndex + I);
+    DestPtr.initializeElement(DestIndex + I);
   }
   return true;
 }
@@ -3700,7 +3699,7 @@ inline bool CheckDestruction(InterpState &S, CodePtr OpPC) {
 
 inline bool CheckArraySize(InterpState &S, CodePtr OpPC, uint64_t NumElems) {
   uint64_t Limit = S.getLangOpts().ConstexprStepLimit;
-  if (NumElems > Limit) {
+  if (Limit != 0 && NumElems > Limit) {
     S.FFDiag(S.Current->getSource(OpPC),
              diag::note_constexpr_new_exceeds_limits)
         << NumElems << Limit;
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 922d679..84c5ecc 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -1633,8 +1633,8 @@ static bool interp__builtin_elementwise_countzeroes(InterpState &S,
                                                     const InterpFrame *Frame,
                                                     const CallExpr *Call,
                                                     unsigned BuiltinID) {
-  const bool HasZeroArg = Call->getNumArgs() == 2;
-  const bool IsCTTZ = BuiltinID == Builtin::BI__builtin_elementwise_ctzg;
+  bool HasZeroArg = Call->getNumArgs() == 2;
+  bool IsCTTZ = BuiltinID == Builtin::BI__builtin_elementwise_ctzg;
   assert(Call->getNumArgs() == 1 || HasZeroArg);
   if (Call->getArg(0)->getType()->isIntegerType()) {
     PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
@@ -2447,18 +2447,18 @@ interp__builtin_x86_pack(InterpState &S, CodePtr, const CallExpr *E,
   const Pointer &Dst = S.Stk.peek<Pointer>();
   const ASTContext &ASTCtx = S.getASTContext();
 
-  const unsigned SrcBits = ASTCtx.getIntWidth(VT0->getElementType());
-  const unsigned LHSVecLen = VT0->getNumElements();
-  const unsigned SrcPerLane = 128 / SrcBits;
-  const unsigned Lanes = LHSVecLen * SrcBits / 128;
+  unsigned SrcBits = ASTCtx.getIntWidth(VT0->getElementType());
+  unsigned LHSVecLen = VT0->getNumElements();
+  unsigned SrcPerLane = 128 / SrcBits;
+  unsigned Lanes = LHSVecLen * SrcBits / 128;
 
   PrimType SrcT = *S.getContext().classify(VT0->getElementType());
   PrimType DstT = *S.getContext().classify(getElemType(Dst));
-  const bool IsUnsigend = getElemType(Dst)->isUnsignedIntegerType();
+  bool IsUnsigend = getElemType(Dst)->isUnsignedIntegerType();
 
   for (unsigned Lane = 0; Lane != Lanes; ++Lane) {
-    const unsigned BaseSrc = Lane * SrcPerLane;
-    const unsigned BaseDst = Lane * (2 * SrcPerLane);
+    unsigned BaseSrc = Lane * SrcPerLane;
+    unsigned BaseDst = Lane * (2 * SrcPerLane);
 
     for (unsigned I = 0; I != SrcPerLane; ++I) {
       INT_TYPE_SWITCH_NO_BOOL(SrcT, {
@@ -2596,9 +2596,9 @@ static bool interp__builtin_elementwise_triop_fp(
   FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
   llvm::RoundingMode RM = getRoundingMode(FPO);
 
-  const QualType Arg1Type = Call->getArg(0)->getType();
-  const QualType Arg2Type = Call->getArg(1)->getType();
-  const QualType Arg3Type = Call->getArg(2)->getType();
+  QualType Arg1Type = Call->getArg(0)->getType();
+  QualType Arg2Type = Call->getArg(1)->getType();
+  QualType Arg3Type = Call->getArg(2)->getType();
 
   // Non-vector floating point types.
   if (!Arg1Type->isVectorType()) {
@@ -2621,16 +2621,16 @@ static bool interp__builtin_elementwise_triop_fp(
   assert(Arg1Type->isVectorType() && Arg2Type->isVectorType() &&
          Arg3Type->isVectorType());
 
-  const VectorType *VecT = Arg1Type->castAs<VectorType>();
-  const QualType ElemT = VecT->getElementType();
-  unsigned NumElems = VecT->getNumElements();
+  const VectorType *VecTy = Arg1Type->castAs<VectorType>();
+  QualType ElemQT = VecTy->getElementType();
+  unsigned NumElems = VecTy->getNumElements();
 
-  assert(ElemT == Arg2Type->castAs<VectorType>()->getElementType() &&
-         ElemT == Arg3Type->castAs<VectorType>()->getElementType());
+  assert(ElemQT == Arg2Type->castAs<VectorType>()->getElementType() &&
+         ElemQT == Arg3Type->castAs<VectorType>()->getElementType());
   assert(NumElems == Arg2Type->castAs<VectorType>()->getNumElements() &&
          NumElems == Arg3Type->castAs<VectorType>()->getNumElements());
-  assert(ElemT->isRealFloatingType());
-  (void)ElemT;
+  assert(ElemQT->isRealFloatingType());
+  (void)ElemQT;
 
   const Pointer &VZ = S.Stk.pop<Pointer>();
   const Pointer &VY = S.Stk.pop<Pointer>();
@@ -2756,6 +2756,45 @@ static bool interp__builtin_ia32_pshuf(InterpState &S, CodePtr OpPC,
   return true;
 }
 
+static bool interp__builtin_ia32_test_op(
+    InterpState &S, CodePtr OpPC, const CallExpr *Call,
+    llvm::function_ref<bool(const APInt &A, const APInt &B)> Fn) {
+  const Pointer &RHS = S.Stk.pop<Pointer>();
+  const Pointer &LHS = S.Stk.pop<Pointer>();
+
+  assert(LHS.getNumElems() == RHS.getNumElems());
+
+  unsigned SourceLen = LHS.getNumElems();
+  QualType ElemQT = getElemType(LHS);
+  OptPrimType ElemPT = S.getContext().classify(ElemQT);
+  unsigned LaneWidth = S.getASTContext().getTypeSize(ElemQT);
+
+  APInt AWide(LaneWidth * SourceLen, 0);
+  APInt BWide(LaneWidth * SourceLen, 0);
+
+  for (unsigned I = 0; I != SourceLen; ++I) {
+    APInt ALane;
+    APInt BLane;
+
+    if (ElemQT->isIntegerType()) { // Get value.
+      INT_TYPE_SWITCH_NO_BOOL(*ElemPT, {
+        ALane = LHS.elem<T>(I).toAPSInt();
+        BLane = RHS.elem<T>(I).toAPSInt();
+      });
+    } else if (ElemQT->isFloatingType()) { // Get only sign bit.
+      using T = PrimConv<PT_Float>::T;
+      ALane = LHS.elem<T>(I).getAPFloat().bitcastToAPInt().isNegative();
+      BLane = RHS.elem<T>(I).getAPFloat().bitcastToAPInt().isNegative();
+    } else { // Must be integer or floating type.
+      return false;
+    }
+    AWide.insertBits(ALane, I * LaneWidth);
+    BWide.insertBits(BLane, I * LaneWidth);
+  }
+  pushInteger(S, Fn(AWide, BWide), Call->getType());
+  return true;
+}
+
 static bool interp__builtin_elementwise_triop(
     InterpState &S, CodePtr OpPC, const CallExpr *Call,
     llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &)>
@@ -2775,7 +2814,7 @@ static bool interp__builtin_elementwise_triop(
   }
 
   const auto *VecT = Arg0Type->castAs<VectorType>();
-  const PrimType &ElemT = *S.getContext().classify(VecT->getElementType());
+  PrimType ElemT = *S.getContext().classify(VecT->getElementType());
   unsigned NumElems = VecT->getNumElements();
   bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
 
@@ -2847,9 +2886,9 @@ static bool interp__builtin_x86_insert_subvector(InterpState &S, CodePtr OpPC,
   unsigned Lane = static_cast<unsigned>(Index % NumLanes);
   unsigned InsertPos = Lane * SubElements;
 
-  PrimType ElemPT = BaseVec.getFieldDesc()->getPrimType();
+  PrimType ElemT = BaseVec.getFieldDesc()->getPrimType();
 
-  TYPE_SWITCH(ElemPT, {
+  TYPE_SWITCH(ElemT, {
     for (unsigned I = 0; I != BaseElements; ++I)
       Dst.elem<T>(I) = BaseVec.elem<T>(I);
     for (unsigned I = 0; I != SubElements; ++I)
@@ -2872,12 +2911,12 @@ static bool interp__builtin_ia32_pternlog(InterpState &S, CodePtr OpPC,
   const Pointer &Dst = S.Stk.peek<Pointer>();
   unsigned DstLen = A.getNumElems();
 
-  const QualType ElemQT = getElemType(A);
-  const OptPrimType ElemPT = S.getContext().classify(ElemQT);
+  QualType ElemQT = getElemType(A);
+  OptPrimType ElemT = S.getContext().classify(ElemQT);
   unsigned LaneWidth = S.getASTContext().getTypeSize(ElemQT);
   bool DstUnsigned = ElemQT->isUnsignedIntegerOrEnumerationType();
 
-  INT_TYPE_SWITCH_NO_BOOL(*ElemPT, {
+  INT_TYPE_SWITCH_NO_BOOL(*ElemT, {
     for (unsigned I = 0; I != DstLen; ++I) {
       APInt ALane = A.elem<T>(I).toAPSInt();
       APInt BLane = B.elem<T>(I).toAPSInt();
@@ -2916,13 +2955,13 @@ static bool interp__builtin_vec_ext(InterpState &S, CodePtr OpPC,
   unsigned Index =
       static_cast<unsigned>(ImmAPS.getZExtValue() & (NumElems - 1));
 
-  PrimType ElemPT = Vec.getFieldDesc()->getPrimType();
+  PrimType ElemT = Vec.getFieldDesc()->getPrimType();
   // FIXME(#161685): Replace float+int split with a numeric-only type switch
-  if (ElemPT == PT_Float) {
+  if (ElemT == PT_Float) {
     S.Stk.push<Floating>(Vec.elem<Floating>(Index));
     return true;
   }
-  INT_TYPE_SWITCH_NO_BOOL(ElemPT, {
+  INT_TYPE_SWITCH_NO_BOOL(ElemT, {
     APSInt V = Vec.elem<T>(Index).toAPSInt();
     pushInteger(S, V, Call->getType());
   });
@@ -2947,8 +2986,8 @@ static bool interp__builtin_vec_set(InterpState &S, CodePtr OpPC,
   unsigned Index =
      static_cast<unsigned>(ImmAPS.getZExtValue() & (NumElems - 1));
 
-  PrimType ElemPT = Base.getFieldDesc()->getPrimType();
-  INT_TYPE_SWITCH_NO_BOOL(ElemPT, {
+  PrimType ElemT = Base.getFieldDesc()->getPrimType();
+  INT_TYPE_SWITCH_NO_BOOL(ElemT, {
     for (unsigned I = 0; I != NumElems; ++I)
       Dst.elem<T>(I) = Base.elem<T>(I);
     Dst.elem<T>(Index) = static_cast<T>(ValAPS);
@@ -3712,7 +3751,34 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
         S, OpPC, Call, [](const APSInt &F, const APSInt &T, const APSInt &C) {
          return ((APInt)C).isNegative() ? T : F;
        });
-
+  case X86::BI__builtin_ia32_ptestz128:
+  case X86::BI__builtin_ia32_ptestz256:
+  case X86::BI__builtin_ia32_vtestzps:
+  case X86::BI__builtin_ia32_vtestzps256:
+  case X86::BI__builtin_ia32_vtestzpd:
+  case X86::BI__builtin_ia32_vtestzpd256:
+    return interp__builtin_ia32_test_op(
+        S, OpPC, Call,
+        [](const APInt &A, const APInt &B) { return (A & B) == 0; });
+  case X86::BI__builtin_ia32_ptestc128:
+  case X86::BI__builtin_ia32_ptestc256:
+  case X86::BI__builtin_ia32_vtestcps:
+  case X86::BI__builtin_ia32_vtestcps256:
+  case X86::BI__builtin_ia32_vtestcpd:
+  case X86::BI__builtin_ia32_vtestcpd256:
+    return interp__builtin_ia32_test_op(
+        S, OpPC, Call,
+        [](const APInt &A, const APInt &B) { return (~A & B) == 0; });
+  case X86::BI__builtin_ia32_ptestnzc128:
+  case X86::BI__builtin_ia32_ptestnzc256:
+  case X86::BI__builtin_ia32_vtestnzcps:
+  case X86::BI__builtin_ia32_vtestnzcps256:
+  case X86::BI__builtin_ia32_vtestnzcpd:
+  case X86::BI__builtin_ia32_vtestnzcpd256:
+    return interp__builtin_ia32_test_op(
+        S, OpPC, Call, [](const APInt &A, const APInt &B) {
+          return ((A & B) != 0) && ((~A & B) != 0);
+        });
   case X86::BI__builtin_ia32_selectb_128:
   case X86::BI__builtin_ia32_selectb_256:
   case X86::BI__builtin_ia32_selectb_512:
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 35a866e..dfdfef2 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -990,7 +990,7 @@ namespace {
       // of arrays to avoid exhausting the system resources, as initialization
      // of each element is likely to take some number of steps anyway.
      uint64_t Limit = Ctx.getLangOpts().ConstexprStepLimit;
-      if (ElemCount > Limit) {
+      if (Limit != 0 && ElemCount > Limit) {
        if (Diag)
          FFDiag(Loc, diag::note_constexpr_new_exceeds_limits)
              << ElemCount << Limit;
@@ -1016,6 +1016,9 @@ namespace {
     }
 
     bool nextStep(const Stmt *S) {
+      if (Ctx.getLangOpts().ConstexprStepLimit == 0)
+        return true;
+
       if (!StepsLeft) {
         FFDiag(S->getBeginLoc(), diag::note_constexpr_step_limit_exceeded);
         return false;
@@ -1186,7 +1189,8 @@ namespace {
     /// Should we continue evaluation as much as possible after encountering a
     /// construct which can't be reduced to a value?
     bool keepEvaluatingAfterFailure() const override {
-      if (!StepsLeft)
+      uint64_t Limit = Ctx.getLangOpts().ConstexprStepLimit;
+      if (Limit != 0 && !StepsLeft)
         return false;
 
       switch (EvalMode) {
@@ -13905,6 +13909,40 @@ static bool getBuiltinAlignArguments(const CallExpr *E, EvalInfo &Info,
 
 bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
                                             unsigned BuiltinOp) {
+  auto EvalTestOp = [&](llvm::function_ref<bool(const APInt &, const APInt &)>
+                            Fn) {
+    APValue SourceLHS, SourceRHS;
+    if (!EvaluateAsRValue(Info, E->getArg(0), SourceLHS) ||
+        !EvaluateAsRValue(Info, E->getArg(1), SourceRHS))
+      return false;
+
+    unsigned SourceLen = SourceLHS.getVectorLength();
+    const VectorType *VT = E->getArg(0)->getType()->castAs<VectorType>();
+    QualType ElemQT = VT->getElementType();
+    unsigned LaneWidth = Info.Ctx.getTypeSize(ElemQT);
+
+    APInt AWide(LaneWidth * SourceLen, 0);
+    APInt BWide(LaneWidth * SourceLen, 0);
+
+    for (unsigned I = 0; I != SourceLen; ++I) {
+      APInt ALane;
+      APInt BLane;
+      if (ElemQT->isIntegerType()) { // Get value.
+        ALane = SourceLHS.getVectorElt(I).getInt();
+        BLane = SourceRHS.getVectorElt(I).getInt();
+      } else if (ElemQT->isFloatingType()) { // Get only sign bit.
+        ALane =
+            SourceLHS.getVectorElt(I).getFloat().bitcastToAPInt().isNegative();
+        BLane =
+            SourceRHS.getVectorElt(I).getFloat().bitcastToAPInt().isNegative();
+      } else { // Must be integer or floating type.
+        return false;
+      }
+      AWide.insertBits(ALane, I * LaneWidth);
+      BWide.insertBits(BLane, I * LaneWidth);
+    }
+    return Success(Fn(AWide, BWide), E);
+  };
 
   auto HandleMaskBinOp =
       [&](llvm::function_ref<APSInt(const APSInt &, const APSInt &)> Fn)
@@ -15018,7 +15056,34 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
       Result.setBitVal(P++, Val[I]);
     return Success(Result, E);
   }
-
+  case X86::BI__builtin_ia32_ptestz128:
+  case X86::BI__builtin_ia32_ptestz256:
+  case X86::BI__builtin_ia32_vtestzps:
+  case X86::BI__builtin_ia32_vtestzps256:
+  case X86::BI__builtin_ia32_vtestzpd:
+  case X86::BI__builtin_ia32_vtestzpd256: {
+    return EvalTestOp(
+        [](const APInt &A, const APInt &B) { return (A & B) == 0; });
+  }
+  case X86::BI__builtin_ia32_ptestc128:
+  case X86::BI__builtin_ia32_ptestc256:
+  case X86::BI__builtin_ia32_vtestcps:
+  case X86::BI__builtin_ia32_vtestcps256:
+  case X86::BI__builtin_ia32_vtestcpd:
+  case X86::BI__builtin_ia32_vtestcpd256: {
+    return EvalTestOp(
+        [](const APInt &A, const APInt &B) { return (~A & B) == 0; });
+  }
+  case X86::BI__builtin_ia32_ptestnzc128:
+  case X86::BI__builtin_ia32_ptestnzc256:
+  case X86::BI__builtin_ia32_vtestnzcps:
+  case X86::BI__builtin_ia32_vtestnzcps256:
+  case X86::BI__builtin_ia32_vtestnzcpd:
+  case X86::BI__builtin_ia32_vtestnzcpd256: {
+    return EvalTestOp([](const APInt &A, const APInt &B) {
+      return ((A & B) != 0) && ((~A & B) != 0);
+    });
+  }
   case X86::BI__builtin_ia32_kandqi:
   case X86::BI__builtin_ia32_kandhi:
   case X86::BI__builtin_ia32_kandsi:
diff --git a/clang/lib/AST/OpenACCClause.cpp b/clang/lib/AST/OpenACCClause.cpp
index 6c4bc7c..17c6bec 100644
--- a/clang/lib/AST/OpenACCClause.cpp
+++ b/clang/lib/AST/OpenACCClause.cpp
@@ -506,11 +506,17 @@ OpenACCDeviceTypeClause *OpenACCDeviceTypeClause::Create(
 OpenACCReductionClause *OpenACCReductionClause::Create(
     const ASTContext &C, SourceLocation BeginLoc, SourceLocation LParenLoc,
     OpenACCReductionOperator Operator, ArrayRef<Expr *> VarList,
-    ArrayRef<OpenACCReductionRecipe> Recipes,
+    ArrayRef<OpenACCReductionRecipeWithStorage> Recipes,
     SourceLocation EndLoc) {
-  void *Mem = C.Allocate(
-      OpenACCReductionClause::totalSizeToAlloc<Expr *, OpenACCReductionRecipe>(
-          VarList.size(), Recipes.size()));
+  size_t NumCombiners = llvm::accumulate(
+      Recipes, 0, [](size_t Num, const OpenACCReductionRecipe &R) {
+        return Num + R.CombinerRecipes.size();
+      });
+
+  void *Mem = C.Allocate(OpenACCReductionClause::totalSizeToAlloc<
+                         Expr *, OpenACCReductionRecipe,
+                         OpenACCReductionRecipe::CombinerRecipe>(
+      VarList.size(), Recipes.size(), NumCombiners));
   return new (Mem) OpenACCReductionClause(BeginLoc, LParenLoc, Operator,
                                           VarList, Recipes, EndLoc);
 }
diff --git a/clang/lib/AST/OpenMPClause.cpp b/clang/lib/AST/OpenMPClause.cpp
index 2ce4419..791df7e 100644
--- a/clang/lib/AST/OpenMPClause.cpp
+++ b/clang/lib/AST/OpenMPClause.cpp
@@ -309,6 +309,12 @@ OMPClause::child_range OMPIfClause::used_children() {
   return child_range(&Condition, &Condition + 1);
 }
 
+OMPClause::child_range OMPNowaitClause::used_children() {
+  if (Condition)
+    return child_range(&Condition, &Condition + 1);
+  return children();
+}
+
 OMPClause::child_range OMPGrainsizeClause::used_children() {
   if (Stmt **C = getAddrOfExprAsWritten(getPreInitStmt()))
     return child_range(C, C + 1);
@@ -2113,8 +2119,13 @@ void OMPClausePrinter::VisitOMPOrderedClause(OMPOrderedClause *Node) {
   }
 }
 
-void OMPClausePrinter::VisitOMPNowaitClause(OMPNowaitClause *) {
+void OMPClausePrinter::VisitOMPNowaitClause(OMPNowaitClause *Node) {
   OS << "nowait";
+  if (auto *Cond = Node->getCondition()) {
+    OS << "(";
+    Cond->printPretty(OS, nullptr, Policy, 0);
+    OS << ")";
+  }
 }
 
 void OMPClausePrinter::VisitOMPUntiedClause(OMPUntiedClause *) {
diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp
index 3cd033e..05b64cc 100644
--- a/clang/lib/AST/StmtProfile.cpp
+++ b/clang/lib/AST/StmtProfile.cpp
@@ -585,7 +585,10 @@ void OMPClauseProfiler::VisitOMPOrderedClause(const OMPOrderedClause *C) {
     Profiler->VisitStmt(Num);
 }
 
-void OMPClauseProfiler::VisitOMPNowaitClause(const OMPNowaitClause *) {}
+void OMPClauseProfiler::VisitOMPNowaitClause(const OMPNowaitClause *C) {
+  if (C->getCondition())
+    Profiler->VisitStmt(C->getCondition());
+}
 
 void OMPClauseProfiler::VisitOMPUntiedClause(const OMPUntiedClause *) {}
 
diff --git a/clang/lib/AST/TypeLoc.cpp b/clang/lib/AST/TypeLoc.cpp
index 55476e2..e952e82 100644
--- a/clang/lib/AST/TypeLoc.cpp
+++ b/clang/lib/AST/TypeLoc.cpp
@@ -494,39 +494,6 @@ NestedNameSpecifierLoc TypeLoc::getPrefix() const {
   }
 }
 
-SourceLocation TypeLoc::getNonPrefixBeginLoc() const {
-  switch (getTypeLocClass()) {
-  case TypeLoc::TemplateSpecialization: {
-    auto TL = castAs<TemplateSpecializationTypeLoc>();
-    SourceLocation Loc = TL.getTemplateKeywordLoc();
-    if (!Loc.isValid())
-      Loc = TL.getTemplateNameLoc();
-    return Loc;
-  }
-  case TypeLoc::DeducedTemplateSpecialization: {
-    auto TL = castAs<DeducedTemplateSpecializationTypeLoc>();
-    SourceLocation Loc = TL.getTemplateKeywordLoc();
-    if (!Loc.isValid())
-      Loc = TL.getTemplateNameLoc();
-    return Loc;
-  }
-  case TypeLoc::DependentName:
-    return castAs<DependentNameTypeLoc>().getNameLoc();
-  case TypeLoc::Enum:
-  case TypeLoc::Record:
-  case TypeLoc::InjectedClassName:
-    return castAs<TagTypeLoc>().getNameLoc();
-  case TypeLoc::Typedef:
-    return castAs<TypedefTypeLoc>().getNameLoc();
-  case TypeLoc::UnresolvedUsing:
-    return castAs<UnresolvedUsingTypeLoc>().getNameLoc();
-  case TypeLoc::Using:
-    return castAs<UsingTypeLoc>().getNameLoc();
-  default:
-    return getBeginLoc();
-  }
-}
-
 SourceLocation TypeLoc::getNonElaboratedBeginLoc() const {
   // For elaborated types (e.g. `struct a::A`) we want the portion after the
   // `struct` but including the namespace qualifier, `a::`.
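
Note on the x86 test-op additions above: both constant evaluators (the ByteCode interpreter's InterpretBuiltin and ExprConstant's IntExprEvaluator) now fold the SSE4.1/AVX ptest/vtest builtins by concatenating the vector lanes into wide APInts and applying the ZF/CF predicates. A minimal sketch of what that enables at compile time, assuming SSE4.1 is enabled and that these builtins are accepted in constant expressions once this lands; the vector values and asserts below are illustrative, not taken from the patch's own tests:

// Hypothetical illustration (not from the patch): compile with e.g.
// -msse4.1 -std=c++20.
typedef long long __v2di __attribute__((__vector_size__(16)));

constexpr __v2di Zero = {0, 0};
constexpr __v2di Ones = {-1, -1};
constexpr __v2di A = {1, 0};
constexpr __v2di B = {1, 2};

// ptestz: ZF = ((a & b) == 0), mirroring the (A & B) == 0 lambda.
static_assert(__builtin_ia32_ptestz128(Zero, Ones) == 1, "a & b is all zeros");
// ptestc: CF = ((~a & b) == 0), mirroring the (~A & B) == 0 lambda.
static_assert(__builtin_ia32_ptestc128(Ones, B) == 1, "~a & b is all zeros");
// ptestnzc: 1 only when both ZF and CF would be clear.
static_assert(__builtin_ia32_ptestnzc128(A, B) == 1, "neither test is zero");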