Diffstat (limited to 'clang/lib/AST')
-rw-r--r--  clang/lib/AST/ByteCode/Interp.h           |   2
-rw-r--r--  clang/lib/AST/ByteCode/InterpBuiltin.cpp  | 206
-rw-r--r--  clang/lib/AST/Decl.cpp                    |   4
-rw-r--r--  clang/lib/AST/ExprConstant.cpp            | 282
-rw-r--r--  clang/lib/AST/OpenACCClause.cpp           |  14
-rw-r--r--  clang/lib/AST/OpenMPClause.cpp            |  13
-rw-r--r--  clang/lib/AST/StmtProfile.cpp             |   5
-rw-r--r--  clang/lib/AST/TypeLoc.cpp                 |  33
8 files changed, 515 insertions, 44 deletions
diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h
index a9c71c7..57cc705 100644
--- a/clang/lib/AST/ByteCode/Interp.h
+++ b/clang/lib/AST/ByteCode/Interp.h
@@ -3699,7 +3699,7 @@ inline bool CheckDestruction(InterpState &S, CodePtr OpPC) {
 
 inline bool CheckArraySize(InterpState &S, CodePtr OpPC, uint64_t NumElems) {
   uint64_t Limit = S.getLangOpts().ConstexprStepLimit;
-  if (NumElems > Limit) {
+  if (Limit != 0 && NumElems > Limit) {
     S.FFDiag(S.Current->getSource(OpPC),
              diag::note_constexpr_new_exceeds_limits)
         << NumElems << Limit;
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 3811fb0..2d3cb6a 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -2587,6 +2587,82 @@ static bool interp__builtin_ia32_pmul(
   return true;
 }
 
+static bool interp_builtin_horizontal_int_binop(
+    InterpState &S, CodePtr OpPC, const CallExpr *Call,
+    llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) {
+  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
+  PrimType ElemT = *S.getContext().classify(VT->getElementType());
+  bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
+
+  const Pointer &RHS = S.Stk.pop<Pointer>();
+  const Pointer &LHS = S.Stk.pop<Pointer>();
+  const Pointer &Dst = S.Stk.peek<Pointer>();
+  unsigned NumElts = VT->getNumElements();
+  unsigned EltBits = S.getASTContext().getIntWidth(VT->getElementType());
+  unsigned EltsPerLane = 128 / EltBits;
+  unsigned Lanes = NumElts * EltBits / 128;
+  unsigned DestIndex = 0;
+
+  for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
+    unsigned LaneStart = Lane * EltsPerLane;
+    for (unsigned I = 0; I < EltsPerLane; I += 2) {
+      INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+        APSInt Elem1 = LHS.elem<T>(LaneStart + I).toAPSInt();
+        APSInt Elem2 = LHS.elem<T>(LaneStart + I + 1).toAPSInt();
+        APSInt ResL = APSInt(Fn(Elem1, Elem2), DestUnsigned);
+        Dst.elem<T>(DestIndex++) = static_cast<T>(ResL);
+      });
+    }
+
+    for (unsigned I = 0; I < EltsPerLane; I += 2) {
+      INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+        APSInt Elem1 = RHS.elem<T>(LaneStart + I).toAPSInt();
+        APSInt Elem2 = RHS.elem<T>(LaneStart + I + 1).toAPSInt();
+        APSInt ResR = APSInt(Fn(Elem1, Elem2), DestUnsigned);
+        Dst.elem<T>(DestIndex++) = static_cast<T>(ResR);
+      });
+    }
+  }
+  Dst.initializeAllElements();
+  return true;
+}
+
+static bool interp_builtin_horizontal_fp_binop(
+    InterpState &S, CodePtr OpPC, const CallExpr *Call,
+    llvm::function_ref<APFloat(const APFloat &, const APFloat &,
+                               llvm::RoundingMode)>
+        Fn) {
+  const Pointer &RHS = S.Stk.pop<Pointer>();
+  const Pointer &LHS = S.Stk.pop<Pointer>();
+  const Pointer &Dst = S.Stk.peek<Pointer>();
+  FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
+  llvm::RoundingMode RM = getRoundingMode(FPO);
+  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
+
+  unsigned NumElts = VT->getNumElements();
+  unsigned EltBits = S.getASTContext().getTypeSize(VT->getElementType());
+  unsigned NumLanes = NumElts * EltBits / 128;
+  unsigned NumElemsPerLane = NumElts / NumLanes;
+  unsigned HalfElemsPerLane = NumElemsPerLane / 2;
+
+  for (unsigned L = 0; L != NumElts; L += NumElemsPerLane) {
+    using T = PrimConv<PT_Float>::T;
+    for (unsigned E = 0; E != HalfElemsPerLane; ++E) {
+      APFloat Elem1 = LHS.elem<T>(L + (2 * E) + 0).getAPFloat();
+      APFloat Elem2 = LHS.elem<T>(L + (2 * E) + 1).getAPFloat();
+      Dst.elem<T>(L + E) = static_cast<T>(Fn(Elem1, Elem2, RM));
+    }
+    for (unsigned E = 0; E != HalfElemsPerLane; ++E) {
+      APFloat Elem1 = RHS.elem<T>(L + (2 * E) + 0).getAPFloat();
+      APFloat Elem2 = RHS.elem<T>(L + (2 * E) + 1).getAPFloat();
+      Dst.elem<T>(L + E + HalfElemsPerLane) =
+          static_cast<T>(Fn(Elem1, Elem2, RM));
+    }
+  }
+  Dst.initializeAllElements();
+  return true;
+}
+
 static bool interp__builtin_elementwise_triop_fp(
     InterpState &S, CodePtr OpPC, const CallExpr *Call,
     llvm::function_ref<APFloat(const APFloat &, const APFloat &,
@@ -2756,6 +2832,45 @@ static bool interp__builtin_ia32_pshuf(InterpState &S, CodePtr OpPC,
   return true;
 }
 
+static bool interp__builtin_ia32_test_op(
+    InterpState &S, CodePtr OpPC, const CallExpr *Call,
+    llvm::function_ref<bool(const APInt &A, const APInt &B)> Fn) {
+  const Pointer &RHS = S.Stk.pop<Pointer>();
+  const Pointer &LHS = S.Stk.pop<Pointer>();
+
+  assert(LHS.getNumElems() == RHS.getNumElems());
+
+  unsigned SourceLen = LHS.getNumElems();
+  QualType ElemQT = getElemType(LHS);
+  OptPrimType ElemPT = S.getContext().classify(ElemQT);
+  unsigned LaneWidth = S.getASTContext().getTypeSize(ElemQT);
+
+  APInt AWide(LaneWidth * SourceLen, 0);
+  APInt BWide(LaneWidth * SourceLen, 0);
+
+  for (unsigned I = 0; I != SourceLen; ++I) {
+    APInt ALane;
+    APInt BLane;
+
+    if (ElemQT->isIntegerType()) { // Get value.
+      INT_TYPE_SWITCH_NO_BOOL(*ElemPT, {
+        ALane = LHS.elem<T>(I).toAPSInt();
+        BLane = RHS.elem<T>(I).toAPSInt();
+      });
+    } else if (ElemQT->isFloatingType()) { // Get only sign bit.
+      using T = PrimConv<PT_Float>::T;
+      ALane = LHS.elem<T>(I).getAPFloat().bitcastToAPInt().isNegative();
+      BLane = RHS.elem<T>(I).getAPFloat().bitcastToAPInt().isNegative();
+    } else { // Must be integer or floating type.
+      return false;
+    }
+    AWide.insertBits(ALane, I * LaneWidth);
+    BWide.insertBits(BLane, I * LaneWidth);
+  }
+  pushInteger(S, Fn(AWide, BWide), Call->getType());
+  return true;
+}
+
 static bool interp__builtin_elementwise_triop(
     InterpState &S, CodePtr OpPC, const CallExpr *Call,
     llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &)>
@@ -3626,6 +3741,53 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
   case Builtin::BI__builtin_elementwise_min:
     return interp__builtin_elementwise_maxmin(S, OpPC, Call, BuiltinID);
 
+  case clang::X86::BI__builtin_ia32_phaddw128:
+  case clang::X86::BI__builtin_ia32_phaddw256:
+  case clang::X86::BI__builtin_ia32_phaddd128:
+  case clang::X86::BI__builtin_ia32_phaddd256:
+    return interp_builtin_horizontal_int_binop(
+        S, OpPC, Call,
+        [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
+  case clang::X86::BI__builtin_ia32_phaddsw128:
+  case clang::X86::BI__builtin_ia32_phaddsw256:
+    return interp_builtin_horizontal_int_binop(
+        S, OpPC, Call,
+        [](const APSInt &LHS, const APSInt &RHS) { return LHS.sadd_sat(RHS); });
+  case clang::X86::BI__builtin_ia32_phsubw128:
+  case clang::X86::BI__builtin_ia32_phsubw256:
+  case clang::X86::BI__builtin_ia32_phsubd128:
+  case clang::X86::BI__builtin_ia32_phsubd256:
+    return interp_builtin_horizontal_int_binop(
+        S, OpPC, Call,
+        [](const APSInt &LHS, const APSInt &RHS) { return LHS - RHS; });
+  case clang::X86::BI__builtin_ia32_phsubsw128:
+  case clang::X86::BI__builtin_ia32_phsubsw256:
+    return interp_builtin_horizontal_int_binop(
+        S, OpPC, Call,
+        [](const APSInt &LHS, const APSInt &RHS) { return LHS.ssub_sat(RHS); });
+  case clang::X86::BI__builtin_ia32_haddpd:
+  case clang::X86::BI__builtin_ia32_haddps:
+  case clang::X86::BI__builtin_ia32_haddpd256:
+  case clang::X86::BI__builtin_ia32_haddps256:
+    return interp_builtin_horizontal_fp_binop(
+        S, OpPC, Call,
+        [](const APFloat &LHS, const APFloat &RHS, llvm::RoundingMode RM) {
+          APFloat F = LHS;
+          F.add(RHS, RM);
+          return F;
+        });
+  case clang::X86::BI__builtin_ia32_hsubpd:
+  case clang::X86::BI__builtin_ia32_hsubps:
+  case clang::X86::BI__builtin_ia32_hsubpd256:
+  case clang::X86::BI__builtin_ia32_hsubps256:
+    return interp_builtin_horizontal_fp_binop(
+        S, OpPC, Call,
+        [](const APFloat &LHS, const APFloat &RHS, llvm::RoundingMode RM) {
+          APFloat F = LHS;
+          F.subtract(RHS, RM);
+          return F;
+        });
+
   case clang::X86::BI__builtin_ia32_pmuldq128:
   case clang::X86::BI__builtin_ia32_pmuldq256:
   case clang::X86::BI__builtin_ia32_pmuldq512:
@@ -3656,6 +3818,21 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
           return F;
         });
 
+  case X86::BI__builtin_ia32_vpmadd52luq128:
+  case X86::BI__builtin_ia32_vpmadd52luq256:
+  case X86::BI__builtin_ia32_vpmadd52luq512:
+    return interp__builtin_elementwise_triop(
+        S, OpPC, Call, [](const APSInt &A, const APSInt &B, const APSInt &C) {
+          return A + (B.trunc(52) * C.trunc(52)).zext(64);
+        });
+  case X86::BI__builtin_ia32_vpmadd52huq128:
+  case X86::BI__builtin_ia32_vpmadd52huq256:
+  case X86::BI__builtin_ia32_vpmadd52huq512:
+    return interp__builtin_elementwise_triop(
+        S, OpPC, Call, [](const APSInt &A, const APSInt &B, const APSInt &C) {
+          return A + llvm::APIntOps::mulhu(B.trunc(52), C.trunc(52)).zext(64);
+        });
+
   case X86::BI__builtin_ia32_vpshldd128:
   case X86::BI__builtin_ia32_vpshldd256:
   case X86::BI__builtin_ia32_vpshldd512:
@@ -3712,7 +3889,34 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
         S, OpPC, Call, [](const APSInt &F, const APSInt &T, const APSInt &C) {
           return ((APInt)C).isNegative() ? T : F;
         });
-
+  case X86::BI__builtin_ia32_ptestz128:
+  case X86::BI__builtin_ia32_ptestz256:
+  case X86::BI__builtin_ia32_vtestzps:
+  case X86::BI__builtin_ia32_vtestzps256:
+  case X86::BI__builtin_ia32_vtestzpd:
+  case X86::BI__builtin_ia32_vtestzpd256:
+    return interp__builtin_ia32_test_op(
+        S, OpPC, Call,
+        [](const APInt &A, const APInt &B) { return (A & B) == 0; });
+  case X86::BI__builtin_ia32_ptestc128:
+  case X86::BI__builtin_ia32_ptestc256:
+  case X86::BI__builtin_ia32_vtestcps:
+  case X86::BI__builtin_ia32_vtestcps256:
+  case X86::BI__builtin_ia32_vtestcpd:
+  case X86::BI__builtin_ia32_vtestcpd256:
+    return interp__builtin_ia32_test_op(
+        S, OpPC, Call,
+        [](const APInt &A, const APInt &B) { return (~A & B) == 0; });
+  case X86::BI__builtin_ia32_ptestnzc128:
+  case X86::BI__builtin_ia32_ptestnzc256:
+  case X86::BI__builtin_ia32_vtestnzcps:
+  case X86::BI__builtin_ia32_vtestnzcps256:
+  case X86::BI__builtin_ia32_vtestnzcpd:
+  case X86::BI__builtin_ia32_vtestnzcpd256:
+    return interp__builtin_ia32_test_op(
+        S, OpPC, Call, [](const APInt &A, const APInt &B) {
+          return ((A & B) != 0) && ((~A & B) != 0);
+        });
   case X86::BI__builtin_ia32_selectb_128:
   case X86::BI__builtin_ia32_selectb_256:
   case X86::BI__builtin_ia32_selectb_512:
diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp
index c734155..69cbf6e 100644
--- a/clang/lib/AST/Decl.cpp
+++ b/clang/lib/AST/Decl.cpp
@@ -3316,6 +3316,10 @@ bool FunctionDecl::isImmediateEscalating() const {
       CD && CD->isInheritingConstructor())
     return CD->getInheritedConstructor().getConstructor();
 
+  // Destructors are not immediate escalating.
+  if (isa<CXXDestructorDecl>(this))
+    return false;
+
   // - a function that results from the instantiation of a templated entity
   //   defined with the constexpr specifier.
   TemplatedKind TK = getTemplatedKind();
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 35a866e..51c0382 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -990,7 +990,7 @@ namespace {
       // of arrays to avoid exhausting the system resources, as initialization
      // of each element is likely to take some number of steps anyway.
       uint64_t Limit = Ctx.getLangOpts().ConstexprStepLimit;
-      if (ElemCount > Limit) {
+      if (Limit != 0 && ElemCount > Limit) {
         if (Diag)
           FFDiag(Loc, diag::note_constexpr_new_exceeds_limits)
               << ElemCount << Limit;
@@ -1016,6 +1016,9 @@ namespace {
     }
 
     bool nextStep(const Stmt *S) {
+      if (Ctx.getLangOpts().ConstexprStepLimit == 0)
+        return true;
+
       if (!StepsLeft) {
         FFDiag(S->getBeginLoc(), diag::note_constexpr_step_limit_exceeded);
         return false;
@@ -1186,7 +1189,8 @@ namespace {
     /// Should we continue evaluation as much as possible after encountering a
     /// construct which can't be reduced to a value?
     bool keepEvaluatingAfterFailure() const override {
-      if (!StepsLeft)
+      uint64_t Limit = Ctx.getLangOpts().ConstexprStepLimit;
+      if (Limit != 0 && !StepsLeft)
         return false;
 
       switch (EvalMode) {
@@ -11970,6 +11974,54 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
     return Success(APValue(ResultElements.data(), ResultElements.size()), E);
   }
 
+  case X86::BI__builtin_ia32_vpmadd52luq128:
+  case X86::BI__builtin_ia32_vpmadd52luq256:
+  case X86::BI__builtin_ia32_vpmadd52luq512: {
+    APValue A, B, C;
+    if (!EvaluateAsRValue(Info, E->getArg(0), A) ||
+        !EvaluateAsRValue(Info, E->getArg(1), B) ||
+        !EvaluateAsRValue(Info, E->getArg(2), C))
+      return false;
+
+    unsigned ALen = A.getVectorLength();
+    SmallVector<APValue, 4> ResultElements;
+    ResultElements.reserve(ALen);
+
+    for (unsigned EltNum = 0; EltNum < ALen; EltNum += 1) {
+      APInt AElt = A.getVectorElt(EltNum).getInt();
+      APInt BElt = B.getVectorElt(EltNum).getInt().trunc(52);
+      APInt CElt = C.getVectorElt(EltNum).getInt().trunc(52);
+      APSInt ResElt(AElt + (BElt * CElt).zext(64), false);
+      ResultElements.push_back(APValue(ResElt));
+    }
+
+    return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+  }
+  case X86::BI__builtin_ia32_vpmadd52huq128:
+  case X86::BI__builtin_ia32_vpmadd52huq256:
+  case X86::BI__builtin_ia32_vpmadd52huq512: {
+    APValue A, B, C;
+    if (!EvaluateAsRValue(Info, E->getArg(0), A) ||
+        !EvaluateAsRValue(Info, E->getArg(1), B) ||
+        !EvaluateAsRValue(Info, E->getArg(2), C))
+      return false;
+
+    unsigned ALen = A.getVectorLength();
+    SmallVector<APValue, 4> ResultElements;
+    ResultElements.reserve(ALen);
+
+    for (unsigned EltNum = 0; EltNum < ALen; EltNum += 1) {
+      APInt AElt = A.getVectorElt(EltNum).getInt();
+      APInt BElt = B.getVectorElt(EltNum).getInt().trunc(52);
+      APInt CElt = C.getVectorElt(EltNum).getInt().trunc(52);
+      APSInt ResElt(AElt + llvm::APIntOps::mulhu(BElt, CElt).zext(64), false);
+      ResultElements.push_back(APValue(ResElt));
+    }
+
+    return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+  }
+
   case clang::X86::BI__builtin_ia32_vprotbi:
   case clang::X86::BI__builtin_ia32_vprotdi:
   case clang::X86::BI__builtin_ia32_vprotqi:
@@ -12377,6 +12429,169 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
     return Success(APValue(ResultElements.data(), ResultElements.size()), E);
   }
+  case clang::X86::BI__builtin_ia32_phaddw128:
+  case clang::X86::BI__builtin_ia32_phaddw256:
+  case clang::X86::BI__builtin_ia32_phaddd128:
+  case clang::X86::BI__builtin_ia32_phaddd256:
+  case clang::X86::BI__builtin_ia32_phaddsw128:
+  case clang::X86::BI__builtin_ia32_phaddsw256:
+
+  case clang::X86::BI__builtin_ia32_phsubw128:
+  case clang::X86::BI__builtin_ia32_phsubw256:
+  case clang::X86::BI__builtin_ia32_phsubd128:
+  case clang::X86::BI__builtin_ia32_phsubd256:
+  case clang::X86::BI__builtin_ia32_phsubsw128:
+  case clang::X86::BI__builtin_ia32_phsubsw256: {
+    APValue SourceLHS, SourceRHS;
+    if (!EvaluateAsRValue(Info, E->getArg(0), SourceLHS) ||
+        !EvaluateAsRValue(Info, E->getArg(1), SourceRHS))
+      return false;
+    QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
+    bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType();
+
+    unsigned NumElts = SourceLHS.getVectorLength();
+    unsigned EltBits = Info.Ctx.getIntWidth(DestEltTy);
+    unsigned EltsPerLane = 128 / EltBits;
+    SmallVector<APValue, 4> ResultElements;
+    ResultElements.reserve(NumElts);
+
+    for (unsigned LaneStart = 0; LaneStart != NumElts;
+         LaneStart += EltsPerLane) {
+      for (unsigned I = 0; I != EltsPerLane; I += 2) {
+        APSInt LHSA = SourceLHS.getVectorElt(LaneStart + I).getInt();
+        APSInt LHSB = SourceLHS.getVectorElt(LaneStart + I + 1).getInt();
+        switch (E->getBuiltinCallee()) {
+        case clang::X86::BI__builtin_ia32_phaddw128:
+        case clang::X86::BI__builtin_ia32_phaddw256:
+        case clang::X86::BI__builtin_ia32_phaddd128:
+        case clang::X86::BI__builtin_ia32_phaddd256: {
+          APSInt Res(LHSA + LHSB, DestUnsigned);
+          ResultElements.push_back(APValue(Res));
+          break;
+        }
+        case clang::X86::BI__builtin_ia32_phaddsw128:
+        case clang::X86::BI__builtin_ia32_phaddsw256: {
+          APSInt Res(LHSA.sadd_sat(LHSB));
+          ResultElements.push_back(APValue(Res));
+          break;
+        }
+        case clang::X86::BI__builtin_ia32_phsubw128:
+        case clang::X86::BI__builtin_ia32_phsubw256:
+        case clang::X86::BI__builtin_ia32_phsubd128:
+        case clang::X86::BI__builtin_ia32_phsubd256: {
+          APSInt Res(LHSA - LHSB, DestUnsigned);
+          ResultElements.push_back(APValue(Res));
+          break;
+        }
+        case clang::X86::BI__builtin_ia32_phsubsw128:
+        case clang::X86::BI__builtin_ia32_phsubsw256: {
+          APSInt Res(LHSA.ssub_sat(LHSB));
+          ResultElements.push_back(APValue(Res));
+          break;
+        }
+        }
+      }
+      for (unsigned I = 0; I != EltsPerLane; I += 2) {
+        APSInt RHSA = SourceRHS.getVectorElt(LaneStart + I).getInt();
+        APSInt RHSB = SourceRHS.getVectorElt(LaneStart + I + 1).getInt();
+        switch (E->getBuiltinCallee()) {
+        case clang::X86::BI__builtin_ia32_phaddw128:
+        case clang::X86::BI__builtin_ia32_phaddw256:
+        case clang::X86::BI__builtin_ia32_phaddd128:
+        case clang::X86::BI__builtin_ia32_phaddd256: {
+          APSInt Res(RHSA + RHSB, DestUnsigned);
+          ResultElements.push_back(APValue(Res));
+          break;
+        }
+        case clang::X86::BI__builtin_ia32_phaddsw128:
+        case clang::X86::BI__builtin_ia32_phaddsw256: {
+          APSInt Res(RHSA.sadd_sat(RHSB));
+          ResultElements.push_back(APValue(Res));
+          break;
+        }
+        case clang::X86::BI__builtin_ia32_phsubw128:
+        case clang::X86::BI__builtin_ia32_phsubw256:
+        case clang::X86::BI__builtin_ia32_phsubd128:
+        case clang::X86::BI__builtin_ia32_phsubd256: {
+          APSInt Res(RHSA - RHSB, DestUnsigned);
+          ResultElements.push_back(APValue(Res));
+          break;
+        }
+        case clang::X86::BI__builtin_ia32_phsubsw128:
+        case clang::X86::BI__builtin_ia32_phsubsw256: {
+          APSInt Res(RHSA.ssub_sat(RHSB));
+          ResultElements.push_back(APValue(Res));
+          break;
+        }
+        }
+      }
+    }
+    return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+  }
+  case clang::X86::BI__builtin_ia32_haddpd:
+  case clang::X86::BI__builtin_ia32_haddps:
+  case clang::X86::BI__builtin_ia32_haddps256:
+  case clang::X86::BI__builtin_ia32_haddpd256:
+  case clang::X86::BI__builtin_ia32_hsubpd:
+  case clang::X86::BI__builtin_ia32_hsubps:
+  case clang::X86::BI__builtin_ia32_hsubps256:
+  case clang::X86::BI__builtin_ia32_hsubpd256: {
+    APValue SourceLHS, SourceRHS;
+    if (!EvaluateAsRValue(Info, E->getArg(0), SourceLHS) ||
+        !EvaluateAsRValue(Info, E->getArg(1), SourceRHS))
+      return false;
+    unsigned NumElts = SourceLHS.getVectorLength();
+    SmallVector<APValue, 4> ResultElements;
+    ResultElements.reserve(NumElts);
+    llvm::RoundingMode RM = getActiveRoundingMode(getEvalInfo(), E);
+    QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
+    unsigned EltBits = Info.Ctx.getTypeSize(DestEltTy);
+    unsigned NumLanes = NumElts * EltBits / 128;
+    unsigned NumElemsPerLane = NumElts / NumLanes;
+    unsigned HalfElemsPerLane = NumElemsPerLane / 2;
+
+    for (unsigned L = 0; L != NumElts; L += NumElemsPerLane) {
+      for (unsigned I = 0; I != HalfElemsPerLane; ++I) {
+        APFloat LHSA = SourceLHS.getVectorElt(L + (2 * I) + 0).getFloat();
+        APFloat LHSB = SourceLHS.getVectorElt(L + (2 * I) + 1).getFloat();
+        switch (E->getBuiltinCallee()) {
+        case clang::X86::BI__builtin_ia32_haddpd:
+        case clang::X86::BI__builtin_ia32_haddps:
+        case clang::X86::BI__builtin_ia32_haddps256:
+        case clang::X86::BI__builtin_ia32_haddpd256:
+          LHSA.add(LHSB, RM);
+          break;
+        case clang::X86::BI__builtin_ia32_hsubpd:
+        case clang::X86::BI__builtin_ia32_hsubps:
+        case clang::X86::BI__builtin_ia32_hsubps256:
+        case clang::X86::BI__builtin_ia32_hsubpd256:
+          LHSA.subtract(LHSB, RM);
+          break;
+        }
+        ResultElements.push_back(APValue(LHSA));
+      }
+      for (unsigned I = 0; I != HalfElemsPerLane; ++I) {
+        APFloat RHSA = SourceRHS.getVectorElt(L + (2 * I) + 0).getFloat();
+        APFloat RHSB = SourceRHS.getVectorElt(L + (2 * I) + 1).getFloat();
+        switch (E->getBuiltinCallee()) {
+        case clang::X86::BI__builtin_ia32_haddpd:
+        case clang::X86::BI__builtin_ia32_haddps:
+        case clang::X86::BI__builtin_ia32_haddps256:
+        case clang::X86::BI__builtin_ia32_haddpd256:
+          RHSA.add(RHSB, RM);
+          break;
+        case clang::X86::BI__builtin_ia32_hsubpd:
+        case clang::X86::BI__builtin_ia32_hsubps:
+        case clang::X86::BI__builtin_ia32_hsubps256:
+        case clang::X86::BI__builtin_ia32_hsubpd256:
+          RHSA.subtract(RHSB, RM);
+          break;
+        }
+        ResultElements.push_back(APValue(RHSA));
+      }
+    }
+    return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+  }
   case Builtin::BI__builtin_elementwise_fshl:
   case Builtin::BI__builtin_elementwise_fshr: {
     APValue SourceHi, SourceLo, SourceShift;
@@ -13905,6 +14120,40 @@ static bool getBuiltinAlignArguments(const CallExpr *E, EvalInfo &Info,
 
 bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
                                             unsigned BuiltinOp) {
+  auto EvalTestOp = [&](llvm::function_ref<bool(const APInt &, const APInt &)>
+                            Fn) {
+    APValue SourceLHS, SourceRHS;
+    if (!EvaluateAsRValue(Info, E->getArg(0), SourceLHS) ||
+        !EvaluateAsRValue(Info, E->getArg(1), SourceRHS))
+      return false;
+
+    unsigned SourceLen = SourceLHS.getVectorLength();
+    const VectorType *VT = E->getArg(0)->getType()->castAs<VectorType>();
+    QualType ElemQT = VT->getElementType();
+    unsigned LaneWidth = Info.Ctx.getTypeSize(ElemQT);
+
+    APInt AWide(LaneWidth * SourceLen, 0);
+    APInt BWide(LaneWidth * SourceLen, 0);
+
+    for (unsigned I = 0; I != SourceLen; ++I) {
+      APInt ALane;
+      APInt BLane;
+      if (ElemQT->isIntegerType()) { // Get value.
+        ALane = SourceLHS.getVectorElt(I).getInt();
+        BLane = SourceRHS.getVectorElt(I).getInt();
+      } else if (ElemQT->isFloatingType()) { // Get only sign bit.
+        ALane =
+            SourceLHS.getVectorElt(I).getFloat().bitcastToAPInt().isNegative();
+        BLane =
+            SourceRHS.getVectorElt(I).getFloat().bitcastToAPInt().isNegative();
+      } else { // Must be integer or floating type.
+        return false;
+      }
+      AWide.insertBits(ALane, I * LaneWidth);
+      BWide.insertBits(BLane, I * LaneWidth);
+    }
+    return Success(Fn(AWide, BWide), E);
+  };
   auto HandleMaskBinOp =
       [&](llvm::function_ref<APSInt(const APSInt &, const APSInt &)> Fn)
@@ -15018,7 +15267,34 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
       Result.setBitVal(P++, Val[I]);
     return Success(Result, E);
   }
-
+  case X86::BI__builtin_ia32_ptestz128:
+  case X86::BI__builtin_ia32_ptestz256:
+  case X86::BI__builtin_ia32_vtestzps:
+  case X86::BI__builtin_ia32_vtestzps256:
+  case X86::BI__builtin_ia32_vtestzpd:
+  case X86::BI__builtin_ia32_vtestzpd256: {
+    return EvalTestOp(
+        [](const APInt &A, const APInt &B) { return (A & B) == 0; });
+  }
+  case X86::BI__builtin_ia32_ptestc128:
+  case X86::BI__builtin_ia32_ptestc256:
+  case X86::BI__builtin_ia32_vtestcps:
+  case X86::BI__builtin_ia32_vtestcps256:
+  case X86::BI__builtin_ia32_vtestcpd:
+  case X86::BI__builtin_ia32_vtestcpd256: {
+    return EvalTestOp(
+        [](const APInt &A, const APInt &B) { return (~A & B) == 0; });
+  }
+  case X86::BI__builtin_ia32_ptestnzc128:
+  case X86::BI__builtin_ia32_ptestnzc256:
+  case X86::BI__builtin_ia32_vtestnzcps:
+  case X86::BI__builtin_ia32_vtestnzcps256:
+  case X86::BI__builtin_ia32_vtestnzcpd:
+  case X86::BI__builtin_ia32_vtestnzcpd256: {
+    return EvalTestOp([](const APInt &A, const APInt &B) {
+      return ((A & B) != 0) && ((~A & B) != 0);
+    });
+  }
   case X86::BI__builtin_ia32_kandqi:
   case X86::BI__builtin_ia32_kandhi:
   case X86::BI__builtin_ia32_kandsi:
diff --git a/clang/lib/AST/OpenACCClause.cpp b/clang/lib/AST/OpenACCClause.cpp
index 6c4bc7c..142c932 100644
--- a/clang/lib/AST/OpenACCClause.cpp
+++ b/clang/lib/AST/OpenACCClause.cpp
@@ -506,11 +506,17 @@ OpenACCDeviceTypeClause *OpenACCDeviceTypeClause::Create(
 OpenACCReductionClause *OpenACCReductionClause::Create(
     const ASTContext &C, SourceLocation BeginLoc, SourceLocation LParenLoc,
     OpenACCReductionOperator Operator, ArrayRef<Expr *> VarList,
-    ArrayRef<OpenACCReductionRecipe> Recipes,
+    ArrayRef<OpenACCReductionRecipeWithStorage> Recipes,
     SourceLocation EndLoc) {
-  void *Mem = C.Allocate(
-      OpenACCReductionClause::totalSizeToAlloc<Expr *, OpenACCReductionRecipe>(
-          VarList.size(), Recipes.size()));
+  size_t NumCombiners = llvm::accumulate(
+      Recipes, 0, [](size_t Num, const OpenACCReductionRecipeWithStorage &R) {
+        return Num + R.CombinerRecipes.size();
+      });
+
+  void *Mem = C.Allocate(OpenACCReductionClause::totalSizeToAlloc<
+                         Expr *, OpenACCReductionRecipe,
+                         OpenACCReductionRecipe::CombinerRecipe>(
+      VarList.size(), Recipes.size(), NumCombiners));
   return new (Mem) OpenACCReductionClause(BeginLoc, LParenLoc, Operator,
                                           VarList, Recipes, EndLoc);
 }
diff --git a/clang/lib/AST/OpenMPClause.cpp b/clang/lib/AST/OpenMPClause.cpp
index 2ce4419..791df7e 100644
--- a/clang/lib/AST/OpenMPClause.cpp
+++ b/clang/lib/AST/OpenMPClause.cpp
@@ -309,6 +309,12 @@ OMPClause::child_range OMPIfClause::used_children() {
   return child_range(&Condition, &Condition + 1);
 }
 
+OMPClause::child_range OMPNowaitClause::used_children() {
+  if (Condition)
+    return child_range(&Condition, &Condition + 1);
+  return children();
+}
+
 OMPClause::child_range OMPGrainsizeClause::used_children() {
   if (Stmt **C = getAddrOfExprAsWritten(getPreInitStmt()))
     return child_range(C, C + 1);
@@ -2113,8 +2119,13 @@ void OMPClausePrinter::VisitOMPOrderedClause(OMPOrderedClause *Node) {
   }
 }
 
-void OMPClausePrinter::VisitOMPNowaitClause(OMPNowaitClause *) {
+void OMPClausePrinter::VisitOMPNowaitClause(OMPNowaitClause *Node) {
   OS << "nowait";
+  if (auto *Cond = Node->getCondition()) {
+    OS << "(";
+    Cond->printPretty(OS, nullptr, Policy, 0);
+    OS << ")";
+  }
 }
 
 void OMPClausePrinter::VisitOMPUntiedClause(OMPUntiedClause *) {
diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp
index 3cd033e..05b64cc 100644
--- a/clang/lib/AST/StmtProfile.cpp
+++ b/clang/lib/AST/StmtProfile.cpp
@@ -585,7 +585,10 @@ void OMPClauseProfiler::VisitOMPOrderedClause(const OMPOrderedClause *C) {
     Profiler->VisitStmt(Num);
 }
 
-void OMPClauseProfiler::VisitOMPNowaitClause(const OMPNowaitClause *) {}
+void OMPClauseProfiler::VisitOMPNowaitClause(const OMPNowaitClause *C) {
+  if (C->getCondition())
+    Profiler->VisitStmt(C->getCondition());
+}
 
 void OMPClauseProfiler::VisitOMPUntiedClause(const OMPUntiedClause *) {}
 
diff --git a/clang/lib/AST/TypeLoc.cpp b/clang/lib/AST/TypeLoc.cpp
index 55476e2..e952e82 100644
--- a/clang/lib/AST/TypeLoc.cpp
+++ b/clang/lib/AST/TypeLoc.cpp
@@ -494,39 +494,6 @@ NestedNameSpecifierLoc TypeLoc::getPrefix() const {
   }
 }
 
-SourceLocation TypeLoc::getNonPrefixBeginLoc() const {
-  switch (getTypeLocClass()) {
-  case TypeLoc::TemplateSpecialization: {
-    auto TL = castAs<TemplateSpecializationTypeLoc>();
-    SourceLocation Loc = TL.getTemplateKeywordLoc();
-    if (!Loc.isValid())
-      Loc = TL.getTemplateNameLoc();
-    return Loc;
-  }
-  case TypeLoc::DeducedTemplateSpecialization: {
-    auto TL = castAs<DeducedTemplateSpecializationTypeLoc>();
-    SourceLocation Loc = TL.getTemplateKeywordLoc();
-    if (!Loc.isValid())
-      Loc = TL.getTemplateNameLoc();
-    return Loc;
-  }
-  case TypeLoc::DependentName:
-    return castAs<DependentNameTypeLoc>().getNameLoc();
-  case TypeLoc::Enum:
-  case TypeLoc::Record:
-  case TypeLoc::InjectedClassName:
-    return castAs<TagTypeLoc>().getNameLoc();
-  case TypeLoc::Typedef:
-    return castAs<TypedefTypeLoc>().getNameLoc();
-  case TypeLoc::UnresolvedUsing:
-    return castAs<UnresolvedUsingTypeLoc>().getNameLoc();
-  case TypeLoc::Using:
-    return castAs<UsingTypeLoc>().getNameLoc();
-  default:
-    return getBeginLoc();
-  }
-}
-
 SourceLocation TypeLoc::getNonElaboratedBeginLoc() const {
   // For elaborated types (e.g. `struct a::A`) we want the portion after the
   // `struct` but including the namespace qualifier, `a::`.
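
A minimal usage sketch of what the new evaluator paths above permit; the vector typedef, the -mssse3 target assumption, and the concrete values are illustrative and not part of the patch itself. __builtin_ia32_phaddw128 writes the pairwise sums of the first operand into the low half of the result and the pairwise sums of the second operand into the high half, so with this change it can fold in a constant expression:

    // Hypothetical test, assuming SSSE3 is enabled for the translation unit.
    typedef short v8hi __attribute__((vector_size(16)));

    constexpr v8hi A = {1, 2, 3, 4, 5, 6, 7, 8};
    constexpr v8hi B = {10, 20, 30, 40, 50, 60, 70, 80};
    // Pairwise sums of A land in elements 0..3, pairwise sums of B in 4..7.
    constexpr v8hi H = __builtin_ia32_phaddw128(A, B);
    static_assert(H[0] == 3 && H[3] == 15 && H[4] == 30 && H[7] == 150, "");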