Diffstat (limited to 'clang/lib')
-rw-r--r--  clang/lib/AST/ByteCode/InterpBuiltin.cpp              | 138
-rw-r--r--  clang/lib/AST/Decl.cpp                                |   4
-rw-r--r--  clang/lib/AST/ExprConstant.cpp                        | 211
-rw-r--r--  clang/lib/AST/OpenACCClause.cpp                       |   2
-rw-r--r--  clang/lib/Analysis/LifetimeSafety/FactsGenerator.cpp  |   4
-rw-r--r--  clang/lib/Basic/Targets/AArch64.cpp                   |   1
-rw-r--r--  clang/lib/Basic/Targets/SystemZ.cpp                   |  13
-rw-r--r--  clang/lib/Basic/Targets/SystemZ.h                     |   6
-rw-r--r--  clang/lib/Basic/Targets/X86.cpp                       |   1
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenAtomic.cpp                |   4
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp         |   6
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp         | 132
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h           |  15
-rw-r--r--  clang/lib/CIR/Dialect/IR/CIRDialect.cpp               |  14
-rw-r--r--  clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp   |   8
-rw-r--r--  clang/lib/CodeGen/CGStmt.cpp                          |  26
-rw-r--r--  clang/lib/CodeGen/Targets/RISCV.cpp                   |  24
-rw-r--r--  clang/lib/Format/FormatToken.cpp                      |   3
-rw-r--r--  clang/lib/Headers/avx2intrin.h                        |  42
-rw-r--r--  clang/lib/Headers/avx512ifmaintrin.h                  |  63
-rw-r--r--  clang/lib/Headers/avx512ifmavlintrin.h                |  92
-rw-r--r--  clang/lib/Headers/avxifmaintrin.h                     |   9
-rw-r--r--  clang/lib/Headers/avxintrin.h                         |  20
-rw-r--r--  clang/lib/Headers/pmmintrin.h                         |  25
-rw-r--r--  clang/lib/Headers/tmmintrin.h                         | 103
-rw-r--r--  clang/lib/Sema/SemaConcept.cpp                        |  16
-rw-r--r--  clang/lib/Sema/SemaOpenACCClause.cpp                  |   2
-rw-r--r--  clang/lib/Sema/SemaTemplateInstantiateDecl.cpp        |   2
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp  |  73
-rw-r--r--  clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp |   3
-rw-r--r--  clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp   |   4
31 files changed, 785 insertions(+), 281 deletions(-)
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 84c5ecc..2d3cb6a 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -2587,6 +2587,82 @@ static bool interp__builtin_ia32_pmul(
return true;
}
+static bool interp_builtin_horizontal_int_binop(
+ InterpState &S, CodePtr OpPC, const CallExpr *Call,
+ llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) {
+ const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
+ PrimType ElemT = *S.getContext().classify(VT->getElementType());
+ bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
+
+ const Pointer &RHS = S.Stk.pop<Pointer>();
+ const Pointer &LHS = S.Stk.pop<Pointer>();
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ unsigned NumElts = VT->getNumElements();
+ unsigned EltBits = S.getASTContext().getIntWidth(VT->getElementType());
+ unsigned EltsPerLane = 128 / EltBits;
+ unsigned Lanes = NumElts * EltBits / 128;
+ unsigned DestIndex = 0;
+
+ for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
+ unsigned LaneStart = Lane * EltsPerLane;
+ for (unsigned I = 0; I < EltsPerLane; I += 2) {
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+ APSInt Elem1 = LHS.elem<T>(LaneStart + I).toAPSInt();
+ APSInt Elem2 = LHS.elem<T>(LaneStart + I + 1).toAPSInt();
+ APSInt ResL = APSInt(Fn(Elem1, Elem2), DestUnsigned);
+ Dst.elem<T>(DestIndex++) = static_cast<T>(ResL);
+ });
+ }
+
+ for (unsigned I = 0; I < EltsPerLane; I += 2) {
+ INT_TYPE_SWITCH_NO_BOOL(ElemT, {
+ APSInt Elem1 = RHS.elem<T>(LaneStart + I).toAPSInt();
+ APSInt Elem2 = RHS.elem<T>(LaneStart + I + 1).toAPSInt();
+ APSInt ResR = APSInt(Fn(Elem1, Elem2), DestUnsigned);
+ Dst.elem<T>(DestIndex++) = static_cast<T>(ResR);
+ });
+ }
+ }
+ Dst.initializeAllElements();
+ return true;
+}
+
+static bool interp_builtin_horizontal_fp_binop(
+ InterpState &S, CodePtr OpPC, const CallExpr *Call,
+ llvm::function_ref<APFloat(const APFloat &, const APFloat &,
+ llvm::RoundingMode)>
+ Fn) {
+ const Pointer &RHS = S.Stk.pop<Pointer>();
+ const Pointer &LHS = S.Stk.pop<Pointer>();
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
+ llvm::RoundingMode RM = getRoundingMode(FPO);
+ const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
+
+ unsigned NumElts = VT->getNumElements();
+ unsigned EltBits = S.getASTContext().getTypeSize(VT->getElementType());
+ unsigned NumLanes = NumElts * EltBits / 128;
+ unsigned NumElemsPerLane = NumElts / NumLanes;
+ unsigned HalfElemsPerLane = NumElemsPerLane / 2;
+
+ for (unsigned L = 0; L != NumElts; L += NumElemsPerLane) {
+ using T = PrimConv<PT_Float>::T;
+ for (unsigned E = 0; E != HalfElemsPerLane; ++E) {
+ APFloat Elem1 = LHS.elem<T>(L + (2 * E) + 0).getAPFloat();
+ APFloat Elem2 = LHS.elem<T>(L + (2 * E) + 1).getAPFloat();
+ Dst.elem<T>(L + E) = static_cast<T>(Fn(Elem1, Elem2, RM));
+ }
+ for (unsigned E = 0; E != HalfElemsPerLane; ++E) {
+ APFloat Elem1 = RHS.elem<T>(L + (2 * E) + 0).getAPFloat();
+ APFloat Elem2 = RHS.elem<T>(L + (2 * E) + 1).getAPFloat();
+ Dst.elem<T>(L + E + HalfElemsPerLane) =
+ static_cast<T>(Fn(Elem1, Elem2, RM));
+ }
+ }
+ Dst.initializeAllElements();
+ return true;
+}
+
static bool interp__builtin_elementwise_triop_fp(
InterpState &S, CodePtr OpPC, const CallExpr *Call,
llvm::function_ref<APFloat(const APFloat &, const APFloat &,
@@ -3665,6 +3741,53 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
case Builtin::BI__builtin_elementwise_min:
return interp__builtin_elementwise_maxmin(S, OpPC, Call, BuiltinID);
+ case clang::X86::BI__builtin_ia32_phaddw128:
+ case clang::X86::BI__builtin_ia32_phaddw256:
+ case clang::X86::BI__builtin_ia32_phaddd128:
+ case clang::X86::BI__builtin_ia32_phaddd256:
+ return interp_builtin_horizontal_int_binop(
+ S, OpPC, Call,
+ [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
+ case clang::X86::BI__builtin_ia32_phaddsw128:
+ case clang::X86::BI__builtin_ia32_phaddsw256:
+ return interp_builtin_horizontal_int_binop(
+ S, OpPC, Call,
+ [](const APSInt &LHS, const APSInt &RHS) { return LHS.sadd_sat(RHS); });
+ case clang::X86::BI__builtin_ia32_phsubw128:
+ case clang::X86::BI__builtin_ia32_phsubw256:
+ case clang::X86::BI__builtin_ia32_phsubd128:
+ case clang::X86::BI__builtin_ia32_phsubd256:
+ return interp_builtin_horizontal_int_binop(
+ S, OpPC, Call,
+ [](const APSInt &LHS, const APSInt &RHS) { return LHS - RHS; });
+ case clang::X86::BI__builtin_ia32_phsubsw128:
+ case clang::X86::BI__builtin_ia32_phsubsw256:
+ return interp_builtin_horizontal_int_binop(
+ S, OpPC, Call,
+ [](const APSInt &LHS, const APSInt &RHS) { return LHS.ssub_sat(RHS); });
+ case clang::X86::BI__builtin_ia32_haddpd:
+ case clang::X86::BI__builtin_ia32_haddps:
+ case clang::X86::BI__builtin_ia32_haddpd256:
+ case clang::X86::BI__builtin_ia32_haddps256:
+ return interp_builtin_horizontal_fp_binop(
+ S, OpPC, Call,
+ [](const APFloat &LHS, const APFloat &RHS, llvm::RoundingMode RM) {
+ APFloat F = LHS;
+ F.add(RHS, RM);
+ return F;
+ });
+ case clang::X86::BI__builtin_ia32_hsubpd:
+ case clang::X86::BI__builtin_ia32_hsubps:
+ case clang::X86::BI__builtin_ia32_hsubpd256:
+ case clang::X86::BI__builtin_ia32_hsubps256:
+ return interp_builtin_horizontal_fp_binop(
+ S, OpPC, Call,
+ [](const APFloat &LHS, const APFloat &RHS, llvm::RoundingMode RM) {
+ APFloat F = LHS;
+ F.subtract(RHS, RM);
+ return F;
+ });
+
case clang::X86::BI__builtin_ia32_pmuldq128:
case clang::X86::BI__builtin_ia32_pmuldq256:
case clang::X86::BI__builtin_ia32_pmuldq512:
@@ -3695,6 +3818,21 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
return F;
});
+ case X86::BI__builtin_ia32_vpmadd52luq128:
+ case X86::BI__builtin_ia32_vpmadd52luq256:
+ case X86::BI__builtin_ia32_vpmadd52luq512:
+ return interp__builtin_elementwise_triop(
+ S, OpPC, Call, [](const APSInt &A, const APSInt &B, const APSInt &C) {
+ return A + (B.trunc(52) * C.trunc(52)).zext(64);
+ });
+ case X86::BI__builtin_ia32_vpmadd52huq128:
+ case X86::BI__builtin_ia32_vpmadd52huq256:
+ case X86::BI__builtin_ia32_vpmadd52huq512:
+ return interp__builtin_elementwise_triop(
+ S, OpPC, Call, [](const APSInt &A, const APSInt &B, const APSInt &C) {
+ return A + llvm::APIntOps::mulhu(B.trunc(52), C.trunc(52)).zext(64);
+ });
+
case X86::BI__builtin_ia32_vpshldd128:
case X86::BI__builtin_ia32_vpshldd256:
case X86::BI__builtin_ia32_vpshldd512:
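
(For orientation, not part of the patch: the lane traversal in interp_builtin_horizontal_int_binop reduces, for __builtin_ia32_phaddw128 on 8 x i16, to the scalar model below — the low half of each 128-bit lane takes pairwise sums from the LHS, the high half from the RHS.)

#include <array>
#include <cstdint>

// Scalar sketch of one 128-bit lane of phaddw: adjacent pairs of `a` fill
// the low four results, adjacent pairs of `b` fill the high four.
static std::array<int16_t, 8> phaddw128_model(std::array<int16_t, 8> a,
                                              std::array<int16_t, 8> b) {
  std::array<int16_t, 8> r{};
  for (int i = 0; i < 4; ++i) {
    r[i] = static_cast<int16_t>(a[2 * i] + a[2 * i + 1]);     // LHS half
    r[4 + i] = static_cast<int16_t>(b[2 * i] + b[2 * i + 1]); // RHS half
  }
  return r;
}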
diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp
index c734155..69cbf6e 100644
--- a/clang/lib/AST/Decl.cpp
+++ b/clang/lib/AST/Decl.cpp
@@ -3316,6 +3316,10 @@ bool FunctionDecl::isImmediateEscalating() const {
CD && CD->isInheritingConstructor())
return CD->getInheritedConstructor().getConstructor();
+ // Destructors are not immediate escalating.
+ if (isa<CXXDestructorDecl>(this))
+ return false;
+
// - a function that results from the instantiation of a templated entity
// defined with the constexpr specifier.
TemplatedKind TK = getTemplatedKind();
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index dfdfef2..51c0382 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -11974,6 +11974,54 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(APValue(ResultElements.data(), ResultElements.size()), E);
}
+
+ case X86::BI__builtin_ia32_vpmadd52luq128:
+ case X86::BI__builtin_ia32_vpmadd52luq256:
+ case X86::BI__builtin_ia32_vpmadd52luq512: {
+ APValue A, B, C;
+ if (!EvaluateAsRValue(Info, E->getArg(0), A) ||
+ !EvaluateAsRValue(Info, E->getArg(1), B) ||
+ !EvaluateAsRValue(Info, E->getArg(2), C))
+ return false;
+
+ unsigned ALen = A.getVectorLength();
+ SmallVector<APValue, 4> ResultElements;
+ ResultElements.reserve(ALen);
+
+ for (unsigned EltNum = 0; EltNum < ALen; EltNum += 1) {
+ APInt AElt = A.getVectorElt(EltNum).getInt();
+ APInt BElt = B.getVectorElt(EltNum).getInt().trunc(52);
+ APInt CElt = C.getVectorElt(EltNum).getInt().trunc(52);
+ APSInt ResElt(AElt + (BElt * CElt).zext(64), false);
+ ResultElements.push_back(APValue(ResElt));
+ }
+
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
+ case X86::BI__builtin_ia32_vpmadd52huq128:
+ case X86::BI__builtin_ia32_vpmadd52huq256:
+ case X86::BI__builtin_ia32_vpmadd52huq512: {
+ APValue A, B, C;
+ if (!EvaluateAsRValue(Info, E->getArg(0), A) ||
+ !EvaluateAsRValue(Info, E->getArg(1), B) ||
+ !EvaluateAsRValue(Info, E->getArg(2), C))
+ return false;
+
+ unsigned ALen = A.getVectorLength();
+ SmallVector<APValue, 4> ResultElements;
+ ResultElements.reserve(ALen);
+
+ for (unsigned EltNum = 0; EltNum < ALen; EltNum += 1) {
+ APInt AElt = A.getVectorElt(EltNum).getInt();
+ APInt BElt = B.getVectorElt(EltNum).getInt().trunc(52);
+ APInt CElt = C.getVectorElt(EltNum).getInt().trunc(52);
+ APSInt ResElt(AElt + llvm::APIntOps::mulhu(BElt, CElt).zext(64), false);
+ ResultElements.push_back(APValue(ResElt));
+ }
+
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
+
case clang::X86::BI__builtin_ia32_vprotbi:
case clang::X86::BI__builtin_ia32_vprotdi:
case clang::X86::BI__builtin_ia32_vprotqi:
@@ -12381,6 +12429,169 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(APValue(ResultElements.data(), ResultElements.size()), E);
}
+ case clang::X86::BI__builtin_ia32_phaddw128:
+ case clang::X86::BI__builtin_ia32_phaddw256:
+ case clang::X86::BI__builtin_ia32_phaddd128:
+ case clang::X86::BI__builtin_ia32_phaddd256:
+ case clang::X86::BI__builtin_ia32_phaddsw128:
+ case clang::X86::BI__builtin_ia32_phaddsw256:
+
+ case clang::X86::BI__builtin_ia32_phsubw128:
+ case clang::X86::BI__builtin_ia32_phsubw256:
+ case clang::X86::BI__builtin_ia32_phsubd128:
+ case clang::X86::BI__builtin_ia32_phsubd256:
+ case clang::X86::BI__builtin_ia32_phsubsw128:
+ case clang::X86::BI__builtin_ia32_phsubsw256: {
+ APValue SourceLHS, SourceRHS;
+ if (!EvaluateAsRValue(Info, E->getArg(0), SourceLHS) ||
+ !EvaluateAsRValue(Info, E->getArg(1), SourceRHS))
+ return false;
+ QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
+ bool DestUnsigned = DestEltTy->isUnsignedIntegerOrEnumerationType();
+
+ unsigned NumElts = SourceLHS.getVectorLength();
+ unsigned EltBits = Info.Ctx.getIntWidth(DestEltTy);
+ unsigned EltsPerLane = 128 / EltBits;
+ SmallVector<APValue, 4> ResultElements;
+ ResultElements.reserve(NumElts);
+
+ for (unsigned LaneStart = 0; LaneStart != NumElts;
+ LaneStart += EltsPerLane) {
+ for (unsigned I = 0; I != EltsPerLane; I += 2) {
+ APSInt LHSA = SourceLHS.getVectorElt(LaneStart + I).getInt();
+ APSInt LHSB = SourceLHS.getVectorElt(LaneStart + I + 1).getInt();
+ switch (E->getBuiltinCallee()) {
+ case clang::X86::BI__builtin_ia32_phaddw128:
+ case clang::X86::BI__builtin_ia32_phaddw256:
+ case clang::X86::BI__builtin_ia32_phaddd128:
+ case clang::X86::BI__builtin_ia32_phaddd256: {
+ APSInt Res(LHSA + LHSB, DestUnsigned);
+ ResultElements.push_back(APValue(Res));
+ break;
+ }
+ case clang::X86::BI__builtin_ia32_phaddsw128:
+ case clang::X86::BI__builtin_ia32_phaddsw256: {
+ APSInt Res(LHSA.sadd_sat(LHSB));
+ ResultElements.push_back(APValue(Res));
+ break;
+ }
+ case clang::X86::BI__builtin_ia32_phsubw128:
+ case clang::X86::BI__builtin_ia32_phsubw256:
+ case clang::X86::BI__builtin_ia32_phsubd128:
+ case clang::X86::BI__builtin_ia32_phsubd256: {
+ APSInt Res(LHSA - LHSB, DestUnsigned);
+ ResultElements.push_back(APValue(Res));
+ break;
+ }
+ case clang::X86::BI__builtin_ia32_phsubsw128:
+ case clang::X86::BI__builtin_ia32_phsubsw256: {
+ APSInt Res(LHSA.ssub_sat(LHSB));
+ ResultElements.push_back(APValue(Res));
+ break;
+ }
+ }
+ }
+ for (unsigned I = 0; I != EltsPerLane; I += 2) {
+ APSInt RHSA = SourceRHS.getVectorElt(LaneStart + I).getInt();
+ APSInt RHSB = SourceRHS.getVectorElt(LaneStart + I + 1).getInt();
+ switch (E->getBuiltinCallee()) {
+ case clang::X86::BI__builtin_ia32_phaddw128:
+ case clang::X86::BI__builtin_ia32_phaddw256:
+ case clang::X86::BI__builtin_ia32_phaddd128:
+ case clang::X86::BI__builtin_ia32_phaddd256: {
+ APSInt Res(RHSA + RHSB, DestUnsigned);
+ ResultElements.push_back(APValue(Res));
+ break;
+ }
+ case clang::X86::BI__builtin_ia32_phaddsw128:
+ case clang::X86::BI__builtin_ia32_phaddsw256: {
+ APSInt Res(RHSA.sadd_sat(RHSB));
+ ResultElements.push_back(APValue(Res));
+ break;
+ }
+ case clang::X86::BI__builtin_ia32_phsubw128:
+ case clang::X86::BI__builtin_ia32_phsubw256:
+ case clang::X86::BI__builtin_ia32_phsubd128:
+ case clang::X86::BI__builtin_ia32_phsubd256: {
+ APSInt Res(RHSA - RHSB, DestUnsigned);
+ ResultElements.push_back(APValue(Res));
+ break;
+ }
+ case clang::X86::BI__builtin_ia32_phsubsw128:
+ case clang::X86::BI__builtin_ia32_phsubsw256: {
+ APSInt Res(RHSA.ssub_sat(RHSB));
+ ResultElements.push_back(APValue(Res));
+ break;
+ }
+ }
+ }
+ }
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
+ case clang::X86::BI__builtin_ia32_haddpd:
+ case clang::X86::BI__builtin_ia32_haddps:
+ case clang::X86::BI__builtin_ia32_haddps256:
+ case clang::X86::BI__builtin_ia32_haddpd256:
+ case clang::X86::BI__builtin_ia32_hsubpd:
+ case clang::X86::BI__builtin_ia32_hsubps:
+ case clang::X86::BI__builtin_ia32_hsubps256:
+ case clang::X86::BI__builtin_ia32_hsubpd256: {
+ APValue SourceLHS, SourceRHS;
+ if (!EvaluateAsRValue(Info, E->getArg(0), SourceLHS) ||
+ !EvaluateAsRValue(Info, E->getArg(1), SourceRHS))
+ return false;
+ unsigned NumElts = SourceLHS.getVectorLength();
+ SmallVector<APValue, 4> ResultElements;
+ ResultElements.reserve(NumElts);
+ llvm::RoundingMode RM = getActiveRoundingMode(getEvalInfo(), E);
+ QualType DestEltTy = E->getType()->castAs<VectorType>()->getElementType();
+ unsigned EltBits = Info.Ctx.getTypeSize(DestEltTy);
+ unsigned NumLanes = NumElts * EltBits / 128;
+ unsigned NumElemsPerLane = NumElts / NumLanes;
+ unsigned HalfElemsPerLane = NumElemsPerLane / 2;
+
+ for (unsigned L = 0; L != NumElts; L += NumElemsPerLane) {
+ for (unsigned I = 0; I != HalfElemsPerLane; ++I) {
+ APFloat LHSA = SourceLHS.getVectorElt(L + (2 * I) + 0).getFloat();
+ APFloat LHSB = SourceLHS.getVectorElt(L + (2 * I) + 1).getFloat();
+ switch (E->getBuiltinCallee()) {
+ case clang::X86::BI__builtin_ia32_haddpd:
+ case clang::X86::BI__builtin_ia32_haddps:
+ case clang::X86::BI__builtin_ia32_haddps256:
+ case clang::X86::BI__builtin_ia32_haddpd256:
+ LHSA.add(LHSB, RM);
+ break;
+ case clang::X86::BI__builtin_ia32_hsubpd:
+ case clang::X86::BI__builtin_ia32_hsubps:
+ case clang::X86::BI__builtin_ia32_hsubps256:
+ case clang::X86::BI__builtin_ia32_hsubpd256:
+ LHSA.subtract(LHSB, RM);
+ break;
+ }
+ ResultElements.push_back(APValue(LHSA));
+ }
+ for (unsigned I = 0; I != HalfElemsPerLane; ++I) {
+ APFloat RHSA = SourceRHS.getVectorElt(L + (2 * I) + 0).getFloat();
+ APFloat RHSB = SourceRHS.getVectorElt(L + (2 * I) + 1).getFloat();
+ switch (E->getBuiltinCallee()) {
+ case clang::X86::BI__builtin_ia32_haddpd:
+ case clang::X86::BI__builtin_ia32_haddps:
+ case clang::X86::BI__builtin_ia32_haddps256:
+ case clang::X86::BI__builtin_ia32_haddpd256:
+ RHSA.add(RHSB, RM);
+ break;
+ case clang::X86::BI__builtin_ia32_hsubpd:
+ case clang::X86::BI__builtin_ia32_hsubps:
+ case clang::X86::BI__builtin_ia32_hsubps256:
+ case clang::X86::BI__builtin_ia32_hsubpd256:
+ RHSA.subtract(RHSB, RM);
+ break;
+ }
+ ResultElements.push_back(APValue(RHSA));
+ }
+ }
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
case Builtin::BI__builtin_elementwise_fshl:
case Builtin::BI__builtin_elementwise_fshr: {
APValue SourceHi, SourceLo, SourceShift;
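
(A worked sketch, not from the patch, of the IFMA arithmetic both evaluators implement: mask the multiplicands to 52 bits, form the 104-bit product, and add either its low or its high 52 bits to the 64-bit accumulator. Uses the clang/GCC unsigned __int128 extension.)

#include <cassert>
#include <cstdint>

static constexpr uint64_t kMask52 = (1ULL << 52) - 1;

static uint64_t madd52lo_model(uint64_t a, uint64_t b, uint64_t c) {
  unsigned __int128 prod =
      static_cast<unsigned __int128>(b & kMask52) * (c & kMask52);
  return a + (static_cast<uint64_t>(prod) & kMask52); // low 52 product bits
}

static uint64_t madd52hi_model(uint64_t a, uint64_t b, uint64_t c) {
  unsigned __int128 prod =
      static_cast<unsigned __int128>(b & kMask52) * (c & kMask52);
  return a + static_cast<uint64_t>(prod >> 52);       // high 52 product bits
}

int main() {
  assert(madd52lo_model(1, 3, 5) == 16);         // 1 + (3 * 5)
  assert(madd52hi_model(7, 1ULL << 51, 4) == 9); // 7 + (2^53 >> 52)
}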
diff --git a/clang/lib/AST/OpenACCClause.cpp b/clang/lib/AST/OpenACCClause.cpp
index 17c6bec..142c932 100644
--- a/clang/lib/AST/OpenACCClause.cpp
+++ b/clang/lib/AST/OpenACCClause.cpp
@@ -509,7 +509,7 @@ OpenACCReductionClause *OpenACCReductionClause::Create(
ArrayRef<OpenACCReductionRecipeWithStorage> Recipes,
SourceLocation EndLoc) {
size_t NumCombiners = llvm::accumulate(
- Recipes, 0, [](size_t Num, const OpenACCReductionRecipe &R) {
+ Recipes, 0, [](size_t Num, const OpenACCReductionRecipeWithStorage &R) {
return Num + R.CombinerRecipes.size();
});
diff --git a/clang/lib/Analysis/LifetimeSafety/FactsGenerator.cpp b/clang/lib/Analysis/LifetimeSafety/FactsGenerator.cpp
index 485308f..9b68de1 100644
--- a/clang/lib/Analysis/LifetimeSafety/FactsGenerator.cpp
+++ b/clang/lib/Analysis/LifetimeSafety/FactsGenerator.cpp
@@ -9,9 +9,11 @@
#include "clang/Analysis/Analyses/LifetimeSafety/FactsGenerator.h"
#include "clang/Analysis/Analyses/LifetimeSafety/LifetimeAnnotations.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/TimeProfiler.h"
namespace clang::lifetimes::internal {
+using llvm::isa_and_present;
static bool isGslPointerType(QualType QT) {
if (const auto *RD = QT->getAsCXXRecordDecl()) {
@@ -108,7 +110,7 @@ void FactsGenerator::VisitCXXMemberCallExpr(const CXXMemberCallExpr *MCE) {
// Specifically for conversion operators,
// like `std::string_view p = std::string{};`
if (isGslPointerType(MCE->getType()) &&
- isa<CXXConversionDecl>(MCE->getCalleeDecl())) {
+ isa_and_present<CXXConversionDecl>(MCE->getCalleeDecl())) {
// The argument is the implicit object itself.
handleFunctionCall(MCE, MCE->getMethodDecl(),
{MCE->getImplicitObjectArgument()},
diff --git a/clang/lib/Basic/Targets/AArch64.cpp b/clang/lib/Basic/Targets/AArch64.cpp
index 9e03a08..18641a9 100644
--- a/clang/lib/Basic/Targets/AArch64.cpp
+++ b/clang/lib/Basic/Targets/AArch64.cpp
@@ -1568,6 +1568,7 @@ bool AArch64TargetInfo::validateAsmConstraint(
if (const unsigned Len = matchAsmCCConstraint(Name)) {
Name += Len - 1;
Info.setAllowsRegister();
+ Info.setOutputOperandBounds(0, 2);
return true;
}
}
diff --git a/clang/lib/Basic/Targets/SystemZ.cpp b/clang/lib/Basic/Targets/SystemZ.cpp
index 13b8623..30f846c 100644
--- a/clang/lib/Basic/Targets/SystemZ.cpp
+++ b/clang/lib/Basic/Targets/SystemZ.cpp
@@ -99,6 +99,16 @@ bool SystemZTargetInfo::validateAsmConstraint(
case 'T': // Likewise, plus an index
Info.setAllowsMemory();
return true;
+ case '@':
+ // CC condition changes.
+ if (StringRef(Name) == "@cc") {
+ Name += 2;
+ Info.setAllowsRegister();
+ // SystemZ has a 2-bit CC, hence the interval [0, 4).
+ Info.setOutputOperandBounds(0, 4);
+ return true;
+ }
+ return false;
}
}
@@ -161,6 +171,9 @@ unsigned SystemZTargetInfo::getMinGlobalAlign(uint64_t Size,
void SystemZTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
+ // Inline assembly supports SystemZ flag outputs.
+ Builder.defineMacro("__GCC_ASM_FLAG_OUTPUTS__");
+
Builder.defineMacro("__s390__");
Builder.defineMacro("__s390x__");
Builder.defineMacro("__zarch__");
diff --git a/clang/lib/Basic/Targets/SystemZ.h b/clang/lib/Basic/Targets/SystemZ.h
index dc2185e..4e15d5a 100644
--- a/clang/lib/Basic/Targets/SystemZ.h
+++ b/clang/lib/Basic/Targets/SystemZ.h
@@ -136,6 +136,12 @@ public:
std::string convertConstraint(const char *&Constraint) const override {
switch (Constraint[0]) {
+ case '@': // Flag output operand.
+ if (llvm::StringRef(Constraint) == "@cc") {
+ Constraint += 2;
+ return std::string("{@cc}");
+ }
+ break;
case 'p': // Keep 'p' constraint.
return std::string("p");
case 'Z':
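
(A usage sketch under the new SystemZ "@cc" constraint, with an illustrative mnemonic and operand tying — the details are assumptions, not taken from the patch. The point is that the output is the raw 2-bit condition code, so the [0, 4) bounds feed the llvm.assume emitted in CGStmt.cpp below.)

// Hypothetical s390x inline asm with a flag output (illustrative only).
int add_and_cc(int a, int b, int *sum) {
  int cc;
  asm("ar %[s],%[b]"               // add; sets the condition code
      : [s] "=d"(*sum), "=@cc"(cc) // "@cc" -> raw CC value in [0, 4)
      : "0"(a), [b] "d"(b));
  return cc;
}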
diff --git a/clang/lib/Basic/Targets/X86.cpp b/clang/lib/Basic/Targets/X86.cpp
index 6eb4db5..187815c 100644
--- a/clang/lib/Basic/Targets/X86.cpp
+++ b/clang/lib/Basic/Targets/X86.cpp
@@ -1516,6 +1516,7 @@ bool X86TargetInfo::validateAsmConstraint(
if (auto Len = matchAsmCCConstraint(Name)) {
Name += Len - 1;
Info.setAllowsRegister();
+ Info.setOutputOperandBounds(0, 2);
return true;
}
return false;
diff --git a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
index 0f4d6d2..a9983f8 100644
--- a/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -255,7 +255,7 @@ static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
mlir::Value expected = builder.createLoad(loc, val1);
mlir::Value desired = builder.createLoad(loc, val2);
- auto cmpxchg = cir::AtomicCmpXchg::create(
+ auto cmpxchg = cir::AtomicCmpXchgOp::create(
builder, loc, expected.getType(), builder.getBoolTy(), ptr.getPointer(),
expected, desired,
cir::MemOrderAttr::get(&cgf.getMLIRContext(), successOrder),
@@ -404,7 +404,7 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__atomic_exchange:
- opName = cir::AtomicXchg::getOperationName();
+ opName = cir::AtomicXchgOp::getOperationName();
break;
case AtomicExpr::AO__opencl_atomic_init:
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
index 3d86f71..ce4ae7e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCClause.cpp
@@ -1005,7 +1005,7 @@ public:
/*temporary=*/nullptr, OpenACCReductionOperator::Invalid,
Decl::castToDeclContext(cgf.curFuncDecl), opInfo.origType,
opInfo.bounds.size(), opInfo.boundTypes, opInfo.baseType,
- privateOp);
+ privateOp, /*reductionCombinerRecipes=*/{});
// TODO: OpenACC: The dialect is going to change in the near future to
// have these be on a different operation, so when that changes, we
// probably need to change these here.
@@ -1046,7 +1046,7 @@ public:
OpenACCReductionOperator::Invalid,
Decl::castToDeclContext(cgf.curFuncDecl), opInfo.origType,
opInfo.bounds.size(), opInfo.boundTypes, opInfo.baseType,
- firstPrivateOp);
+ firstPrivateOp, /*reductionCombinerRecipes=*/{});
// TODO: OpenACC: The dialect is going to change in the near future to
// have these be on a different operation, so when that changes, we
@@ -1088,7 +1088,7 @@ public:
/*temporary=*/nullptr, clause.getReductionOp(),
Decl::castToDeclContext(cgf.curFuncDecl), opInfo.origType,
opInfo.bounds.size(), opInfo.boundTypes, opInfo.baseType,
- reductionOp);
+ reductionOp, varRecipe.CombinerRecipes);
operation.addReduction(builder.getContext(), reductionOp, recipe);
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
index 24a5fc2..ce14aa8 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
@@ -527,16 +527,142 @@ void OpenACCRecipeBuilderBase::createFirstprivateRecipeCopy(
// doesn't restore it afterwards.
void OpenACCRecipeBuilderBase::createReductionRecipeCombiner(
mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp,
- mlir::acc::ReductionRecipeOp recipe, size_t numBounds) {
+ mlir::acc::ReductionRecipeOp recipe, size_t numBounds, QualType origType,
+ llvm::ArrayRef<OpenACCReductionRecipe::CombinerRecipe> combinerRecipes) {
mlir::Block *block =
createRecipeBlock(recipe.getCombinerRegion(), mainOp.getType(), loc,
numBounds, /*isInit=*/false);
builder.setInsertionPointToEnd(&recipe.getCombinerRegion().back());
CIRGenFunction::LexicalScope ls(cgf, loc, block);
- mlir::BlockArgument lhsArg = block->getArgument(0);
+ mlir::Value lhsArg = block->getArgument(0);
+ mlir::Value rhsArg = block->getArgument(1);
+ llvm::MutableArrayRef<mlir::BlockArgument> boundsRange =
+ block->getArguments().drop_front(2);
+
+ if (llvm::any_of(combinerRecipes, [](auto &r) { return r.Op == nullptr; })) {
+ cgf.cgm.errorNYI(loc, "OpenACC Reduction combiner not generated");
+ mlir::acc::YieldOp::create(builder, locEnd, block->getArgument(0));
+ return;
+ }
+
+ // Apply the bounds so that the bounds loops are emitted correctly.
+ for (mlir::BlockArgument boundArg : llvm::reverse(boundsRange))
+ std::tie(lhsArg, rhsArg) =
+ createBoundsLoop(lhsArg, rhsArg, boundArg, loc, /*inverse=*/false);
+
+ // Emitter for when we know this isn't a struct or an array that we have
+ // to loop through. This also works for a 'field' once the get-element
+ // call has been made.
+ auto emitSingleCombiner =
+ [&](mlir::Value lhsArg, mlir::Value rhsArg,
+ const OpenACCReductionRecipe::CombinerRecipe &combiner) {
+ mlir::Type elementTy =
+ mlir::cast<cir::PointerType>(lhsArg.getType()).getPointee();
+ CIRGenFunction::DeclMapRevertingRAII declMapRAIILhs{cgf, combiner.LHS};
+ cgf.setAddrOfLocalVar(
+ combiner.LHS, Address{lhsArg, elementTy,
+ cgf.getContext().getDeclAlign(combiner.LHS)});
+ CIRGenFunction::DeclMapRevertingRAII declMapRAIIRhs{cgf, combiner.RHS};
+ cgf.setAddrOfLocalVar(
+ combiner.RHS, Address{rhsArg, elementTy,
+ cgf.getContext().getDeclAlign(combiner.RHS)});
+
+ [[maybe_unused]] mlir::LogicalResult stmtRes =
+ cgf.emitStmt(combiner.Op, /*useCurrentScope=*/true);
+ };
+
+ // Emitter for when we know this is either a non-array or an element of an
+ // array (which also shouldn't be an array type?). This generates the
+ // combiner code for an entire 'array-element'/non-array, including diving
+ // into each field of a struct (if necessary).
+ auto emitCombiner = [&](mlir::Value lhsArg, mlir::Value rhsArg, QualType ty) {
+ assert(!ty->isArrayType() && "Array type shouldn't get here");
+ if (const auto *rd = ty->getAsRecordDecl()) {
+ if (combinerRecipes.size() == 1 &&
+ cgf.getContext().hasSameType(ty, combinerRecipes[0].LHS->getType())) {
+ // If this is a 'top level' operator on the type we can just emit this
+ // as a simple one.
+ emitSingleCombiner(lhsArg, rhsArg, combinerRecipes[0]);
+ } else {
+ // Otherwise we have to handle each individual field after a
+ // get-element.
+ for (const auto &[field, combiner] :
+ llvm::zip_equal(rd->fields(), combinerRecipes)) {
+ mlir::Type fieldType = cgf.convertType(field->getType());
+ auto fieldPtr = cir::PointerType::get(fieldType);
+
+ mlir::Value lhsField = builder.createGetMember(
+ loc, fieldPtr, lhsArg, field->getName(), field->getFieldIndex());
+ mlir::Value rhsField = builder.createGetMember(
+ loc, fieldPtr, rhsArg, field->getName(), field->getFieldIndex());
+
+ emitSingleCombiner(lhsField, rhsField, combiner);
+ }
+ }
+
+ } else {
+ // If this is a single thing (we know it isn't an array, as Sema wouldn't
+ // let us get here), we can just do a normal emit call.
+ emitSingleCombiner(lhsArg, rhsArg, combinerRecipes[0]);
+ }
+ };
+
+ if (const auto *cat = cgf.getContext().getAsConstantArrayType(origType)) {
+ // If we're in an array, we have to emit the combiner for each element of
+ // the array.
+ auto itrTy = mlir::cast<cir::IntType>(cgf.PtrDiffTy);
+ auto itrPtrTy = cir::PointerType::get(itrTy);
+
+ mlir::Value zero =
+ builder.getConstInt(loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), 0);
+ mlir::Value itr =
+ cir::AllocaOp::create(builder, loc, itrPtrTy, itrTy, "itr",
+ cgf.cgm.getSize(cgf.getPointerAlign()));
+ builder.CIRBaseBuilderTy::createStore(loc, zero, itr);
+
+ builder.setInsertionPointAfter(builder.createFor(
+ loc,
+ /*condBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ auto loadItr = cir::LoadOp::create(builder, loc, {itr});
+ mlir::Value arraySize = builder.getConstInt(
+ loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), cat->getZExtSize());
+ auto cmp = builder.createCompare(loc, cir::CmpOpKind::lt, loadItr,
+ arraySize);
+ builder.createCondition(cmp);
+ },
+ /*bodyBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ auto loadItr = cir::LoadOp::create(builder, loc, {itr});
+ auto lhsElt = builder.getArrayElement(
+ loc, loc, lhsArg, cgf.convertType(cat->getElementType()), loadItr,
+ /*shouldDecay=*/true);
+ auto rhsElt = builder.getArrayElement(
+ loc, loc, rhsArg, cgf.convertType(cat->getElementType()), loadItr,
+ /*shouldDecay=*/true);
+
+ emitCombiner(lhsElt, rhsElt, cat->getElementType());
+ builder.createYield(loc);
+ },
+ /*stepBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ auto loadItr = cir::LoadOp::create(builder, loc, {itr});
+ auto inc = cir::UnaryOp::create(builder, loc, loadItr.getType(),
+ cir::UnaryOpKind::Inc, loadItr);
+ builder.CIRBaseBuilderTy::createStore(loc, inc, itr);
+ builder.createYield(loc);
+ }));
- mlir::acc::YieldOp::create(builder, locEnd, lhsArg);
+ } else if (origType->isArrayType()) {
+ cgf.cgm.errorNYI(loc,
+ "OpenACC Reduction combiner non-constant array recipe");
+ } else {
+ emitCombiner(lhsArg, rhsArg, origType);
+ }
+
+ builder.setInsertionPointToEnd(&recipe.getCombinerRegion().back());
+ mlir::acc::YieldOp::create(builder, locEnd, block->getArgument(0));
}
} // namespace clang::CIRGen
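
(An illustrative OpenACC source shape, not from the patch, that would exercise the paths above: a struct reduction combines field-by-field via createGetMember, and a constant-size array reduction would wrap the combiner in the generated loop. Whether struct reductions are accepted depends on the OpenACC support level, so treat this as a sketch.)

// Hypothetical example; assumes a reduction-capable OpenACC build.
struct Counts { int hits; int misses; };

void tally(Counts c, int n) {
#pragma acc parallel loop reduction(+ : c)
  for (int i = 0; i < n; ++i)
    c.hits += 1; // combiner adds each field pairwise
}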
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h
index a5da744..745d424 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.h
@@ -64,10 +64,10 @@ protected:
// that this function is not 'insertion point' clean, in that it alters the
// insertion point to be inside of the 'combiner' section of the recipe, but
// doesn't restore it aftewards.
- void createReductionRecipeCombiner(mlir::Location loc, mlir::Location locEnd,
- mlir::Value mainOp,
- mlir::acc::ReductionRecipeOp recipe,
- size_t numBounds);
+ void createReductionRecipeCombiner(
+ mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp,
+ mlir::acc::ReductionRecipeOp recipe, size_t numBounds, QualType origType,
+ llvm::ArrayRef<OpenACCReductionRecipe::CombinerRecipe> combinerRecipes);
void createInitRecipe(mlir::Location loc, mlir::Location locEnd,
SourceRange exprRange, mlir::Value mainOp,
@@ -169,7 +169,9 @@ public:
const Expr *varRef, const VarDecl *varRecipe, const VarDecl *temporary,
OpenACCReductionOperator reductionOp, DeclContext *dc, QualType origType,
size_t numBounds, llvm::ArrayRef<QualType> boundTypes, QualType baseType,
- mlir::Value mainOp) {
+ mlir::Value mainOp,
+ llvm::ArrayRef<OpenACCReductionRecipe::CombinerRecipe>
+ reductionCombinerRecipes) {
assert(!varRecipe->getType()->isSpecificBuiltinType(
BuiltinType::ArraySection) &&
"array section shouldn't make it to recipe creation");
@@ -208,7 +210,8 @@ public:
createInitRecipe(loc, locEnd, varRef->getSourceRange(), mainOp,
recipe.getInitRegion(), numBounds, boundTypes, varRecipe,
origType, /*emitInitExpr=*/true);
- createReductionRecipeCombiner(loc, locEnd, mainOp, recipe, numBounds);
+ createReductionRecipeCombiner(loc, locEnd, mainOp, recipe, numBounds,
+ origType, reductionCombinerRecipes);
} else {
static_assert(std::is_same_v<RecipeTy, mlir::acc::FirstprivateRecipeOp>);
createInitRecipe(loc, locEnd, varRef->getSourceRange(), mainOp,
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index 12837d9..7af3dc1 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -2901,20 +2901,6 @@ mlir::LogicalResult cir::ThrowOp::verify() {
}
//===----------------------------------------------------------------------===//
-// AtomicCmpXchg
-//===----------------------------------------------------------------------===//
-
-LogicalResult cir::AtomicCmpXchg::verify() {
- mlir::Type pointeeType = getPtr().getType().getPointee();
-
- if (pointeeType != getExpected().getType() ||
- pointeeType != getDesired().getType())
- return emitOpError("ptr, expected and desired types must match");
-
- return success();
-}
-
-//===----------------------------------------------------------------------===//
// TypeInfoAttr
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index f0d73ac..3abba3d 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -694,8 +694,8 @@ getLLVMMemOrder(std::optional<cir::MemOrder> memorder) {
llvm_unreachable("unknown memory order");
}
-mlir::LogicalResult CIRToLLVMAtomicCmpXchgLowering::matchAndRewrite(
- cir::AtomicCmpXchg op, OpAdaptor adaptor,
+mlir::LogicalResult CIRToLLVMAtomicCmpXchgOpLowering::matchAndRewrite(
+ cir::AtomicCmpXchgOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
mlir::Value expected = adaptor.getExpected();
mlir::Value desired = adaptor.getDesired();
@@ -719,8 +719,8 @@ mlir::LogicalResult CIRToLLVMAtomicCmpXchgLowering::matchAndRewrite(
return mlir::success();
}
-mlir::LogicalResult CIRToLLVMAtomicXchgLowering::matchAndRewrite(
- cir::AtomicXchg op, OpAdaptor adaptor,
+mlir::LogicalResult CIRToLLVMAtomicXchgOpLowering::matchAndRewrite(
+ cir::AtomicXchgOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
assert(!cir::MissingFeatures::atomicSyncScopeID());
mlir::LLVM::AtomicOrdering llvmOrder = getLLVMMemOrder(adaptor.getMemOrder());
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index 92636f2..fdc1a11 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -2674,7 +2674,8 @@ EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
const llvm::ArrayRef<LValue> ResultRegDests,
const llvm::ArrayRef<QualType> ResultRegQualTys,
const llvm::BitVector &ResultTypeRequiresCast,
- const llvm::BitVector &ResultRegIsFlagReg) {
+ const std::vector<std::optional<std::pair<unsigned, unsigned>>>
+ &ResultBounds) {
CGBuilderTy &Builder = CGF.Builder;
CodeGenModule &CGM = CGF.CGM;
llvm::LLVMContext &CTX = CGF.getLLVMContext();
@@ -2685,18 +2686,20 @@ EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
// ResultRegDests can be also populated by addReturnRegisterOutputs() above,
// in which case its size may grow.
assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
- assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
+ assert(ResultBounds.size() <= ResultRegDests.size());
for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
llvm::Value *Tmp = RegResults[i];
llvm::Type *TruncTy = ResultTruncRegTypes[i];
- if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
- // Target must guarantee the Value `Tmp` here is lowered to a boolean
- // value.
- llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
+ if ((i < ResultBounds.size()) && ResultBounds[i].has_value()) {
+ const auto [LowerBound, UpperBound] = ResultBounds[i].value();
+ // FIXME: Support for nonzero lower bounds not yet implemented.
+ assert(LowerBound == 0 && "Output operand lower bound is not zero.");
+ llvm::Constant *UpperBoundConst =
+ llvm::ConstantInt::get(Tmp->getType(), UpperBound);
llvm::Value *IsBooleanValue =
- Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
+ Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, UpperBoundConst);
llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
Builder.CreateCall(FnAssume, IsBooleanValue);
}
@@ -2825,7 +2828,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
std::vector<llvm::Type *> ArgElemTypes;
std::vector<llvm::Value*> Args;
llvm::BitVector ResultTypeRequiresCast;
- llvm::BitVector ResultRegIsFlagReg;
+ std::vector<std::optional<std::pair<unsigned, unsigned>>> ResultBounds;
// Keep track of inout constraints.
std::string InOutConstraints;
@@ -2883,8 +2886,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
ResultRegQualTys.push_back(QTy);
ResultRegDests.push_back(Dest);
- bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with("{@cc");
- ResultRegIsFlagReg.push_back(IsFlagReg);
+ ResultBounds.emplace_back(Info.getOutputOperandBounds());
llvm::Type *Ty = ConvertTypeForMem(QTy);
const bool RequiresCast = Info.allowsRegister() &&
@@ -3231,7 +3233,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
- ResultRegIsFlagReg);
+ ResultBounds);
// If this is an asm goto with outputs, repeat EmitAsmStores, but with a
// different insertion point; one for each indirect destination and with
@@ -3242,7 +3244,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
Builder.SetInsertPoint(Succ, --(Succ->end()));
EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
- ResultTypeRequiresCast, ResultRegIsFlagReg);
+ ResultTypeRequiresCast, ResultBounds);
}
}
}
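
(A minimal model, not the CodeGen API, of the invariant the generalized EmitAsmStores encodes: any output carrying bounds [Lower, Upper) gets an assumption that its value is strictly below Upper, with Lower currently pinned to zero exactly as the assert above requires.)

#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>

static void assumeOutputInRange(
    uint64_t value, std::optional<std::pair<unsigned, unsigned>> bounds) {
  if (!bounds)
    return;                       // no bounds recorded for this output
  auto [lower, upper] = *bounds;
  assert(lower == 0 && "nonzero lower bounds not yet supported");
  assert(value < upper);          // stands in for llvm.assume(icmp ult ...)
}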
diff --git a/clang/lib/CodeGen/Targets/RISCV.cpp b/clang/lib/CodeGen/Targets/RISCV.cpp
index 0ef39b6..0d0941e 100644
--- a/clang/lib/CodeGen/Targets/RISCV.cpp
+++ b/clang/lib/CodeGen/Targets/RISCV.cpp
@@ -680,22 +680,22 @@ ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
if (const auto *ED = Ty->getAsEnumDecl())
Ty = ED->getIntegerType();
- // All integral types are promoted to XLen width
- if (Size < XLen && Ty->isIntegralOrEnumerationType()) {
- return extendType(Ty, CGT.ConvertType(Ty));
- }
-
if (const auto *EIT = Ty->getAs<BitIntType>()) {
- if (EIT->getNumBits() < XLen)
+
+ if (XLen == 64 && EIT->getNumBits() == 32)
return extendType(Ty, CGT.ConvertType(Ty));
- if (EIT->getNumBits() > 128 ||
- (!getContext().getTargetInfo().hasInt128Type() &&
- EIT->getNumBits() > 64))
- return getNaturalAlignIndirect(
- Ty, /*AddrSpace=*/getDataLayout().getAllocaAddrSpace(),
- /*ByVal=*/false);
+
+ if (EIT->getNumBits() <= 2 * XLen)
+ return ABIArgInfo::getExtend(Ty, CGT.ConvertType(Ty));
+ return getNaturalAlignIndirect(
+ Ty, /*AddrSpace=*/getDataLayout().getAllocaAddrSpace(),
+ /*ByVal=*/false);
}
+ // All integral types are promoted to XLen width
+ if (Size < XLen && Ty->isIntegralOrEnumerationType())
+ return extendType(Ty, CGT.ConvertType(Ty));
+
return ABIArgInfo::getDirect();
}
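
(A sketch of what the reordered _BitInt handling means for RV64 callers; the comments mirror the branches above and are illustrative, not normative ABI text.)

// Compiled for riscv64 (XLen == 64):
_BitInt(32)  f32(_BitInt(32) x)   { return x; } // == 32: extendType
_BitInt(100) f100(_BitInt(100) x) { return x; } // <= 2*XLen: getExtend, direct
_BitInt(200) f200(_BitInt(200) x) { return x; } // > 2*XLen: indirect, not byval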
diff --git a/clang/lib/Format/FormatToken.cpp b/clang/lib/Format/FormatToken.cpp
index c2956a1..cb3fc1c 100644
--- a/clang/lib/Format/FormatToken.cpp
+++ b/clang/lib/Format/FormatToken.cpp
@@ -41,8 +41,7 @@ static constexpr std::array<StringRef, 14> QtPropertyKeywords = {
bool FormatToken::isQtProperty() const {
assert(llvm::is_sorted(QtPropertyKeywords));
- return std::binary_search(QtPropertyKeywords.begin(),
- QtPropertyKeywords.end(), TokenText);
+ return llvm::binary_search(QtPropertyKeywords, TokenText);
}
// Sorted common C++ non-keyword types.
diff --git a/clang/lib/Headers/avx2intrin.h b/clang/lib/Headers/avx2intrin.h
index 4aaca2d..e150aa6 100644
--- a/clang/lib/Headers/avx2intrin.h
+++ b/clang/lib/Headers/avx2intrin.h
@@ -834,10 +834,9 @@ _mm256_cmpgt_epi64(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the sums.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hadd_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_phaddw256((__v16hi)__a, (__v16hi)__b);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_hadd_epi16(__m256i __a, __m256i __b) {
+ return (__m256i)__builtin_ia32_phaddw256((__v16hi)__a, (__v16hi)__b);
}
/// Horizontally adds the adjacent pairs of 32-bit integers from two 256-bit
@@ -866,10 +865,9 @@ _mm256_hadd_epi16(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [8 x i32] containing one of the source operands.
/// \returns A 256-bit vector of [8 x i32] containing the sums.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hadd_epi32(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_phaddd256((__v8si)__a, (__v8si)__b);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_hadd_epi32(__m256i __a, __m256i __b) {
+ return (__m256i)__builtin_ia32_phaddd256((__v8si)__a, (__v8si)__b);
}
/// Horizontally adds the adjacent pairs of 16-bit integers from two 256-bit
@@ -901,10 +899,9 @@ _mm256_hadd_epi32(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the sums.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hadds_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_phaddsw256((__v16hi)__a, (__v16hi)__b);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_hadds_epi16(__m256i __a, __m256i __b) {
+ return (__m256i)__builtin_ia32_phaddsw256((__v16hi)__a, (__v16hi)__b);
}
/// Horizontally subtracts adjacent pairs of 16-bit integers from two 256-bit
@@ -937,10 +934,9 @@ _mm256_hadds_epi16(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the differences.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hsub_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_phsubw256((__v16hi)__a, (__v16hi)__b);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_hsub_epi16(__m256i __a, __m256i __b) {
+ return (__m256i)__builtin_ia32_phsubw256((__v16hi)__a, (__v16hi)__b);
}
/// Horizontally subtracts adjacent pairs of 32-bit integers from two 256-bit
@@ -969,10 +965,9 @@ _mm256_hsub_epi16(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [8 x i32] containing one of the source operands.
/// \returns A 256-bit vector of [8 x i32] containing the differences.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hsub_epi32(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_phsubd256((__v8si)__a, (__v8si)__b);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_hsub_epi32(__m256i __a, __m256i __b) {
+ return (__m256i)__builtin_ia32_phsubd256((__v8si)__a, (__v8si)__b);
}
/// Horizontally subtracts adjacent pairs of 16-bit integers from two 256-bit
@@ -1005,10 +1000,9 @@ _mm256_hsub_epi32(__m256i __a, __m256i __b)
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the differences.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_hsubs_epi16(__m256i __a, __m256i __b)
-{
- return (__m256i)__builtin_ia32_phsubsw256((__v16hi)__a, (__v16hi)__b);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_hsubs_epi16(__m256i __a, __m256i __b) {
+ return (__m256i)__builtin_ia32_phsubsw256((__v16hi)__a, (__v16hi)__b);
}
/// Multiplies each unsigned byte from the 256-bit integer vector in \a __a
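
(With the _CONSTEXPR attribute these intrinsics become usable in constant expressions; a sketch, assuming a clang new enough to constant-evaluate the builtin and to allow vector subscripting in constexpr.)

#include <immintrin.h>

constexpr __m256i kA = {0x0002000100040003LL, 0, 0, 0}; // packed i16 lanes
constexpr __m256i kS = _mm256_hadd_epi16(kA, kA);       // now a constant
static_assert(kS[0] != 0, "evaluated at compile time");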
diff --git a/clang/lib/Headers/avx512ifmaintrin.h b/clang/lib/Headers/avx512ifmaintrin.h
index f01b322..625a8ff 100644
--- a/clang/lib/Headers/avx512ifmaintrin.h
+++ b/clang/lib/Headers/avx512ifmaintrin.h
@@ -15,54 +15,53 @@
#define __IFMAINTRIN_H
/* Define the default attributes for the functions in this file. */
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS \
+ constexpr \
+ __attribute__((__always_inline__, __nodebug__, __target__("avx512ifma"), \
+ __min_vector_width__(512)))
+#else
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("avx512ifma"), \
__min_vector_width__(512)))
+#endif
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_madd52hi_epu64 (__m512i __X, __m512i __Y, __m512i __Z)
-{
- return (__m512i)__builtin_ia32_vpmadd52huq512((__v8di) __X, (__v8di) __Y,
- (__v8di) __Z);
+_mm512_madd52hi_epu64(__m512i __X, __m512i __Y, __m512i __Z) {
+ return (__m512i)__builtin_ia32_vpmadd52huq512((__v8di)__X, (__v8di)__Y,
+ (__v8di)__Z);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_madd52hi_epu64 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
-{
- return (__m512i)__builtin_ia32_selectq_512(__M,
- (__v8di)_mm512_madd52hi_epu64(__W, __X, __Y),
- (__v8di)__W);
+static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_madd52hi_epu64(
+ __m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) {
+ return (__m512i)__builtin_ia32_selectq_512(
+ __M, (__v8di)_mm512_madd52hi_epu64(__W, __X, __Y), (__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_madd52hi_epu64 (__mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z)
-{
- return (__m512i)__builtin_ia32_selectq_512(__M,
- (__v8di)_mm512_madd52hi_epu64(__X, __Y, __Z),
- (__v8di)_mm512_setzero_si512());
+static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_madd52hi_epu64(
+ __mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z) {
+ return (__m512i)__builtin_ia32_selectq_512(
+ __M, (__v8di)_mm512_madd52hi_epu64(__X, __Y, __Z),
+ (__v8di)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_madd52lo_epu64 (__m512i __X, __m512i __Y, __m512i __Z)
-{
- return (__m512i)__builtin_ia32_vpmadd52luq512((__v8di) __X, (__v8di) __Y,
- (__v8di) __Z);
+_mm512_madd52lo_epu64(__m512i __X, __m512i __Y, __m512i __Z) {
+ return (__m512i)__builtin_ia32_vpmadd52luq512((__v8di)__X, (__v8di)__Y,
+ (__v8di)__Z);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_madd52lo_epu64 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
-{
- return (__m512i)__builtin_ia32_selectq_512(__M,
- (__v8di)_mm512_madd52lo_epu64(__W, __X, __Y),
- (__v8di)__W);
+static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_madd52lo_epu64(
+ __m512i __W, __mmask8 __M, __m512i __X, __m512i __Y) {
+ return (__m512i)__builtin_ia32_selectq_512(
+ __M, (__v8di)_mm512_madd52lo_epu64(__W, __X, __Y), (__v8di)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_madd52lo_epu64 (__mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z)
-{
- return (__m512i)__builtin_ia32_selectq_512(__M,
- (__v8di)_mm512_madd52lo_epu64(__X, __Y, __Z),
- (__v8di)_mm512_setzero_si512());
+static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_madd52lo_epu64(
+ __mmask8 __M, __m512i __X, __m512i __Y, __m512i __Z) {
+ return (__m512i)__builtin_ia32_selectq_512(
+ __M, (__v8di)_mm512_madd52lo_epu64(__X, __Y, __Z),
+ (__v8di)_mm512_setzero_si512());
}
#undef __DEFAULT_FN_ATTRS
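
(A sketch of what the conditional constexpr attribute buys in C++ mode, assuming constant evaluation of the builtin; 8 x i64 literal vectors stand in for set1.)

#include <immintrin.h>

constexpr __m512i kA = {1, 1, 1, 1, 1, 1, 1, 1};
constexpr __m512i kB = {3, 3, 3, 3, 3, 3, 3, 3};
constexpr __m512i kC = {5, 5, 5, 5, 5, 5, 5, 5};
constexpr __m512i kR = _mm512_madd52lo_epu64(kA, kB, kC);
static_assert(kR[0] == 16, "1 + low-52-bits(3 * 5)");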
diff --git a/clang/lib/Headers/avx512ifmavlintrin.h b/clang/lib/Headers/avx512ifmavlintrin.h
index a72b561..c4449c7 100644
--- a/clang/lib/Headers/avx512ifmavlintrin.h
+++ b/clang/lib/Headers/avx512ifmavlintrin.h
@@ -8,13 +8,24 @@
*===-----------------------------------------------------------------------===
*/
#ifndef __IMMINTRIN_H
-#error "Never use <avx512ifmavlintrin.h> directly; include <immintrin.h> instead."
+#error \
+ "Never use <avx512ifmavlintrin.h> directly; include <immintrin.h> instead."
#endif
#ifndef __IFMAVLINTRIN_H
#define __IFMAVLINTRIN_H
/* Define the default attributes for the functions in this file. */
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS128 \
+ constexpr __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512ifma,avx512vl"), \
+ __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ constexpr __attribute__((__always_inline__, __nodebug__, \
+ __target__("avx512ifma,avx512vl"), \
+ __min_vector_width__(256)))
+#else
#define __DEFAULT_FN_ATTRS128 \
__attribute__((__always_inline__, __nodebug__, \
__target__("avx512ifma,avx512vl"), \
@@ -24,6 +35,8 @@
__target__("avx512ifma,avx512vl"), \
__min_vector_width__(256)))
+#endif
+
#define _mm_madd52hi_epu64(X, Y, Z) \
((__m128i)__builtin_ia32_vpmadd52huq128((__v2di)(X), (__v2di)(Y), \
(__v2di)(Z)))
@@ -41,70 +54,57 @@
(__v4di)(Z)))
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_madd52hi_epu64 (__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
-{
- return (__m128i)__builtin_ia32_selectq_128(__M,
- (__v2di)_mm_madd52hi_epu64(__W, __X, __Y),
- (__v2di)__W);
+_mm_mask_madd52hi_epu64(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) {
+ return (__m128i)__builtin_ia32_selectq_128(
+ __M, (__v2di)_mm_madd52hi_epu64(__W, __X, __Y), (__v2di)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_madd52hi_epu64 (__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z)
-{
- return (__m128i)__builtin_ia32_selectq_128(__M,
- (__v2di)_mm_madd52hi_epu64(__X, __Y, __Z),
- (__v2di)_mm_setzero_si128());
+_mm_maskz_madd52hi_epu64(__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z) {
+ return (__m128i)__builtin_ia32_selectq_128(
+ __M, (__v2di)_mm_madd52hi_epu64(__X, __Y, __Z),
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_madd52hi_epu64 (__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
-{
- return (__m256i)__builtin_ia32_selectq_256(__M,
- (__v4di)_mm256_madd52hi_epu64(__W, __X, __Y),
- (__v4di)__W);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_madd52hi_epu64(
+ __m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) {
+ return (__m256i)__builtin_ia32_selectq_256(
+ __M, (__v4di)_mm256_madd52hi_epu64(__W, __X, __Y), (__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_madd52hi_epu64 (__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z)
-{
- return (__m256i)__builtin_ia32_selectq_256(__M,
- (__v4di)_mm256_madd52hi_epu64(__X, __Y, __Z),
- (__v4di)_mm256_setzero_si256());
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_madd52hi_epu64(
+ __mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z) {
+ return (__m256i)__builtin_ia32_selectq_256(
+ __M, (__v4di)_mm256_madd52hi_epu64(__X, __Y, __Z),
+ (__v4di)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_mask_madd52lo_epu64 (__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
-{
- return (__m128i)__builtin_ia32_selectq_128(__M,
- (__v2di)_mm_madd52lo_epu64(__W, __X, __Y),
- (__v2di)__W);
+_mm_mask_madd52lo_epu64(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y) {
+ return (__m128i)__builtin_ia32_selectq_128(
+ __M, (__v2di)_mm_madd52lo_epu64(__W, __X, __Y), (__v2di)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_madd52lo_epu64 (__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z)
-{
- return (__m128i)__builtin_ia32_selectq_128(__M,
- (__v2di)_mm_madd52lo_epu64(__X, __Y, __Z),
- (__v2di)_mm_setzero_si128());
+_mm_maskz_madd52lo_epu64(__mmask8 __M, __m128i __X, __m128i __Y, __m128i __Z) {
+ return (__m128i)__builtin_ia32_selectq_128(
+ __M, (__v2di)_mm_madd52lo_epu64(__X, __Y, __Z),
+ (__v2di)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mask_madd52lo_epu64 (__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
-{
- return (__m256i)__builtin_ia32_selectq_256(__M,
- (__v4di)_mm256_madd52lo_epu64(__W, __X, __Y),
- (__v4di)__W);
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_mask_madd52lo_epu64(
+ __m256i __W, __mmask8 __M, __m256i __X, __m256i __Y) {
+ return (__m256i)__builtin_ia32_selectq_256(
+ __M, (__v4di)_mm256_madd52lo_epu64(__W, __X, __Y), (__v4di)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_madd52lo_epu64 (__mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z)
-{
- return (__m256i)__builtin_ia32_selectq_256(__M,
- (__v4di)_mm256_madd52lo_epu64(__X, __Y, __Z),
- (__v4di)_mm256_setzero_si256());
+static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_madd52lo_epu64(
+ __mmask8 __M, __m256i __X, __m256i __Y, __m256i __Z) {
+ return (__m256i)__builtin_ia32_selectq_256(
+ __M, (__v4di)_mm256_madd52lo_epu64(__X, __Y, __Z),
+ (__v4di)_mm256_setzero_si256());
}
-
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS256
diff --git a/clang/lib/Headers/avxifmaintrin.h b/clang/lib/Headers/avxifmaintrin.h
index 5c782d2a..a2ef601 100644
--- a/clang/lib/Headers/avxifmaintrin.h
+++ b/clang/lib/Headers/avxifmaintrin.h
@@ -15,12 +15,21 @@
#define __AVXIFMAINTRIN_H
/* Define the default attributes for the functions in this file. */
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS128 \
+ constexpr __attribute__((__always_inline__, __nodebug__, \
+ __target__("avxifma"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS256 \
+ constexpr __attribute__((__always_inline__, __nodebug__, \
+ __target__("avxifma"), __min_vector_width__(256)))
+#else
#define __DEFAULT_FN_ATTRS128 \
__attribute__((__always_inline__, __nodebug__, __target__("avxifma"), \
__min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS256 \
__attribute__((__always_inline__, __nodebug__, __target__("avxifma"), \
__min_vector_width__(256)))
+#endif
// These intrinsics must use VEX encoding.
diff --git a/clang/lib/Headers/avxintrin.h b/clang/lib/Headers/avxintrin.h
index 123fa79..696ec31 100644
--- a/clang/lib/Headers/avxintrin.h
+++ b/clang/lib/Headers/avxintrin.h
@@ -694,9 +694,8 @@ _mm256_xor_ps(__m256 __a, __m256 __b)
/// elements of a vector of [4 x double].
/// \returns A 256-bit vector of [4 x double] containing the horizontal sums of
/// both operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_hadd_pd(__m256d __a, __m256d __b)
-{
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_hadd_pd(__m256d __a, __m256d __b) {
return (__m256d)__builtin_ia32_haddpd256((__v4df)__a, (__v4df)__b);
}
@@ -717,9 +716,8 @@ _mm256_hadd_pd(__m256d __a, __m256d __b)
/// index 2, 3, 6, 7 of a vector of [8 x float].
/// \returns A 256-bit vector of [8 x float] containing the horizontal sums of
/// both operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_hadd_ps(__m256 __a, __m256 __b)
-{
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_hadd_ps(__m256 __a,
+ __m256 __b) {
return (__m256)__builtin_ia32_haddps256((__v8sf)__a, (__v8sf)__b);
}
@@ -740,9 +738,8 @@ _mm256_hadd_ps(__m256 __a, __m256 __b)
/// odd-indexed elements of a vector of [4 x double].
/// \returns A 256-bit vector of [4 x double] containing the horizontal
/// differences of both operands.
-static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_hsub_pd(__m256d __a, __m256d __b)
-{
+static __inline __m256d __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_hsub_pd(__m256d __a, __m256d __b) {
return (__m256d)__builtin_ia32_hsubpd256((__v4df)__a, (__v4df)__b);
}
@@ -763,9 +760,8 @@ _mm256_hsub_pd(__m256d __a, __m256d __b)
/// elements with index 2, 3, 6, 7 of a vector of [8 x float].
/// \returns A 256-bit vector of [8 x float] containing the horizontal
/// differences of both operands.
-static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_hsub_ps(__m256 __a, __m256 __b)
-{
+static __inline __m256 __DEFAULT_FN_ATTRS_CONSTEXPR _mm256_hsub_ps(__m256 __a,
+ __m256 __b) {
return (__m256)__builtin_ia32_hsubps256((__v8sf)__a, (__v8sf)__b);
}
diff --git a/clang/lib/Headers/pmmintrin.h b/clang/lib/Headers/pmmintrin.h
index f0c9b2b..42bd343 100644
--- a/clang/lib/Headers/pmmintrin.h
+++ b/clang/lib/Headers/pmmintrin.h
@@ -83,9 +83,8 @@ _mm_addsub_ps(__m128 __a, __m128 __b)
/// destination.
/// \returns A 128-bit vector of [4 x float] containing the horizontal sums of
/// both operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_hadd_ps(__m128 __a, __m128 __b)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_hadd_ps(__m128 __a,
+ __m128 __b) {
return __builtin_ia32_haddps((__v4sf)__a, (__v4sf)__b);
}
@@ -106,9 +105,8 @@ _mm_hadd_ps(__m128 __a, __m128 __b)
/// bits of the destination.
/// \returns A 128-bit vector of [4 x float] containing the horizontal
/// differences of both operands.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_hsub_ps(__m128 __a, __m128 __b)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_hsub_ps(__m128 __a,
+ __m128 __b) {
return __builtin_ia32_hsubps((__v4sf)__a, (__v4sf)__b);
}
@@ -168,9 +166,8 @@ _mm_moveldup_ps(__m128 __a)
/// A 128-bit vector of [2 x double] containing the right source operand.
/// \returns A 128-bit vector of [2 x double] containing the alternating sums
/// and differences of both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_addsub_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_addsub_pd(__m128d __a, __m128d __b) {
return __builtin_ia32_addsubpd((__v2df)__a, (__v2df)__b);
}
@@ -191,9 +188,8 @@ _mm_addsub_pd(__m128d __a, __m128d __b)
/// destination.
/// \returns A 128-bit vector of [2 x double] containing the horizontal sums of
/// both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_hadd_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_hadd_pd(__m128d __a, __m128d __b) {
return __builtin_ia32_haddpd((__v2df)__a, (__v2df)__b);
}
@@ -214,9 +210,8 @@ _mm_hadd_pd(__m128d __a, __m128d __b)
/// the destination.
/// \returns A 128-bit vector of [2 x double] containing the horizontal
/// differences of both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_hsub_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_hsub_pd(__m128d __a, __m128d __b) {
return __builtin_ia32_hsubpd((__v2df)__a, (__v2df)__b);
}
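
The SSE3 forms admit the same treatment; a sketch under the same assumption, showing that _mm_addsub_pd subtracts in the even lane and adds in the odd one:

#include <pmmintrin.h>

constexpr __m128d a = {1.0, 2.0};
constexpr __m128d b = {10.0, 20.0};
constexpr __m128d s = _mm_addsub_pd(a, b); // {1.0 - 10.0, 2.0 + 20.0}
constexpr __m128d h = _mm_hadd_pd(a, b);   // {1.0 + 2.0, 10.0 + 20.0}
static_assert(s[0] == -9.0 && s[1] == 22.0 && h[0] == 3.0 && h[1] == 30.0, "");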
diff --git a/clang/lib/Headers/tmmintrin.h b/clang/lib/Headers/tmmintrin.h
index 3fc9f98..9d007c8 100644
--- a/clang/lib/Headers/tmmintrin.h
+++ b/clang/lib/Headers/tmmintrin.h
@@ -202,10 +202,9 @@ _mm_abs_epi32(__m128i __a) {
/// destination.
/// \returns A 128-bit vector of [8 x i16] containing the horizontal sums of
/// both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_hadd_epi16(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_phaddw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_hadd_epi16(__m128i __a, __m128i __b) {
+ return (__m128i)__builtin_ia32_phaddw128((__v8hi)__a, (__v8hi)__b);
}
/// Horizontally adds the adjacent pairs of values contained in 2 packed
@@ -225,10 +224,9 @@ _mm_hadd_epi16(__m128i __a, __m128i __b)
/// destination.
/// \returns A 128-bit vector of [4 x i32] containing the horizontal sums of
/// both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_hadd_epi32(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_phaddd128((__v4si)__a, (__v4si)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_hadd_epi32(__m128i __a, __m128i __b) {
+ return (__m128i)__builtin_ia32_phaddd128((__v4si)__a, (__v4si)__b);
}
/// Horizontally adds the adjacent pairs of values contained in 2 packed
@@ -248,11 +246,10 @@ _mm_hadd_epi32(__m128i __a, __m128i __b)
/// destination.
/// \returns A 64-bit vector of [4 x i16] containing the horizontal sums of both
/// operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_hadd_pi16(__m64 __a, __m64 __b)
-{
- return __trunc64(__builtin_ia32_phaddw128(
- (__v8hi)__builtin_shufflevector(__a, __b, 0, 1), (__v8hi){}));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_hadd_pi16(__m64 __a,
+ __m64 __b) {
+ return __trunc64(__builtin_ia32_phaddw128(
+ (__v8hi)__builtin_shufflevector(__a, __b, 0, 1), (__v8hi){}));
}
/// Horizontally adds the adjacent pairs of values contained in 2 packed
@@ -272,11 +269,10 @@ _mm_hadd_pi16(__m64 __a, __m64 __b)
/// destination.
/// \returns A 64-bit vector of [2 x i32] containing the horizontal sums of both
/// operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_hadd_pi32(__m64 __a, __m64 __b)
-{
- return __trunc64(__builtin_ia32_phaddd128(
- (__v4si)__builtin_shufflevector(__a, __b, 0, 1), (__v4si){}));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_hadd_pi32(__m64 __a,
+ __m64 __b) {
+ return __trunc64(__builtin_ia32_phaddd128(
+ (__v4si)__builtin_shufflevector(__a, __b, 0, 1), (__v4si){}));
}
/// Horizontally adds, with saturation, the adjacent pairs of values contained
@@ -299,10 +295,9 @@ _mm_hadd_pi32(__m64 __a, __m64 __b)
/// destination.
/// \returns A 128-bit vector of [8 x i16] containing the horizontal saturated
/// sums of both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_hadds_epi16(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_phaddsw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_hadds_epi16(__m128i __a, __m128i __b) {
+ return (__m128i)__builtin_ia32_phaddsw128((__v8hi)__a, (__v8hi)__b);
}
/// Horizontally adds, with saturation, the adjacent pairs of values contained
@@ -325,11 +320,10 @@ _mm_hadds_epi16(__m128i __a, __m128i __b)
/// destination.
/// \returns A 64-bit vector of [4 x i16] containing the horizontal saturated
/// sums of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_hadds_pi16(__m64 __a, __m64 __b)
-{
- return __trunc64(__builtin_ia32_phaddsw128(
- (__v8hi)__builtin_shufflevector(__a, __b, 0, 1), (__v8hi){}));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_hadds_pi16(__m64 __a,
+ __m64 __b) {
+ return __trunc64(__builtin_ia32_phaddsw128(
+ (__v8hi)__builtin_shufflevector(__a, __b, 0, 1), (__v8hi){}));
}
/// Horizontally subtracts the adjacent pairs of values contained in 2
@@ -349,10 +343,9 @@ _mm_hadds_pi16(__m64 __a, __m64 __b)
/// the destination.
/// \returns A 128-bit vector of [8 x i16] containing the horizontal differences
/// of both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_hsub_epi16(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_phsubw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_hsub_epi16(__m128i __a, __m128i __b) {
+ return (__m128i)__builtin_ia32_phsubw128((__v8hi)__a, (__v8hi)__b);
}
/// Horizontally subtracts the adjacent pairs of values contained in 2
@@ -372,10 +365,9 @@ _mm_hsub_epi16(__m128i __a, __m128i __b)
/// the destination.
/// \returns A 128-bit vector of [4 x i32] containing the horizontal differences
/// of both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_hsub_epi32(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_phsubd128((__v4si)__a, (__v4si)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_hsub_epi32(__m128i __a, __m128i __b) {
+ return (__m128i)__builtin_ia32_phsubd128((__v4si)__a, (__v4si)__b);
}
/// Horizontally subtracts the adjacent pairs of values contained in 2
@@ -395,11 +387,10 @@ _mm_hsub_epi32(__m128i __a, __m128i __b)
/// the destination.
/// \returns A 64-bit vector of [4 x i16] containing the horizontal differences
/// of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_hsub_pi16(__m64 __a, __m64 __b)
-{
- return __trunc64(__builtin_ia32_phsubw128(
- (__v8hi)__builtin_shufflevector(__a, __b, 0, 1), (__v8hi){}));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_hsub_pi16(__m64 __a,
+ __m64 __b) {
+ return __trunc64(__builtin_ia32_phsubw128(
+ (__v8hi)__builtin_shufflevector(__a, __b, 0, 1), (__v8hi){}));
}
/// Horizontally subtracts the adjacent pairs of values contained in 2
@@ -419,11 +410,10 @@ _mm_hsub_pi16(__m64 __a, __m64 __b)
/// the destination.
/// \returns A 64-bit vector of [2 x i32] containing the horizontal differences
/// of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_hsub_pi32(__m64 __a, __m64 __b)
-{
- return __trunc64(__builtin_ia32_phsubd128(
- (__v4si)__builtin_shufflevector(__a, __b, 0, 1), (__v4si){}));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_hsub_pi32(__m64 __a,
+ __m64 __b) {
+ return __trunc64(__builtin_ia32_phsubd128(
+ (__v4si)__builtin_shufflevector(__a, __b, 0, 1), (__v4si){}));
}
/// Horizontally subtracts, with saturation, the adjacent pairs of values
@@ -446,10 +436,9 @@ _mm_hsub_pi32(__m64 __a, __m64 __b)
/// the destination.
/// \returns A 128-bit vector of [8 x i16] containing the horizontal saturated
/// differences of both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_hsubs_epi16(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_phsubsw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_hsubs_epi16(__m128i __a, __m128i __b) {
+ return (__m128i)__builtin_ia32_phsubsw128((__v8hi)__a, (__v8hi)__b);
}
/// Horizontally subtracts, with saturation, the adjacent pairs of values
@@ -472,11 +461,10 @@ _mm_hsubs_epi16(__m128i __a, __m128i __b)
/// the destination.
/// \returns A 64-bit vector of [4 x i16] containing the horizontal saturated
/// differences of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_hsubs_pi16(__m64 __a, __m64 __b)
-{
- return __trunc64(__builtin_ia32_phsubsw128(
- (__v8hi)__builtin_shufflevector(__a, __b, 0, 1), (__v8hi){}));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR _mm_hsubs_pi16(__m64 __a,
+ __m64 __b) {
+ return __trunc64(__builtin_ia32_phsubsw128(
+ (__v8hi)__builtin_shufflevector(__a, __b, 0, 1), (__v8hi){}));
}
/// Multiplies corresponding pairs of packed 8-bit unsigned integer
@@ -556,10 +544,9 @@ _mm_maddubs_pi16(__m64 __a, __m64 __b) {
/// A 128-bit vector of [8 x i16] containing one of the source operands.
/// \returns A 128-bit vector of [8 x i16] containing the rounded and scaled
/// products of both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mulhrs_epi16(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_pmulhrsw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhrs_epi16(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_ia32_pmulhrsw128((__v8hi)__a, (__v8hi)__b);
}
/// Multiplies packed 16-bit signed integer values, truncates the 32-bit
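
For the SSSE3 integer forms, a sketch of the constant folding the constexpr attribute permits (same assumption as above; __m128i braces take two 64-bit lanes, so the literals below pack the 16-bit elements 1..8 and 9..16 in little-endian order):

#include <tmmintrin.h>

constexpr __m128i a = {0x0004000300020001LL, 0x0008000700060005LL};
constexpr __m128i b = {0x000C000B000A0009LL, 0x0010000F000E000DLL};
constexpr __m128i r = _mm_hadd_epi16(a, b);
// r packs {1+2, 3+4, 5+6, 7+8, 9+10, 11+12, 13+14, 15+16}.
static_assert(r[0] == 0x000F000B00070003LL && r[1] == 0x001F001B00170013LL, "");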
diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp
index 7c44efd..87dd682 100644
--- a/clang/lib/Sema/SemaConcept.cpp
+++ b/clang/lib/Sema/SemaConcept.cpp
@@ -606,15 +606,15 @@ ConstraintSatisfactionChecker::SubstitutionInTemplateArguments(
Constraint.mappingOccurenceList();
// The empty MLTAL situation should only occur when evaluating non-dependent
// constraints.
- if (!MLTAL.getNumSubstitutedLevels())
- MLTAL.addOuterTemplateArguments(TD, {}, /*Final=*/false);
- SubstitutedOuterMost =
- llvm::to_vector_of<TemplateArgument>(MLTAL.getOutermost());
+ if (MLTAL.getNumSubstitutedLevels())
+ SubstitutedOuterMost =
+ llvm::to_vector_of<TemplateArgument>(MLTAL.getOutermost());
unsigned Offset = 0;
for (unsigned I = 0, MappedIndex = 0; I < Used.size(); I++) {
TemplateArgument Arg;
if (Used[I])
- Arg = CTAI.SugaredConverted[MappedIndex++];
+ Arg = S.Context.getCanonicalTemplateArgument(
+ CTAI.SugaredConverted[MappedIndex++]);
if (I < SubstitutedOuterMost.size()) {
SubstitutedOuterMost[I] = Arg;
Offset = I + 1;
@@ -626,8 +626,10 @@ ConstraintSatisfactionChecker::SubstitutionInTemplateArguments(
if (Offset < SubstitutedOuterMost.size())
SubstitutedOuterMost.erase(SubstitutedOuterMost.begin() + Offset);
- MLTAL.replaceOutermostTemplateArguments(TD, SubstitutedOuterMost);
- return std::move(MLTAL);
+ MultiLevelTemplateArgumentList SubstitutedTemplateArgs;
+ SubstitutedTemplateArgs.addOuterTemplateArguments(TD, SubstitutedOuterMost,
+ /*Final=*/false);
+ return std::move(SubstitutedTemplateArgs);
}
ExprResult ConstraintSatisfactionChecker::EvaluateSlow(
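
A hypothetical source-level illustration (an assumption about intent, not taken from the patch) of why the converted arguments are canonicalized here: sugared spellings that share a canonical argument must yield one satisfaction result.

template <typename T> concept FourBytes = (sizeof(T) == 4);
template <typename T> using Same = T;

// 'Same<int>' and 'int' are distinct spellings of one canonical template
// argument, so constraint satisfaction must treat them identically.
static_assert(FourBytes<Same<int>> == FourBytes<int>, "");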
diff --git a/clang/lib/Sema/SemaOpenACCClause.cpp b/clang/lib/Sema/SemaOpenACCClause.cpp
index ead9781..17078e8 100644
--- a/clang/lib/Sema/SemaOpenACCClause.cpp
+++ b/clang/lib/Sema/SemaOpenACCClause.cpp
@@ -1924,7 +1924,7 @@ bool SemaOpenACC::CheckReductionVarType(Expr *VarExpr) {
// off here. This will result in CurType being the actual 'type' of the
// expression, which is what we are looking to check.
QualType CurType = isa<ArraySectionExpr>(VarExpr)
- ? ArraySectionExpr::getBaseOriginalType(VarExpr)
+ ? cast<ArraySectionExpr>(VarExpr)->getElementType()
: VarExpr->getType();
// This can happen when we have a dependent type in an array element that the
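
A hedged sketch of the construct this check governs (hypothetical names): with an array-section reduction variable, the section's element type, float below, rather than the base pointer type, is what must be valid for the reduction.

// Hypothetical example: CheckReductionVarType now inspects the element
// type of the section 'x[0:n]' instead of its base's original type.
void sum(float *x, int n) {
  #pragma acc parallel loop reduction(+ : x[0:n])
  for (int i = 0; i < n; ++i)
    x[i] += 1.0f;
}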
diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 85e3d20..73fd33a 100644
--- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -5727,7 +5727,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
Function->setDeclarationNameLoc(NameLocPointsToPattern());
EnterExpressionEvaluationContextForFunction EvalContext(
- *this, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
+ *this, Sema::ExpressionEvaluationContext::PotentiallyEvaluated, Function);
Qualifiers ThisTypeQuals;
CXXRecordDecl *ThisContext = nullptr;
diff --git a/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index 0ae784c..1444114 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -251,6 +251,8 @@ public:
const Expr *Ex,
const MemRegion *MR,
bool hypothetical);
+ static const StringLiteral *getStringLiteralFromRegion(const MemRegion *MR);
+
SVal getCStringLength(CheckerContext &C,
ProgramStateRef &state,
const Expr *Ex,
@@ -983,6 +985,21 @@ SVal CStringChecker::getCStringLengthForRegion(CheckerContext &C,
return strLength;
}
+const StringLiteral *
+CStringChecker::getStringLiteralFromRegion(const MemRegion *MR) {
+ switch (MR->getKind()) {
+ case MemRegion::StringRegionKind:
+ return cast<StringRegion>(MR)->getStringLiteral();
+ case MemRegion::NonParamVarRegionKind:
+ if (const VarDecl *Decl = cast<NonParamVarRegion>(MR)->getDecl();
+ Decl->getType().isConstQualified() && Decl->hasGlobalStorage())
+ return dyn_cast_or_null<StringLiteral>(Decl->getInit());
+ return nullptr;
+ default:
+ return nullptr;
+ }
+}
+
SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
const Expr *Ex, SVal Buf,
bool hypothetical) const {
@@ -1013,30 +1030,19 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
// its length. For anything we can't figure out, just return UnknownVal.
MR = MR->StripCasts();
- switch (MR->getKind()) {
- case MemRegion::StringRegionKind: {
- // Modifying the contents of string regions is undefined [C99 6.4.5p6],
- // so we can assume that the byte length is the correct C string length.
- SValBuilder &svalBuilder = C.getSValBuilder();
- QualType sizeTy = svalBuilder.getContext().getSizeType();
- const StringLiteral *strLit = cast<StringRegion>(MR)->getStringLiteral();
- return svalBuilder.makeIntVal(strLit->getLength(), sizeTy);
- }
- case MemRegion::NonParamVarRegionKind: {
+ if (const StringLiteral *StrLit = getStringLiteralFromRegion(MR)) {
// If we have a global constant with a string literal initializer,
// compute the initializer's length.
- const VarDecl *Decl = cast<NonParamVarRegion>(MR)->getDecl();
- if (Decl->getType().isConstQualified() && Decl->hasGlobalStorage()) {
- if (const Expr *Init = Decl->getInit()) {
- if (auto *StrLit = dyn_cast<StringLiteral>(Init)) {
- SValBuilder &SvalBuilder = C.getSValBuilder();
- QualType SizeTy = SvalBuilder.getContext().getSizeType();
- return SvalBuilder.makeIntVal(StrLit->getLength(), SizeTy);
- }
- }
- }
- [[fallthrough]];
+ // Modifying the contents of string regions is undefined [C99 6.4.5p6],
+ // so we can assume that the byte length is the correct C string length.
+ // FIXME: Embedded null characters are not handled.
+ SValBuilder &SVB = C.getSValBuilder();
+ return SVB.makeIntVal(StrLit->getLength(), SVB.getContext().getSizeType());
}
+
+ switch (MR->getKind()) {
+ case MemRegion::StringRegionKind:
+ case MemRegion::NonParamVarRegionKind:
case MemRegion::SymbolicRegionKind:
case MemRegion::AllocaRegionKind:
case MemRegion::ParamVarRegionKind:
@@ -1046,10 +1052,28 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
case MemRegion::CompoundLiteralRegionKind:
// FIXME: Can we track this? Is it necessary?
return UnknownVal();
- case MemRegion::ElementRegionKind:
- // FIXME: How can we handle this? It's not good enough to subtract the
- // offset from the base string length; consider "123\x00567" and &a[5].
+ case MemRegion::ElementRegionKind: {
+ // If an offset into the string literal is used, use the original length
+ // minus the offset.
+ // FIXME: Embedded null characters are not handled.
+ const ElementRegion *ER = cast<ElementRegion>(MR);
+ const SubRegion *SuperReg =
+ cast<SubRegion>(ER->getSuperRegion()->StripCasts());
+ const StringLiteral *StrLit = getStringLiteralFromRegion(SuperReg);
+ if (!StrLit)
+ return UnknownVal();
+ SValBuilder &SVB = C.getSValBuilder();
+ NonLoc Idx = ER->getIndex();
+ QualType SizeTy = SVB.getContext().getSizeType();
+ NonLoc LengthVal =
+ SVB.makeIntVal(StrLit->getLength(), SizeTy).castAs<NonLoc>();
+ if (state->assume(SVB.evalBinOpNN(state, BO_LE, Idx, LengthVal,
+ SVB.getConditionType())
+ .castAs<DefinedOrUnknownSVal>(),
+ true))
+ return SVB.evalBinOp(state, BO_Sub, LengthVal, Idx, SizeTy);
return UnknownVal();
+ }
default:
// Other regions (mostly non-data) can't have a reliable C string length.
// In this case, an error is emitted and UndefinedVal is returned.
@@ -1074,6 +1098,7 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
const StringLiteral *CStringChecker::getCStringLiteral(CheckerContext &C,
ProgramStateRef &state, const Expr *expr, SVal val) const {
+ // FIXME: use getStringLiteralFromRegion (and remove unused parameters)?
// Get the memory region pointed to by the val.
const MemRegion *bufRegion = val.getAsRegion();
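
A sketch of the pattern the new ElementRegion handling models (an assumed scenario; Greeting and tail_len are illustrative names):

#include <string.h>

const char Greeting[] = "hello"; // const global with a literal initializer

size_t tail_len(void) {
  // The checker can now bound this as 5 - 2 = 3: the literal's length
  // minus the element offset, when the offset is within the literal.
  return strlen(Greeting + 2);
}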
diff --git a/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp b/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp
index 66cfccb..319aaca 100644
--- a/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/WebKit/ASTUtils.cpp
@@ -93,7 +93,8 @@ bool tryToFindPtrOrigin(
if (auto *call = dyn_cast<CallExpr>(E)) {
if (auto *Callee = call->getCalleeDecl()) {
if (Callee->hasAttr<CFReturnsRetainedAttr>() ||
- Callee->hasAttr<NSReturnsRetainedAttr>()) {
+ Callee->hasAttr<NSReturnsRetainedAttr>() ||
+ Callee->hasAttr<NSReturnsAutoreleasedAttr>()) {
return callback(E, true);
}
}
diff --git a/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index 84a9c43..6108931 100644
--- a/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -1111,6 +1111,10 @@ SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state,
assert(!BinaryOperator::isComparisonOp(op) &&
"arguments to comparison ops must be of the same type");
+ SVal simplifiedRhs = simplifySVal(state, rhs);
+ if (auto simplifiedRhsAsNonLoc = simplifiedRhs.getAs<NonLoc>())
+ rhs = *simplifiedRhsAsNonLoc;
+
// Special case: rhs is a zero constant.
if (rhs.isZeroConstant())
return lhs;
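
A hedged sketch of what simplifying the right-hand side buys (hypothetical function): a symbolic offset that reduces to a constant now reaches the cheap constant paths below instead of producing an opaque symbolic pointer.

// '(n - n)' simplifies to 0, so after this change evalBinOpLN can fold
// 'p + (n - n)' back to 'p' via the zero-constant special case.
char *identity(char *p, int n) {
  return p + (n - n);
}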