Diffstat (limited to 'clang/lib')
-rw-r--r--  clang/lib/AST/ASTConcept.cpp | 2
-rw-r--r--  clang/lib/AST/ByteCode/Context.cpp | 7
-rw-r--r--  clang/lib/AST/ByteCode/InterpBuiltin.cpp | 175
-rw-r--r--  clang/lib/AST/ByteCode/Program.cpp | 5
-rw-r--r--  clang/lib/AST/Decl.cpp | 10
-rw-r--r--  clang/lib/AST/ExprConstant.cpp | 131
-rw-r--r--  clang/lib/Analysis/FlowSensitive/Models/CMakeLists.txt | 1
-rw-r--r--  clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp | 6
-rw-r--r--  clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp | 295
-rw-r--r--  clang/lib/Basic/Targets/Mips.cpp | 10
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 10
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 182
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp | 165
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 5
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 6
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenFunction.h | 25
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp | 7
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 4
-rw-r--r--  clang/lib/CIR/CodeGen/CIRGenValue.h | 11
-rw-r--r--  clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 10
-rw-r--r--  clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 31
-rw-r--r--  clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2
-rw-r--r--  clang/lib/CodeGen/CGAtomic.cpp | 2
-rw-r--r--  clang/lib/CodeGen/CGBlocks.cpp | 2
-rw-r--r--  clang/lib/CodeGen/CGBuiltin.cpp | 2
-rw-r--r--  clang/lib/CodeGen/CGCUDANV.cpp | 2
-rw-r--r--  clang/lib/CodeGen/CGExpr.cpp | 6
-rw-r--r--  clang/lib/CodeGen/CGObjCMac.cpp | 26
-rw-r--r--  clang/lib/CodeGen/CGOpenMPRuntime.cpp | 6
-rw-r--r--  clang/lib/CodeGen/CGPointerAuth.cpp | 4
-rw-r--r--  clang/lib/CodeGen/CodeGenTypeCache.h | 2
-rw-r--r--  clang/lib/CodeGen/ItaniumCXXABI.cpp | 24
-rw-r--r--  clang/lib/CodeGen/MicrosoftCXXABI.cpp | 32
-rw-r--r--  clang/lib/CodeGen/TargetBuiltins/ARM.cpp | 22
-rw-r--r--  clang/lib/CodeGen/TargetBuiltins/PPC.cpp | 2
-rw-r--r--  clang/lib/CodeGen/Targets/PPC.cpp | 2
-rw-r--r--  clang/lib/Driver/Distro.cpp | 26
-rw-r--r--  clang/lib/Driver/ToolChains/Arch/Mips.cpp | 6
-rw-r--r--  clang/lib/Driver/ToolChains/Darwin.cpp | 14
-rw-r--r--  clang/lib/Format/WhitespaceManager.cpp | 108
-rw-r--r--  clang/lib/Frontend/FrontendAction.cpp | 2
-rw-r--r--  clang/lib/Headers/__clang_hip_runtime_wrapper.h | 1
-rw-r--r--  clang/lib/Headers/avx2intrin.h | 5
-rw-r--r--  clang/lib/Headers/avx512dqintrin.h | 34
-rw-r--r--  clang/lib/Headers/avx512fintrin.h | 30
-rw-r--r--  clang/lib/Headers/avx512vldqintrin.h | 18
-rw-r--r--  clang/lib/Headers/avx512vlintrin.h | 18
-rw-r--r--  clang/lib/Headers/avxintrin.h | 10
-rw-r--r--  clang/lib/Headers/emmintrin.h | 6
-rw-r--r--  clang/lib/Headers/smmintrin.h | 3
-rw-r--r--  clang/lib/Headers/xmmintrin.h | 4
-rw-r--r--  clang/lib/Lex/PPLexerChange.cpp | 2
-rw-r--r--  clang/lib/Parse/ParsePragma.cpp | 9
-rw-r--r--  clang/lib/Sema/SemaChecking.cpp | 12
-rw-r--r--  clang/lib/Sema/SemaConcept.cpp | 44
-rw-r--r--  clang/lib/Sema/SemaExpr.cpp | 2
-rw-r--r--  clang/lib/Sema/SemaInit.cpp | 11
-rw-r--r--  clang/lib/Sema/SemaOpenACC.cpp | 93
-rw-r--r--  clang/lib/Sema/SemaStmtAttr.cpp | 2
-rw-r--r--  clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp | 19
60 files changed, 1409 insertions, 304 deletions
diff --git a/clang/lib/AST/ASTConcept.cpp b/clang/lib/AST/ASTConcept.cpp
index fd12bc4..9ea104c 100644
--- a/clang/lib/AST/ASTConcept.cpp
+++ b/clang/lib/AST/ASTConcept.cpp
@@ -86,7 +86,7 @@ void ConstraintSatisfaction::Profile(llvm::FoldingSetNodeID &ID,
ID.AddPointer(ConstraintOwner);
ID.AddInteger(TemplateArgs.size());
for (auto &Arg : TemplateArgs)
- Arg.Profile(ID, C);
+ C.getCanonicalTemplateArgument(Arg).Profile(ID, C);
}
ConceptReference *
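The point of profiling the canonical template argument: two differently-sugared spellings of the same argument must produce the same FoldingSet ID, so a cached ConstraintSatisfaction is found again regardless of sugar. A minimal sketch (hypothetical, not from the patch):

// 'int' and the alias 'MyInt' are written differently but share one
// canonical template argument, so both lookups hash to the same
// cached satisfaction entry.
template <typename T> concept Small = sizeof(T) <= 4;
using MyInt = int;
static_assert(Small<int>);
static_assert(Small<MyInt>);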
diff --git a/clang/lib/AST/ByteCode/Context.cpp b/clang/lib/AST/ByteCode/Context.cpp
index 683e916..4f4b122 100644
--- a/clang/lib/AST/ByteCode/Context.cpp
+++ b/clang/lib/AST/ByteCode/Context.cpp
@@ -566,10 +566,15 @@ const Function *Context::getOrCreateFunction(const FunctionDecl *FuncDecl) {
// Assign descriptors to all parameters.
// Composite objects are lowered to pointers.
- for (const ParmVarDecl *PD : FuncDecl->parameters()) {
+ const auto *FuncProto = FuncDecl->getType()->getAs<FunctionProtoType>();
+ for (auto [ParamIndex, PD] : llvm::enumerate(FuncDecl->parameters())) {
bool IsConst = PD->getType().isConstQualified();
bool IsVolatile = PD->getType().isVolatileQualified();
+ if (!getASTContext().hasSameType(PD->getType(),
+ FuncProto->getParamType(ParamIndex)))
+ return nullptr;
+
OptPrimType T = classify(PD->getType());
PrimType PT = T.value_or(PT_Ptr);
Descriptor *Desc = P->createDescriptor(PD, PT, nullptr, std::nullopt,
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 0cb4910..39b991c 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -2899,6 +2899,35 @@ static bool interp__builtin_ia32_test_op(
return true;
}
+static bool interp__builtin_ia32_movmsk_op(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call) {
+ assert(Call->getNumArgs() == 1);
+
+ const Pointer &Source = S.Stk.pop<Pointer>();
+
+ unsigned SourceLen = Source.getNumElems();
+ QualType ElemQT = getElemType(Source);
+ OptPrimType ElemT = S.getContext().classify(ElemQT);
+ unsigned ResultLen =
+ S.getASTContext().getTypeSize(Call->getType()); // Always 32-bit integer.
+ APInt Result(ResultLen, 0);
+
+ for (unsigned I = 0; I != SourceLen; ++I) {
+ APInt Elem;
+ if (ElemQT->isIntegerType()) {
+ INT_TYPE_SWITCH_NO_BOOL(*ElemT, { Elem = Source.elem<T>(I).toAPSInt(); });
+ } else if (ElemQT->isRealFloatingType()) {
+ using T = PrimConv<PT_Float>::T;
+ Elem = Source.elem<T>(I).getAPFloat().bitcastToAPInt();
+ } else {
+ return false;
+ }
+ Result.setBitVal(I, Elem.isNegative());
+ }
+ pushInteger(S, Result, Call->getType());
+ return true;
+}
+
static bool interp__builtin_elementwise_triop(
InterpState &S, CodePtr OpPC, const CallExpr *Call,
llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &)>
@@ -2962,6 +2991,82 @@ static bool interp__builtin_elementwise_triop(
return true;
}
+static bool interp__builtin_x86_extract_vector(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call,
+ unsigned ID) {
+ assert(Call->getNumArgs() == 2);
+
+ APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
+ uint64_t Index = ImmAPS.getZExtValue();
+
+ const Pointer &Src = S.Stk.pop<Pointer>();
+ if (!Src.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ if (!Dst.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ unsigned SrcElems = Src.getNumElems();
+ unsigned DstElems = Dst.getNumElems();
+
+ unsigned NumLanes = SrcElems / DstElems;
+ unsigned Lane = static_cast<unsigned>(Index % NumLanes);
+ unsigned ExtractPos = Lane * DstElems;
+
+ PrimType ElemT = Src.getFieldDesc()->getPrimType();
+
+ TYPE_SWITCH(ElemT, {
+ for (unsigned I = 0; I != DstElems; ++I) {
+ Dst.elem<T>(I) = Src.elem<T>(ExtractPos + I);
+ }
+ });
+
+ Dst.initializeAllElements();
+ return true;
+}
+
+static bool interp__builtin_x86_extract_vector_masked(InterpState &S,
+ CodePtr OpPC,
+ const CallExpr *Call,
+ unsigned ID) {
+ assert(Call->getNumArgs() == 4);
+
+ APSInt MaskAPS = popToAPSInt(S, Call->getArg(3));
+ const Pointer &Merge = S.Stk.pop<Pointer>();
+ APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
+ const Pointer &Src = S.Stk.pop<Pointer>();
+
+ if (!Src.getFieldDesc()->isPrimitiveArray() ||
+ !Merge.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ if (!Dst.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ unsigned SrcElems = Src.getNumElems();
+ unsigned DstElems = Dst.getNumElems();
+
+ unsigned NumLanes = SrcElems / DstElems;
+ unsigned Lane = static_cast<unsigned>(ImmAPS.getZExtValue() % NumLanes);
+ unsigned Base = Lane * DstElems;
+
+ PrimType ElemT = Src.getFieldDesc()->getPrimType();
+
+ TYPE_SWITCH(ElemT, {
+ for (unsigned I = 0; I != DstElems; ++I) {
+ if (MaskAPS[I])
+ Dst.elem<T>(I) = Src.elem<T>(Base + I);
+ else
+ Dst.elem<T>(I) = Merge.elem<T>(I);
+ }
+ });
+
+ Dst.initializeAllElements();
+ return true;
+}
+
static bool interp__builtin_x86_insert_subvector(InterpState &S, CodePtr OpPC,
const CallExpr *Call,
unsigned ID) {
@@ -3003,6 +3108,45 @@ static bool interp__builtin_x86_insert_subvector(InterpState &S, CodePtr OpPC,
return true;
}
+static bool interp__builtin_ia32_phminposuw(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call) {
+ assert(Call->getNumArgs() == 1);
+
+ const Pointer &Source = S.Stk.pop<Pointer>();
+ const Pointer &Dest = S.Stk.peek<Pointer>();
+
+ unsigned SourceLen = Source.getNumElems();
+ QualType ElemQT = getElemType(Source);
+ OptPrimType ElemT = S.getContext().classify(ElemQT);
+ unsigned ElemBitWidth = S.getASTContext().getTypeSize(ElemQT);
+
+ bool DestUnsigned = Call->getCallReturnType(S.getASTContext())
+ ->castAs<VectorType>()
+ ->getElementType()
+ ->isUnsignedIntegerOrEnumerationType();
+
+ INT_TYPE_SWITCH_NO_BOOL(*ElemT, {
+ APSInt MinIndex(ElemBitWidth, DestUnsigned);
+ APSInt MinVal = Source.elem<T>(0).toAPSInt();
+
+ for (unsigned I = 1; I != SourceLen; ++I) {
+ APSInt Val = Source.elem<T>(I).toAPSInt();
+ if (MinVal.ugt(Val)) {
+ MinVal = Val;
+ MinIndex = I;
+ }
+ }
+
+ Dest.elem<T>(0) = static_cast<T>(MinVal);
+ Dest.elem<T>(1) = static_cast<T>(MinIndex);
+ for (unsigned I = 2; I != SourceLen; ++I) {
+ Dest.elem<T>(I) = static_cast<T>(APSInt(ElemBitWidth, DestUnsigned));
+ }
+ });
+ Dest.initializeAllElements();
+ return true;
+}
+
static bool interp__builtin_ia32_pternlog(InterpState &S, CodePtr OpPC,
const CallExpr *Call, bool MaskZ) {
assert(Call->getNumArgs() == 5);
@@ -3620,6 +3764,25 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
});
+ case X86::BI__builtin_ia32_extract128i256:
+ case X86::BI__builtin_ia32_vextractf128_pd256:
+ case X86::BI__builtin_ia32_vextractf128_ps256:
+ case X86::BI__builtin_ia32_vextractf128_si256:
+ return interp__builtin_x86_extract_vector(S, OpPC, Call, BuiltinID);
+
+ case X86::BI__builtin_ia32_extractf32x4_256_mask:
+ case X86::BI__builtin_ia32_extractf32x4_mask:
+ case X86::BI__builtin_ia32_extractf32x8_mask:
+ case X86::BI__builtin_ia32_extractf64x2_256_mask:
+ case X86::BI__builtin_ia32_extractf64x2_512_mask:
+ case X86::BI__builtin_ia32_extractf64x4_mask:
+ case X86::BI__builtin_ia32_extracti32x4_256_mask:
+ case X86::BI__builtin_ia32_extracti32x4_mask:
+ case X86::BI__builtin_ia32_extracti32x8_mask:
+ case X86::BI__builtin_ia32_extracti64x2_256_mask:
+ case X86::BI__builtin_ia32_extracti64x2_512_mask:
+ case X86::BI__builtin_ia32_extracti64x4_mask:
+ return interp__builtin_x86_extract_vector_masked(S, OpPC, Call, BuiltinID);
case clang::X86::BI__builtin_ia32_pmulhrsw128:
case clang::X86::BI__builtin_ia32_pmulhrsw256:
@@ -3630,6 +3793,15 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
.extractBits(16, 1);
});
+ case clang::X86::BI__builtin_ia32_movmskps:
+ case clang::X86::BI__builtin_ia32_movmskpd:
+ case clang::X86::BI__builtin_ia32_pmovmskb128:
+ case clang::X86::BI__builtin_ia32_pmovmskb256:
+ case clang::X86::BI__builtin_ia32_movmskps256:
+ case clang::X86::BI__builtin_ia32_movmskpd256: {
+ return interp__builtin_ia32_movmsk_op(S, OpPC, Call);
+ }
+
case clang::X86::BI__builtin_ia32_pavgb128:
case clang::X86::BI__builtin_ia32_pavgw128:
case clang::X86::BI__builtin_ia32_pavgb256:
@@ -4087,6 +4259,9 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
S, OpPC, Call,
[](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
+ case X86::BI__builtin_ia32_phminposuw128:
+ return interp__builtin_ia32_phminposuw(S, OpPC, Call);
+
case X86::BI__builtin_ia32_pternlogd128_mask:
case X86::BI__builtin_ia32_pternlogd256_mask:
case X86::BI__builtin_ia32_pternlogd512_mask:
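Together with the header updates listed in the diffstat, these interpreter entries let the corresponding SSE/AVX intrinsics fold during constant evaluation under -fexperimental-new-constant-interpreter. A sketch of what should now compile, assuming the <immintrin.h> wrappers are constexpr-enabled:

#include <immintrin.h>

// _mm_movemask_ps packs the sign bit of each float lane into the low
// bits of an int: lanes 0 and 2 are negative here, giving 0b0101.
constexpr int mask() {
  __m128 v = _mm_setr_ps(-1.0f, 2.0f, -3.0f, 4.0f);
  return _mm_movemask_ps(v);
}
static_assert(mask() == 0b0101);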
diff --git a/clang/lib/AST/ByteCode/Program.cpp b/clang/lib/AST/ByteCode/Program.cpp
index e653782..e0b2852 100644
--- a/clang/lib/AST/ByteCode/Program.cpp
+++ b/clang/lib/AST/ByteCode/Program.cpp
@@ -226,7 +226,10 @@ UnsignedOrNone Program::createGlobal(const ValueDecl *VD, const Expr *Init) {
Globals[PIdx] = NewGlobal;
// All pointers pointing to the previous extern decl now point to the
// new decl.
- RedeclBlock->movePointersTo(NewGlobal->block());
+ // A previous iteration might've already fixed up the pointers for this
+ // global.
+ if (RedeclBlock != NewGlobal->block())
+ RedeclBlock->movePointersTo(NewGlobal->block());
}
}
PIdx = *Idx;
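The guarded case arises when a global is declared extern more than once before its defining declaration is seen; only the first replacement should move pointers off the placeholder block. A reduced, hypothetical trigger:

extern const int g;           // creates a placeholder global
extern const int g;           // redeclaration of the same entity
const int g = 42;             // real definition replaces the placeholder once
constexpr const int *p = &g;
static_assert(*p == 42);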
diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp
index f048076..8579e51 100644
--- a/clang/lib/AST/Decl.cpp
+++ b/clang/lib/AST/Decl.cpp
@@ -3380,11 +3380,11 @@ bool FunctionDecl::isMSVCRTEntryPoint() const {
return false;
return llvm::StringSwitch<bool>(getName())
- .Cases("main", // an ANSI console app
- "wmain", // a Unicode console App
- "WinMain", // an ANSI GUI app
- "wWinMain", // a Unicode GUI app
- "DllMain", // a DLL
+ .Cases({"main", // an ANSI console app
+ "wmain", // a Unicode console App
+ "WinMain", // an ANSI GUI app
+ "wWinMain", // a Unicode GUI app
+ "DllMain"}, // a DLL
true)
.Default(false);
}
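This (and the matching change in Mips.cpp below) migrates from the variadic StringSwitch::Cases overload to the one taking an initializer list of keys. A minimal usage sketch:

#include "llvm/ADT/StringSwitch.h"

static int classify(llvm::StringRef Name) {
  return llvm::StringSwitch<int>(Name)
      .Cases({"a", "b", "c"}, 1) // one result value for several keys
      .Case("d", 2)
      .Default(0);
}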
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index e308c17..00aaaab 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -11811,6 +11811,73 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
});
+ case X86::BI__builtin_ia32_extract128i256:
+ case X86::BI__builtin_ia32_vextractf128_pd256:
+ case X86::BI__builtin_ia32_vextractf128_ps256:
+ case X86::BI__builtin_ia32_vextractf128_si256: {
+ APValue SourceVec, SourceImm;
+ if (!EvaluateAsRValue(Info, E->getArg(0), SourceVec) ||
+ !EvaluateAsRValue(Info, E->getArg(1), SourceImm))
+ return false;
+
+ if (!SourceVec.isVector())
+ return false;
+
+ const auto *RetVT = E->getType()->castAs<VectorType>();
+ unsigned RetLen = RetVT->getNumElements();
+ unsigned Idx = SourceImm.getInt().getZExtValue() & 1;
+
+ SmallVector<APValue, 32> ResultElements;
+ ResultElements.reserve(RetLen);
+
+ for (unsigned I = 0; I < RetLen; I++)
+ ResultElements.push_back(SourceVec.getVectorElt(Idx * RetLen + I));
+
+ return Success(APValue(ResultElements.data(), RetLen), E);
+ }
+
+ case X86::BI__builtin_ia32_extracti32x4_256_mask:
+ case X86::BI__builtin_ia32_extractf32x4_256_mask:
+ case X86::BI__builtin_ia32_extracti32x4_mask:
+ case X86::BI__builtin_ia32_extractf32x4_mask:
+ case X86::BI__builtin_ia32_extracti32x8_mask:
+ case X86::BI__builtin_ia32_extractf32x8_mask:
+ case X86::BI__builtin_ia32_extracti64x2_256_mask:
+ case X86::BI__builtin_ia32_extractf64x2_256_mask:
+ case X86::BI__builtin_ia32_extracti64x2_512_mask:
+ case X86::BI__builtin_ia32_extractf64x2_512_mask:
+ case X86::BI__builtin_ia32_extracti64x4_mask:
+ case X86::BI__builtin_ia32_extractf64x4_mask: {
+ APValue SourceVec, MergeVec;
+ APSInt Imm, MaskImm;
+
+ if (!EvaluateAsRValue(Info, E->getArg(0), SourceVec) ||
+ !EvaluateInteger(E->getArg(1), Imm, Info) ||
+ !EvaluateAsRValue(Info, E->getArg(2), MergeVec) ||
+ !EvaluateInteger(E->getArg(3), MaskImm, Info))
+ return false;
+
+ const auto *RetVT = E->getType()->castAs<VectorType>();
+ unsigned RetLen = RetVT->getNumElements();
+
+ if (!SourceVec.isVector() || !MergeVec.isVector())
+ return false;
+ unsigned SrcLen = SourceVec.getVectorLength();
+ unsigned Lanes = SrcLen / RetLen;
+ unsigned Lane = static_cast<unsigned>(Imm.getZExtValue() % Lanes);
+ unsigned Base = Lane * RetLen;
+
+ SmallVector<APValue, 32> ResultElements;
+ ResultElements.reserve(RetLen);
+ for (unsigned I = 0; I < RetLen; ++I) {
+ if (MaskImm[I])
+ ResultElements.push_back(SourceVec.getVectorElt(Base + I));
+ else
+ ResultElements.push_back(MergeVec.getVectorElt(I));
+ }
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
+
case clang::X86::BI__builtin_ia32_pavgb128:
case clang::X86::BI__builtin_ia32_pavgw128:
case clang::X86::BI__builtin_ia32_pavgb256:
@@ -12353,6 +12420,40 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(R, E);
}
+ case X86::BI__builtin_ia32_phminposuw128: {
+ APValue Source;
+ if (!Evaluate(Source, Info, E->getArg(0)))
+ return false;
+ unsigned SourceLen = Source.getVectorLength();
+ const VectorType *VT = E->getArg(0)->getType()->castAs<VectorType>();
+ QualType ElemQT = VT->getElementType();
+ unsigned ElemBitWidth = Info.Ctx.getTypeSize(ElemQT);
+
+ APInt MinIndex(ElemBitWidth, 0);
+ APInt MinVal = Source.getVectorElt(0).getInt();
+ for (unsigned I = 1; I != SourceLen; ++I) {
+ APInt Val = Source.getVectorElt(I).getInt();
+ if (MinVal.ugt(Val)) {
+ MinVal = Val;
+ MinIndex = I;
+ }
+ }
+
+ bool ResultUnsigned = E->getCallReturnType(Info.Ctx)
+ ->castAs<VectorType>()
+ ->getElementType()
+ ->isUnsignedIntegerOrEnumerationType();
+
+ SmallVector<APValue, 8> Result;
+ Result.reserve(SourceLen);
+ Result.emplace_back(APSInt(MinVal, ResultUnsigned));
+ Result.emplace_back(APSInt(MinIndex, ResultUnsigned));
+ for (unsigned I = 0; I != SourceLen - 2; ++I) {
+ Result.emplace_back(APSInt(APInt(ElemBitWidth, 0), ResultUnsigned));
+ }
+ return Success(APValue(Result.data(), Result.size()), E);
+ }
+
case X86::BI__builtin_ia32_pternlogd128_mask:
case X86::BI__builtin_ia32_pternlogd256_mask:
case X86::BI__builtin_ia32_pternlogd512_mask:
@@ -15268,6 +15369,36 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return Success(CarryOut, E);
}
+ case clang::X86::BI__builtin_ia32_movmskps:
+ case clang::X86::BI__builtin_ia32_movmskpd:
+ case clang::X86::BI__builtin_ia32_pmovmskb128:
+ case clang::X86::BI__builtin_ia32_pmovmskb256:
+ case clang::X86::BI__builtin_ia32_movmskps256:
+ case clang::X86::BI__builtin_ia32_movmskpd256: {
+ APValue Source;
+ if (!Evaluate(Source, Info, E->getArg(0)))
+ return false;
+ unsigned SourceLen = Source.getVectorLength();
+ const VectorType *VT = E->getArg(0)->getType()->castAs<VectorType>();
+ QualType ElemQT = VT->getElementType();
+ unsigned ResultLen = Info.Ctx.getTypeSize(
+ E->getCallReturnType(Info.Ctx)); // Always 32-bit integer.
+ APInt Result(ResultLen, 0);
+
+ for (unsigned I = 0; I != SourceLen; ++I) {
+ APInt Elem;
+ if (ElemQT->isIntegerType()) {
+ Elem = Source.getVectorElt(I).getInt();
+ } else if (ElemQT->isRealFloatingType()) {
+ Elem = Source.getVectorElt(I).getFloat().bitcastToAPInt();
+ } else {
+ return false;
+ }
+ Result.setBitVal(I, Elem.isNegative());
+ }
+ return Success(Result, E);
+ }
+
case clang::X86::BI__builtin_ia32_bextr_u32:
case clang::X86::BI__builtin_ia32_bextr_u64:
case clang::X86::BI__builtin_ia32_bextri_u32:
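The phminposuw evaluator mirrors the hardware semantics of PHMINPOSUW: result lane 0 receives the minimum unsigned 16-bit value, lane 1 the index of its first occurrence, and the remaining lanes are zeroed. A sketch, assuming the <smmintrin.h> wrapper is constexpr-enabled by this commit's header changes:

#include <smmintrin.h>

constexpr __m128i v = _mm_setr_epi16(9, 4, 7, 4, 30000, 12, 8, 5);
constexpr __m128i r = _mm_minpos_epu16(v);
// r lane 0 == 4 (the minimum), lane 1 == 1 (first index holding 4),
// lanes 2..7 == 0.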
diff --git a/clang/lib/Analysis/FlowSensitive/Models/CMakeLists.txt b/clang/lib/Analysis/FlowSensitive/Models/CMakeLists.txt
index 89bbe87..d1236f5 100644
--- a/clang/lib/Analysis/FlowSensitive/Models/CMakeLists.txt
+++ b/clang/lib/Analysis/FlowSensitive/Models/CMakeLists.txt
@@ -1,6 +1,7 @@
add_clang_library(clangAnalysisFlowSensitiveModels
ChromiumCheckModel.cpp
UncheckedOptionalAccessModel.cpp
+ UncheckedStatusOrAccessModel.cpp
LINK_LIBS
clangAnalysis
diff --git a/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp b/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
index bb703ef..0fa333e 100644
--- a/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
+++ b/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
@@ -241,9 +241,9 @@ auto nulloptTypeDecl() {
auto hasNulloptType() { return hasType(nulloptTypeDecl()); }
auto inPlaceClass() {
- return recordDecl(hasAnyName("std::in_place_t", "absl::in_place_t",
- "base::in_place_t", "folly::in_place_t",
- "bsl::in_place_t"));
+ return namedDecl(hasAnyName("std::in_place_t", "absl::in_place_t",
+ "base::in_place_t", "folly::in_place_t",
+ "bsl::in_place_t"));
}
auto isOptionalNulloptConstructor() {
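recordDecl only matches class/struct declarations, so it misses optional implementations whose in_place_t is a type alias; namedDecl covers both. A hypothetical example of what now matches:

#include <utility>

namespace base {
// An implementation that reuses the standard tag type via an alias:
// recordDecl(hasAnyName("base::in_place_t")) does not match this
// declaration, namedDecl(...) does.
using in_place_t = std::in_place_t;
}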
diff --git a/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp b/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp
new file mode 100644
index 0000000..75b0959
--- /dev/null
+++ b/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp
@@ -0,0 +1,295 @@
+//===- UncheckedStatusOrAccessModel.cpp -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.h"
+
+#include <cassert>
+#include <utility>
+
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/TypeBase.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/ASTMatchers/ASTMatchersInternal.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/FlowSensitive/CFGMatchSwitch.h"
+#include "clang/Analysis/FlowSensitive/DataflowAnalysis.h"
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Analysis/FlowSensitive/MatchSwitch.h"
+#include "clang/Analysis/FlowSensitive/StorageLocation.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/StringMap.h"
+
+namespace clang::dataflow::statusor_model {
+namespace {
+
+using ::clang::ast_matchers::MatchFinder;
+using ::clang::ast_matchers::StatementMatcher;
+
+} // namespace
+
+static bool namespaceEquals(const NamespaceDecl *NS,
+ clang::ArrayRef<clang::StringRef> NamespaceNames) {
+ while (!NamespaceNames.empty() && NS) {
+ if (NS->getName() != NamespaceNames.consume_back())
+ return false;
+ NS = dyn_cast_or_null<NamespaceDecl>(NS->getParent());
+ }
+ return NamespaceNames.empty() && !NS;
+}
+
+// TODO: move this to a proper place to share with the rest of clang
+static bool isTypeNamed(QualType Type, clang::ArrayRef<clang::StringRef> NS,
+ StringRef Name) {
+ if (Type.isNull())
+ return false;
+ if (auto *RD = Type->getAsRecordDecl())
+ if (RD->getName() == Name)
+ if (const auto *N = dyn_cast_or_null<NamespaceDecl>(RD->getDeclContext()))
+ return namespaceEquals(N, NS);
+ return false;
+}
+
+static bool isStatusOrOperatorBaseType(QualType Type) {
+ return isTypeNamed(Type, {"absl", "internal_statusor"}, "OperatorBase");
+}
+
+static bool isSafeUnwrap(RecordStorageLocation *StatusOrLoc,
+ const Environment &Env) {
+ if (!StatusOrLoc)
+ return false;
+ auto &StatusLoc = locForStatus(*StatusOrLoc);
+ auto *OkVal = Env.get<BoolValue>(locForOk(StatusLoc));
+ return OkVal != nullptr && Env.proves(OkVal->formula());
+}
+
+static ClassTemplateSpecializationDecl *
+getStatusOrBaseClass(const QualType &Ty) {
+ auto *RD = Ty->getAsCXXRecordDecl();
+ if (RD == nullptr)
+ return nullptr;
+ if (isStatusOrType(Ty) ||
+ // In case we are analyzing code under OperatorBase itself that uses
+ // operator* (e.g. to implement operator->).
+ isStatusOrOperatorBaseType(Ty))
+ return cast<ClassTemplateSpecializationDecl>(RD);
+ if (!RD->hasDefinition())
+ return nullptr;
+ for (const auto &Base : RD->bases())
+ if (auto *QT = getStatusOrBaseClass(Base.getType()))
+ return QT;
+ return nullptr;
+}
+
+static QualType getStatusOrValueType(ClassTemplateSpecializationDecl *TRD) {
+ return TRD->getTemplateArgs().get(0).getAsType();
+}
+
+static auto isStatusOrMemberCallWithName(llvm::StringRef member_name) {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return cxxMemberCallExpr(
+ on(expr(unless(cxxThisExpr()))),
+ callee(cxxMethodDecl(
+ hasName(member_name),
+ ofClass(anyOf(statusOrClass(), statusOrOperatorBaseClass())))));
+}
+
+static auto isStatusOrOperatorCallWithName(llvm::StringRef operator_name) {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return cxxOperatorCallExpr(
+ hasOverloadedOperatorName(operator_name),
+ callee(cxxMethodDecl(
+ ofClass(anyOf(statusOrClass(), statusOrOperatorBaseClass())))));
+}
+
+static auto valueCall() {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return anyOf(isStatusOrMemberCallWithName("value"),
+ isStatusOrMemberCallWithName("ValueOrDie"));
+}
+
+static auto valueOperatorCall() {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return expr(anyOf(isStatusOrOperatorCallWithName("*"),
+ isStatusOrOperatorCallWithName("->")));
+}
+
+static auto
+buildDiagnoseMatchSwitch(const UncheckedStatusOrAccessModelOptions &Options) {
+ return CFGMatchSwitchBuilder<const Environment,
+ llvm::SmallVector<SourceLocation>>()
+ // StatusOr::value, StatusOr::ValueOrDie
+ .CaseOfCFGStmt<CXXMemberCallExpr>(
+ valueCall(),
+ [](const CXXMemberCallExpr *E,
+ const ast_matchers::MatchFinder::MatchResult &,
+ const Environment &Env) {
+ if (!isSafeUnwrap(getImplicitObjectLocation(*E, Env), Env))
+ return llvm::SmallVector<SourceLocation>({E->getExprLoc()});
+ return llvm::SmallVector<SourceLocation>();
+ })
+
+ // StatusOr::operator*, StatusOr::operator->
+ .CaseOfCFGStmt<CXXOperatorCallExpr>(
+ valueOperatorCall(),
+ [](const CXXOperatorCallExpr *E,
+ const ast_matchers::MatchFinder::MatchResult &,
+ const Environment &Env) {
+ RecordStorageLocation *StatusOrLoc =
+ Env.get<RecordStorageLocation>(*E->getArg(0));
+ if (!isSafeUnwrap(StatusOrLoc, Env))
+ return llvm::SmallVector<SourceLocation>({E->getOperatorLoc()});
+ return llvm::SmallVector<SourceLocation>();
+ })
+ .Build();
+}
+
+UncheckedStatusOrAccessDiagnoser::UncheckedStatusOrAccessDiagnoser(
+ UncheckedStatusOrAccessModelOptions Options)
+ : DiagnoseMatchSwitch(buildDiagnoseMatchSwitch(Options)) {}
+
+llvm::SmallVector<SourceLocation> UncheckedStatusOrAccessDiagnoser::operator()(
+ const CFGElement &Elt, ASTContext &Ctx,
+ const TransferStateForDiagnostics<UncheckedStatusOrAccessModel::Lattice>
+ &State) {
+ return DiagnoseMatchSwitch(Elt, Ctx, State.Env);
+}
+
+BoolValue &initializeStatus(RecordStorageLocation &StatusLoc,
+ Environment &Env) {
+ auto &OkVal = Env.makeAtomicBoolValue();
+ Env.setValue(locForOk(StatusLoc), OkVal);
+ return OkVal;
+}
+
+BoolValue &initializeStatusOr(RecordStorageLocation &StatusOrLoc,
+ Environment &Env) {
+ return initializeStatus(locForStatus(StatusOrLoc), Env);
+}
+
+clang::ast_matchers::DeclarationMatcher statusOrClass() {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return classTemplateSpecializationDecl(
+ hasName("absl::StatusOr"),
+ hasTemplateArgument(0, refersToType(type().bind("T"))));
+}
+
+clang::ast_matchers::DeclarationMatcher statusClass() {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return cxxRecordDecl(hasName("absl::Status"));
+}
+
+clang::ast_matchers::DeclarationMatcher statusOrOperatorBaseClass() {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return classTemplateSpecializationDecl(
+ hasName("absl::internal_statusor::OperatorBase"));
+}
+
+clang::ast_matchers::TypeMatcher statusOrType() {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return hasCanonicalType(qualType(hasDeclaration(statusOrClass())));
+}
+
+bool isRecordTypeWithName(QualType Type, llvm::StringRef TypeName) {
+ return Type->isRecordType() &&
+ Type->getAsCXXRecordDecl()->getQualifiedNameAsString() == TypeName;
+}
+
+bool isStatusOrType(QualType Type) {
+ return isTypeNamed(Type, {"absl"}, "StatusOr");
+}
+
+bool isStatusType(QualType Type) {
+ return isTypeNamed(Type, {"absl"}, "Status");
+}
+
+llvm::StringMap<QualType> getSyntheticFields(QualType Ty, QualType StatusType,
+ const CXXRecordDecl &RD) {
+ if (auto *TRD = getStatusOrBaseClass(Ty))
+ return {{"status", StatusType}, {"value", getStatusOrValueType(TRD)}};
+ if (isStatusType(Ty) || (RD.hasDefinition() &&
+ RD.isDerivedFrom(StatusType->getAsCXXRecordDecl())))
+ return {{"ok", RD.getASTContext().BoolTy}};
+ return {};
+}
+
+RecordStorageLocation &locForStatus(RecordStorageLocation &StatusOrLoc) {
+ return cast<RecordStorageLocation>(StatusOrLoc.getSyntheticField("status"));
+}
+
+StorageLocation &locForOk(RecordStorageLocation &StatusLoc) {
+ return StatusLoc.getSyntheticField("ok");
+}
+
+BoolValue &valForOk(RecordStorageLocation &StatusLoc, Environment &Env) {
+ if (auto *Val = Env.get<BoolValue>(locForOk(StatusLoc)))
+ return *Val;
+ return initializeStatus(StatusLoc, Env);
+}
+
+static void transferStatusOrOkCall(const CXXMemberCallExpr *Expr,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ RecordStorageLocation *StatusOrLoc =
+ getImplicitObjectLocation(*Expr, State.Env);
+ if (StatusOrLoc == nullptr)
+ return;
+
+ auto &OkVal = valForOk(locForStatus(*StatusOrLoc), State.Env);
+ State.Env.setValue(*Expr, OkVal);
+}
+
+CFGMatchSwitch<LatticeTransferState>
+buildTransferMatchSwitch(ASTContext &Ctx,
+ CFGMatchSwitchBuilder<LatticeTransferState> Builder) {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return std::move(Builder)
+ .CaseOfCFGStmt<CXXMemberCallExpr>(isStatusOrMemberCallWithName("ok"),
+ transferStatusOrOkCall)
+ .Build();
+}
+
+QualType findStatusType(const ASTContext &Ctx) {
+ for (Type *Ty : Ctx.getTypes())
+ if (isStatusType(QualType(Ty, 0)))
+ return QualType(Ty, 0);
+
+ return QualType();
+}
+
+UncheckedStatusOrAccessModel::UncheckedStatusOrAccessModel(ASTContext &Ctx,
+ Environment &Env)
+ : DataflowAnalysis<UncheckedStatusOrAccessModel,
+ UncheckedStatusOrAccessModel::Lattice>(Ctx),
+ TransferMatchSwitch(buildTransferMatchSwitch(Ctx, {})) {
+ QualType StatusType = findStatusType(Ctx);
+ Env.getDataflowAnalysisContext().setSyntheticFieldCallback(
+ [StatusType](QualType Ty) -> llvm::StringMap<QualType> {
+ CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+ if (RD == nullptr)
+ return {};
+
+ if (auto Fields = getSyntheticFields(Ty, StatusType, *RD);
+ !Fields.empty())
+ return Fields;
+ return {};
+ });
+}
+
+void UncheckedStatusOrAccessModel::transfer(const CFGElement &Elt, Lattice &L,
+ Environment &Env) {
+ LatticeTransferState State(L, Env);
+ TransferMatchSwitch(Elt, getASTContext(), State);
+}
+
+} // namespace clang::dataflow::statusor_model
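For orientation, the kind of code this model is meant to diagnose versus accept, assuming absl::StatusOr:

#include "absl/status/statusor.h"

absl::StatusOr<int> MakeInt();

int Bad() {
  absl::StatusOr<int> so = MakeInt();
  return *so; // flagged: ok() is not proven on this path
}

int Good() {
  absl::StatusOr<int> so = MakeInt();
  if (!so.ok())
    return 0;
  return so.value(); // accepted: the environment proves ok() here
}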
diff --git a/clang/lib/Basic/Targets/Mips.cpp b/clang/lib/Basic/Targets/Mips.cpp
index de6ccff..a999d14 100644
--- a/clang/lib/Basic/Targets/Mips.cpp
+++ b/clang/lib/Basic/Targets/Mips.cpp
@@ -68,11 +68,11 @@ void MipsTargetInfo::fillValidCPUList(
unsigned MipsTargetInfo::getISARev() const {
return llvm::StringSwitch<unsigned>(getCPU())
- .Cases("mips32", "mips64", 1)
- .Cases("mips32r2", "mips64r2", "octeon", "octeon+", 2)
- .Cases("mips32r3", "mips64r3", 3)
- .Cases("mips32r5", "mips64r5", "p5600", 5)
- .Cases("mips32r6", "mips64r6", "i6400", "i6500", 6)
+ .Cases({"mips32", "mips64"}, 1)
+ .Cases({"mips32r2", "mips64r2", "octeon", "octeon+"}, 2)
+ .Cases({"mips32r3", "mips64r3"}, 3)
+ .Cases({"mips32r5", "mips64r5", "p5600"}, 5)
+ .Cases({"mips32r6", "mips64r6", "i6400", "i6500"}, 6)
.Default(0);
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 4a19d91..5667273 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -740,6 +740,16 @@ struct CallStackRestore final : EHScopeStack::Cleanup {
};
} // namespace
+/// Push the standard destructor for the given type as
+/// at least a normal cleanup.
+void CIRGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
+ Address addr, QualType type) {
+ assert(dtorKind && "cannot push destructor for trivial type");
+
+ CleanupKind cleanupKind = getCleanupKind(dtorKind);
+ pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind));
+}
+
void CIRGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
QualType type, Destroyer *destroyer) {
pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type, destroyer);
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 4897c29..9732c9c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -1626,14 +1626,15 @@ LValue CIRGenFunction::emitBinaryOperatorLValue(const BinaryOperator *e) {
/// Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
-RValue CIRGenFunction::emitAnyExpr(const Expr *e, AggValueSlot aggSlot) {
+RValue CIRGenFunction::emitAnyExpr(const Expr *e, AggValueSlot aggSlot,
+ bool ignoreResult) {
switch (CIRGenFunction::getEvaluationKind(e->getType())) {
case cir::TEK_Scalar:
return RValue::get(emitScalarExpr(e));
case cir::TEK_Complex:
return RValue::getComplex(emitComplexExpr(e));
case cir::TEK_Aggregate: {
- if (aggSlot.isIgnored())
+ if (!ignoreResult && aggSlot.isIgnored())
aggSlot = createAggTemp(e->getType(), getLoc(e->getSourceRange()),
getCounterAggTmpAsString());
emitAggExpr(e, aggSlot);
@@ -1869,8 +1870,7 @@ RValue CIRGenFunction::emitCallExpr(const clang::CallExpr *e,
/// Emit code to compute the specified expression, ignoring the result.
void CIRGenFunction::emitIgnoredExpr(const Expr *e) {
if (e->isPRValue()) {
- assert(!cir::MissingFeatures::aggValueSlot());
- emitAnyExpr(e);
+ emitAnyExpr(e, AggValueSlot::ignored(), /*ignoreResult=*/true);
return;
}
@@ -2394,6 +2394,180 @@ LValue CIRGenFunction::emitPredefinedLValue(const PredefinedExpr *e) {
return emitStringLiteralLValue(sl, gvName);
}
+LValue CIRGenFunction::emitOpaqueValueLValue(const OpaqueValueExpr *e) {
+ assert(OpaqueValueMappingData::shouldBindAsLValue(e));
+ return getOrCreateOpaqueLValueMapping(e);
+}
+
+namespace {
+// Handle the case where the condition is a constant evaluatable simple integer,
+// which means we don't have to separately handle the true/false blocks.
+std::optional<LValue> handleConditionalOperatorLValueSimpleCase(
+ CIRGenFunction &cgf, const AbstractConditionalOperator *e) {
+ const Expr *condExpr = e->getCond();
+ llvm::APSInt condExprVal;
+ if (!cgf.constantFoldsToSimpleInteger(condExpr, condExprVal))
+ return std::nullopt;
+
+ const Expr *live = e->getTrueExpr(), *dead = e->getFalseExpr();
+ if (!condExprVal.getBoolValue())
+ std::swap(live, dead);
+
+ if (cgf.containsLabel(dead))
+ return std::nullopt;
+
+ // If the true case is live, we need to track its region.
+ assert(!cir::MissingFeatures::incrementProfileCounter());
+ assert(!cir::MissingFeatures::pgoUse());
+ // If a throw expression we emit it and return an undefined lvalue
+ // because it can't be used.
+ if (auto *throwExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) {
+ cgf.emitCXXThrowExpr(throwExpr);
+ // Return an undefined lvalue - the throw terminates execution
+ // so this value will never actually be used
+ mlir::Type elemTy = cgf.convertType(dead->getType());
+ mlir::Value undefPtr =
+ cgf.getBuilder().getNullPtr(cgf.getBuilder().getPointerTo(elemTy),
+ cgf.getLoc(throwExpr->getSourceRange()));
+ return cgf.makeAddrLValue(Address(undefPtr, elemTy, CharUnits::One()),
+ dead->getType());
+ }
+ return cgf.emitLValue(live);
+}
+
+/// Emit the operand of a glvalue conditional operator. This is either a glvalue
+/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
+/// LValue is returned and the current block has been terminated.
+static std::optional<LValue> emitLValueOrThrowExpression(CIRGenFunction &cgf,
+ const Expr *operand) {
+ if (auto *throwExpr = dyn_cast<CXXThrowExpr>(operand->IgnoreParens())) {
+ cgf.emitCXXThrowExpr(throwExpr);
+ return std::nullopt;
+ }
+
+ return cgf.emitLValue(operand);
+}
+} // namespace
+
+// Create and generate the 3 blocks for a conditional operator.
+// Leaves the 'current block' in the continuation basic block.
+template <typename FuncTy>
+CIRGenFunction::ConditionalInfo
+CIRGenFunction::emitConditionalBlocks(const AbstractConditionalOperator *e,
+ const FuncTy &branchGenFunc) {
+ ConditionalInfo info;
+ ConditionalEvaluation eval(*this);
+ mlir::Location loc = getLoc(e->getSourceRange());
+ CIRGenBuilderTy &builder = getBuilder();
+
+ mlir::Value condV = emitOpOnBoolExpr(loc, e->getCond());
+ SmallVector<mlir::OpBuilder::InsertPoint, 2> insertPoints{};
+ mlir::Type yieldTy{};
+
+ auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc,
+ const Expr *expr, std::optional<LValue> &resultLV) {
+ CIRGenFunction::LexicalScope lexScope{*this, loc, b.getInsertionBlock()};
+ curLexScope->setAsTernary();
+
+ assert(!cir::MissingFeatures::incrementProfileCounter());
+ eval.beginEvaluation();
+ resultLV = branchGenFunc(*this, expr);
+ mlir::Value resultPtr = resultLV ? resultLV->getPointer() : mlir::Value();
+ eval.endEvaluation();
+
+ if (resultPtr) {
+ yieldTy = resultPtr.getType();
+ cir::YieldOp::create(b, loc, resultPtr);
+ } else {
+ // If the LHS or RHS is a void expression, we need to patch the arms so
+ // that their yield types match.
+ // If the current block's terminator is an UnreachableOp (from a throw),
+ // we don't need a yield.
+ if (builder.getInsertionBlock()->mightHaveTerminator()) {
+ mlir::Operation *terminator =
+ builder.getInsertionBlock()->getTerminator();
+ if (isa_and_nonnull<cir::UnreachableOp>(terminator))
+ insertPoints.push_back(b.saveInsertionPoint());
+ }
+ }
+ };
+
+ info.result = cir::TernaryOp::create(
+ builder, loc, condV,
+ /*trueBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ emitBranch(b, loc, e->getTrueExpr(), info.lhs);
+ },
+ /*falseBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ emitBranch(b, loc, e->getFalseExpr(), info.rhs);
+ })
+ .getResult();
+
+ // If both arms are void, so be it.
+ if (!yieldTy)
+ yieldTy = VoidTy;
+
+ // Insert required yields.
+ for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.restoreInsertionPoint(toInsert);
+
+ // Block does not return: build empty yield.
+ if (!yieldTy) {
+ cir::YieldOp::create(builder, loc);
+ } else { // Block returns: set null yield value.
+ mlir::Value op0 = builder.getNullValue(yieldTy, loc);
+ cir::YieldOp::create(builder, loc, op0);
+ }
+ }
+
+ return info;
+}
+
+LValue CIRGenFunction::emitConditionalOperatorLValue(
+ const AbstractConditionalOperator *expr) {
+ if (!expr->isGLValue()) {
+ // ?: here should be an aggregate.
+ assert(hasAggregateEvaluationKind(expr->getType()) &&
+ "Unexpected conditional operator!");
+ return emitAggExprToLValue(expr);
+ }
+
+ OpaqueValueMapping binding(*this, expr);
+ if (std::optional<LValue> res =
+ handleConditionalOperatorLValueSimpleCase(*this, expr))
+ return *res;
+
+ ConditionalInfo info =
+ emitConditionalBlocks(expr, [](CIRGenFunction &cgf, const Expr *e) {
+ return emitLValueOrThrowExpression(cgf, e);
+ });
+
+ if ((info.lhs && !info.lhs->isSimple()) ||
+ (info.rhs && !info.rhs->isSimple())) {
+ cgm.errorNYI(expr->getSourceRange(),
+ "unsupported conditional operator with non-simple lvalue");
+ return LValue();
+ }
+
+ if (info.lhs && info.rhs) {
+ Address lhsAddr = info.lhs->getAddress();
+ Address rhsAddr = info.rhs->getAddress();
+ Address result(info.result, lhsAddr.getElementType(),
+ std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
+ AlignmentSource alignSource =
+ std::max(info.lhs->getBaseInfo().getAlignmentSource(),
+ info.rhs->getBaseInfo().getAlignmentSource());
+ assert(!cir::MissingFeatures::opTBAA());
+ return makeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource));
+ }
+
+ assert((info.lhs || info.rhs) &&
+ "both operands of glvalue conditional are throw-expressions?");
+ return info.lhs ? *info.lhs : *info.rhs;
+}
+
/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// performing such an operation can be performed without a libcall.
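For reference, the new emitConditionalOperatorLValue path handles glvalue conditionals, including arms that are throw-expressions:

void f(bool c, int &a, int &b) {
  // Both arms are lvalues, so the conditional itself is an lvalue.
  (c ? a : b) = 42;
  // A throw arm terminates its block; the other arm supplies the lvalue.
  int &r = c ? a : throw 0;
  r = 1;
}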
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
index 901b937..568cbdb 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
@@ -24,6 +24,73 @@ using namespace clang;
using namespace clang::CIRGen;
namespace {
+// FIXME(cir): This should be a common helper between CIRGen
+// and traditional CodeGen
+/// Is the value of the given expression possibly a reference to or
+/// into a __block variable?
+static bool isBlockVarRef(const Expr *e) {
+ // Make sure we look through parens.
+ e = e->IgnoreParens();
+
+ // Check for a direct reference to a __block variable.
+ if (const DeclRefExpr *dre = dyn_cast<DeclRefExpr>(e)) {
+ const VarDecl *var = dyn_cast<VarDecl>(dre->getDecl());
+ return (var && var->hasAttr<BlocksAttr>());
+ }
+
+ // More complicated stuff.
+
+ // Binary operators.
+ if (const BinaryOperator *op = dyn_cast<BinaryOperator>(e)) {
+ // For an assignment or pointer-to-member operation, just care
+ // about the LHS.
+ if (op->isAssignmentOp() || op->isPtrMemOp())
+ return isBlockVarRef(op->getLHS());
+
+ // For a comma, just care about the RHS.
+ if (op->getOpcode() == BO_Comma)
+ return isBlockVarRef(op->getRHS());
+
+ // FIXME: pointer arithmetic?
+ return false;
+
+ // Check both sides of a conditional operator.
+ } else if (const AbstractConditionalOperator *op =
+ dyn_cast<AbstractConditionalOperator>(e)) {
+ return isBlockVarRef(op->getTrueExpr()) ||
+ isBlockVarRef(op->getFalseExpr());
+
+ // OVEs are required to support BinaryConditionalOperators.
+ } else if (const OpaqueValueExpr *op = dyn_cast<OpaqueValueExpr>(e)) {
+ if (const Expr *src = op->getSourceExpr())
+ return isBlockVarRef(src);
+
+ // Casts are necessary to get things like (*(int*)&var) = foo().
+ // We don't really care about the kind of cast here, except
+ // we don't want to look through l2r casts, because it's okay
+ // to get the *value* in a __block variable.
+ } else if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
+ if (cast->getCastKind() == CK_LValueToRValue)
+ return false;
+ return isBlockVarRef(cast->getSubExpr());
+
+ // Handle unary operators. Again, just aggressively look through
+ // it, ignoring the operation.
+ } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(e)) {
+ return isBlockVarRef(uop->getSubExpr());
+
+ // Look into the base of a field access.
+ } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(e)) {
+ return isBlockVarRef(mem->getBase());
+
+ // Look into the base of a subscript.
+ } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(e)) {
+ return isBlockVarRef(sub->getBase());
+ }
+
+ return false;
+}
+
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
CIRGenFunction &cgf;
@@ -41,9 +108,7 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
AggValueSlot ensureSlot(mlir::Location loc, QualType t) {
if (!dest.isIgnored())
return dest;
-
- cgf.cgm.errorNYI(loc, "Slot for ignored address");
- return dest;
+ return cgf.createAggTemp(t, loc, "agg.tmp.ensured");
}
void ensureDest(mlir::Location loc, QualType ty) {
@@ -89,6 +154,47 @@ public:
(void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca, dest);
}
+ void VisitBinAssign(const BinaryOperator *e) {
+ // For an assignment to work, the value on the right has
+ // to be compatible with the value on the left.
+ assert(cgf.getContext().hasSameUnqualifiedType(e->getLHS()->getType(),
+ e->getRHS()->getType()) &&
+ "Invalid assignment");
+
+ if (isBlockVarRef(e->getLHS()) &&
+ e->getRHS()->HasSideEffects(cgf.getContext())) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "block var reference with side effects");
+ return;
+ }
+
+ LValue lhs = cgf.emitLValue(e->getLHS());
+
+ // If we have an atomic type, evaluate into the destination and then
+ // do an atomic copy.
+ assert(!cir::MissingFeatures::atomicTypes());
+
+ // Codegen the RHS so that it stores directly into the LHS.
+ assert(!cir::MissingFeatures::aggValueSlotGC());
+ AggValueSlot lhsSlot = AggValueSlot::forLValue(
+ lhs, AggValueSlot::IsDestructed, AggValueSlot::IsAliased,
+ AggValueSlot::MayOverlap);
+
+ // A non-volatile aggregate destination might have a volatile member.
+ if (!lhsSlot.isVolatile() && cgf.hasVolatileMember(e->getLHS()->getType()))
+ lhsSlot.setVolatile(true);
+
+ cgf.emitAggExpr(e->getRHS(), lhsSlot);
+
+ // Copy into the destination if the assignment isn't ignored.
+ emitFinalDestCopy(e->getType(), lhs);
+
+ if (!dest.isIgnored() && !dest.isExternallyDestructed() &&
+ e->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
+ cgf.pushDestroy(QualType::DK_nontrivial_c_struct, dest.getAddress(),
+ e->getType());
+ }
+
void VisitDeclRefExpr(DeclRefExpr *e) { emitAggLoadOfLValue(e); }
void VisitInitListExpr(InitListExpr *e);
@@ -170,19 +276,10 @@ public:
void VisitConstantExpr(ConstantExpr *e) {
cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitConstantExpr");
}
- void VisitMemberExpr(MemberExpr *e) {
- cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitMemberExpr");
- }
- void VisitUnaryDeref(UnaryOperator *e) {
- cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitUnaryDeref");
- }
- void VisitStringLiteral(StringLiteral *e) {
- cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitStringLiteral");
- }
- void VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
- cgf.cgm.errorNYI(e->getSourceRange(),
- "AggExprEmitter: VisitCompoundLiteralExpr");
- }
+ void VisitMemberExpr(MemberExpr *e) { emitAggLoadOfLValue(e); }
+ void VisitUnaryDeref(UnaryOperator *e) { emitAggLoadOfLValue(e); }
+ void VisitStringLiteral(StringLiteral *e) { emitAggLoadOfLValue(e); }
+ void VisitCompoundLiteralExpr(CompoundLiteralExpr *e);
void VisitPredefinedExpr(const PredefinedExpr *e) {
cgf.cgm.errorNYI(e->getSourceRange(),
"AggExprEmitter: VisitPredefinedExpr");
@@ -195,9 +292,6 @@ public:
cgf.cgm.errorNYI(e->getSourceRange(),
"AggExprEmitter: VisitPointerToDataMemberBinaryOperator");
}
- void VisitBinAssign(const BinaryOperator *e) {
- cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitBinAssign");
- }
void VisitBinComma(const BinaryOperator *e) {
cgf.emitIgnoredExpr(e->getLHS());
Visit(e->getRHS());
@@ -325,6 +419,31 @@ void AggExprEmitter::emitAggLoadOfLValue(const Expr *e) {
emitFinalDestCopy(e->getType(), lv);
}
+void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
+ if (dest.isPotentiallyAliased() && e->getType().isPODType(cgf.getContext())) {
+ // For a POD type, just emit a load of the lvalue + a copy, because our
+ // compound literal might alias the destination.
+ emitAggLoadOfLValue(e);
+ return;
+ }
+
+ AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
+
+ // Block-scope compound literals are destroyed at the end of the enclosing
+ // scope in C.
+ bool destruct =
+ !cgf.getLangOpts().CPlusPlus && !slot.isExternallyDestructed();
+ if (destruct)
+ slot.setExternallyDestructed();
+
+ cgf.emitAggExpr(e->getInitializer(), slot);
+
+ if (destruct)
+ if ([[maybe_unused]] QualType::DestructionKind dtorKind =
+ e->getType().isDestructedType())
+ cgf.cgm.errorNYI(e->getSourceRange(), "compound literal with destructor");
+}
+
void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
QualType arrayQTy, Expr *e,
ArrayRef<Expr *> args, Expr *arrayFiller) {
@@ -487,7 +606,8 @@ void AggExprEmitter::emitCopy(QualType type, const AggValueSlot &dest,
LValue destLV = cgf.makeAddrLValue(dest.getAddress(), type);
LValue srcLV = cgf.makeAddrLValue(src.getAddress(), type);
assert(!cir::MissingFeatures::aggValueSlotVolatile());
- cgf.emitAggregateCopy(destLV, srcLV, type, dest.mayOverlap());
+ cgf.emitAggregateCopy(destLV, srcLV, type, dest.mayOverlap(),
+ dest.isVolatile() || src.isVolatile());
}
void AggExprEmitter::emitInitializationToLValue(Expr *e, LValue lv) {
@@ -788,7 +908,8 @@ void CIRGenFunction::emitAggExpr(const Expr *e, AggValueSlot slot) {
}
void CIRGenFunction::emitAggregateCopy(LValue dest, LValue src, QualType ty,
- AggValueSlot::Overlap_t mayOverlap) {
+ AggValueSlot::Overlap_t mayOverlap,
+ bool isVolatile) {
// TODO(cir): this function needs improvements, commented code for now since
// this will be touched again soon.
assert(!ty->isAnyComplexType() && "Unexpected copy of complex");
@@ -844,7 +965,7 @@ void CIRGenFunction::emitAggregateCopy(LValue dest, LValue src, QualType ty,
cgm.errorNYI("emitAggregateCopy: GC");
[[maybe_unused]] cir::CopyOp copyOp =
- builder.createCopy(destPtr.getPointer(), srcPtr.getPointer());
+ builder.createCopy(destPtr.getPointer(), srcPtr.getPointer(), isVolatile);
assert(!cir::MissingFeatures::opTBAA());
}
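The aggregate forms newly handled above include plain struct assignment and compound literals (a C feature that Clang also accepts in C++):

struct S { int a, b; };

void g(struct S *p, struct S s) {
  *p = s;                        // VisitBinAssign: RHS stores into the LHS slot
  struct S t = (struct S){1, 2}; // VisitCompoundLiteralExpr
  (void)t;
}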
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index fcde487..d8f4943 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -196,9 +196,8 @@ public:
return Visit(e->getSubExpr());
}
mlir::Value VisitCXXDefaultArgExpr(CXXDefaultArgExpr *dae) {
- cgf.cgm.errorNYI(dae->getExprLoc(),
- "ComplexExprEmitter VisitCXXDefaultArgExpr");
- return {};
+ CIRGenFunction::CXXDefaultArgExprScope scope(cgf, dae);
+ return Visit(dae->getExpr());
}
mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
CIRGenFunction::CXXDefaultInitExprScope scope(cgf, die);
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index ba36cbe..25a46df 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -822,6 +822,10 @@ LValue CIRGenFunction::emitLValue(const Expr *e) {
std::string("l-value not implemented for '") +
e->getStmtClassName() + "'");
return LValue();
+ case Expr::ConditionalOperatorClass:
+ return emitConditionalOperatorLValue(cast<ConditionalOperator>(e));
+ case Expr::BinaryConditionalOperatorClass:
+ return emitConditionalOperatorLValue(cast<BinaryConditionalOperator>(e));
case Expr::ArraySubscriptExprClass:
return emitArraySubscriptExpr(cast<ArraySubscriptExpr>(e));
case Expr::UnaryOperatorClass:
@@ -866,6 +870,8 @@ LValue CIRGenFunction::emitLValue(const Expr *e) {
return emitCastLValue(cast<CastExpr>(e));
case Expr::MaterializeTemporaryExprClass:
return emitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(e));
+ case Expr::OpaqueValueExprClass:
+ return emitOpaqueValueLValue(cast<OpaqueValueExpr>(e));
case Expr::ChooseExprClass:
return emitLValue(cast<ChooseExpr>(e)->getChosenSubExpr());
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 3c36f5c..5a71126 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -733,6 +733,11 @@ public:
SourceLocExprScopeGuard sourceLocScope;
};
+ struct CXXDefaultArgExprScope : SourceLocExprScopeGuard {
+ CXXDefaultArgExprScope(CIRGenFunction &cfg, const CXXDefaultArgExpr *e)
+ : SourceLocExprScopeGuard(e, cfg.curSourceLocExprScope) {}
+ };
+
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t);
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty);
@@ -853,6 +858,13 @@ public:
FunctionArgList args, clang::SourceLocation loc,
clang::SourceLocation startLoc);
+ /// Returns true if the aggregate type has a volatile member.
+ bool hasVolatileMember(QualType t) {
+ if (const auto *rd = t->getAsRecordDecl())
+ return rd->hasVolatileMember();
+ return false;
+ }
+
/// The cleanup depth enclosing all the cleanups associated with the
/// parameters.
EHScopeStack::stable_iterator prologueCleanupDepth;
@@ -1077,6 +1089,9 @@ public:
static Destroyer destroyCXXObject;
+ void pushDestroy(QualType::DestructionKind dtorKind, Address addr,
+ QualType type);
+
void pushDestroy(CleanupKind kind, Address addr, QualType type,
Destroyer *destroyer);
@@ -1131,14 +1146,16 @@ public:
/// occupied by some other object. More efficient code can often be
/// generated if not.
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy,
- AggValueSlot::Overlap_t mayOverlap);
+ AggValueSlot::Overlap_t mayOverlap,
+ bool isVolatile = false);
/// Emit code to compute the specified expression which can have any type. The
/// result is returned as an RValue struct. If this is an aggregate
/// expression, the aggloc/agglocvolatile arguments indicate where the result
/// should be returned.
RValue emitAnyExpr(const clang::Expr *e,
- AggValueSlot aggSlot = AggValueSlot::ignored());
+ AggValueSlot aggSlot = AggValueSlot::ignored(),
+ bool ignoreResult = false);
/// Emits the code necessary to evaluate an arbitrary expression into the
/// given memory location.
@@ -1518,6 +1535,10 @@ public:
LValue emitMemberExpr(const MemberExpr *e);
+ LValue emitOpaqueValueLValue(const OpaqueValueExpr *e);
+
+ LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr);
+
/// Given an expression with a pointer type, emit the value and compute our
/// best estimate of the alignment of the pointee.
///
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
index f638d39..be063033 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
@@ -590,15 +590,18 @@ void OpenACCRecipeBuilderBase::createReductionRecipeCombiner(
} else {
// else we have to handle each individual field after a
// get-element.
+ const CIRGenRecordLayout &layout =
+ cgf.cgm.getTypes().getCIRGenRecordLayout(rd);
for (const auto &[field, combiner] :
llvm::zip_equal(rd->fields(), combinerRecipes)) {
mlir::Type fieldType = cgf.convertType(field->getType());
auto fieldPtr = cir::PointerType::get(fieldType);
+ unsigned fieldIndex = layout.getCIRFieldNo(field);
mlir::Value lhsField = builder.createGetMember(
- loc, fieldPtr, lhsArg, field->getName(), field->getFieldIndex());
+ loc, fieldPtr, lhsArg, field->getName(), fieldIndex);
mlir::Value rhsField = builder.createGetMember(
- loc, fieldPtr, rhsArg, field->getName(), field->getFieldIndex());
+ loc, fieldPtr, rhsArg, field->getName(), fieldIndex);
emitSingleCombiner(lhsField, rhsField, combiner);
}
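Using the record layout's getCIRFieldNo instead of FieldDecl::getFieldIndex matters whenever AST field numbering and the lowered record layout diverge, e.g. around unnamed bitfields. A hypothetical illustration:

struct R {
  int a;   // AST index 0, CIR field 0
  int : 0; // AST index 1, but typically gets no CIR field of its own
  int b;   // AST index 2, CIR field 1: the two numberings diverge
};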
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index 5ba64dd..ad8c4d0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -475,8 +475,8 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
}
break;
case cir::TEK_Complex:
- getCIRGenModule().errorNYI(s.getSourceRange(),
- "complex function return type");
+ emitComplexExprIntoLValue(rv, makeAddrLValue(returnValue, rv->getType()),
+ /*isInit=*/true);
break;
case cir::TEK_Aggregate:
assert(!cir::MissingFeatures::aggValueSlotGC());
diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h
index c05142e..ab245a77 100644
--- a/clang/lib/CIR/CodeGen/CIRGenValue.h
+++ b/clang/lib/CIR/CodeGen/CIRGenValue.h
@@ -380,6 +380,15 @@ public:
clang::Qualifiers getQualifiers() const { return quals; }
+ bool isVolatile() const { return quals.hasVolatile(); }
+
+ void setVolatile(bool flag) {
+ if (flag)
+ quals.addVolatile();
+ else
+ quals.removeVolatile();
+ }
+
Address getAddress() const { return addr; }
bool isIgnored() const { return !addr.isValid(); }
@@ -390,6 +399,8 @@ public:
IsZeroed_t isZeroed() const { return IsZeroed_t(zeroedFlag); }
+ IsAliased_t isPotentiallyAliased() const { return IsAliased_t(aliasedFlag); }
+
RValue asRValue() const {
if (isIgnored())
return RValue::getIgnored();
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index b4c3704..ed606b7 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -1978,13 +1978,19 @@ void cir::TernaryOp::build(
result.addOperands(cond);
OpBuilder::InsertionGuard guard(builder);
Region *trueRegion = result.addRegion();
- Block *block = builder.createBlock(trueRegion);
+ builder.createBlock(trueRegion);
trueBuilder(builder, result.location);
Region *falseRegion = result.addRegion();
builder.createBlock(falseRegion);
falseBuilder(builder, result.location);
- auto yield = dyn_cast<YieldOp>(block->getTerminator());
+ // Get the result type from whichever branch has a yield (the other may
+ // end in an unreachable from a throw expression).
+ auto yield =
+ dyn_cast_or_null<cir::YieldOp>(trueRegion->back().getTerminator());
+ if (!yield)
+ yield = dyn_cast_or_null<cir::YieldOp>(falseRegion->back().getTerminator());
+
assert((yield && yield.getNumOperands() <= 1) &&
"expected zero or one result type");
if (yield.getNumOperands() == 1)
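The motivating source pattern is a conditional whose arm is a throw-expression: that region ends in cir.unreachable rather than cir.yield, so the result type must come from the other branch:

int pick(bool c, int x) {
  // The false region terminates with an unreachable, not a yield.
  return c ? x : throw 0;
}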
diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
index 26e5c05..8589a2e 100644
--- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
@@ -505,10 +505,19 @@ public:
Block *trueBlock = &trueRegion.front();
mlir::Operation *trueTerminator = trueRegion.back().getTerminator();
rewriter.setInsertionPointToEnd(&trueRegion.back());
- auto trueYieldOp = dyn_cast<cir::YieldOp>(trueTerminator);
- rewriter.replaceOpWithNewOp<cir::BrOp>(trueYieldOp, trueYieldOp.getArgs(),
- continueBlock);
+ // Handle both yield and unreachable terminators (throw expressions)
+ if (auto trueYieldOp = dyn_cast<cir::YieldOp>(trueTerminator)) {
+ rewriter.replaceOpWithNewOp<cir::BrOp>(trueYieldOp, trueYieldOp.getArgs(),
+ continueBlock);
+ } else if (isa<cir::UnreachableOp>(trueTerminator)) {
+      // The terminator is unreachable (e.g., from a throw); keep it as-is.
+ } else {
+ trueTerminator->emitError("unexpected terminator in ternary true region, "
+ "expected yield or unreachable, got: ")
+ << trueTerminator->getName();
+ return mlir::failure();
+ }
rewriter.inlineRegionBefore(trueRegion, continueBlock);
Block *falseBlock = continueBlock;
@@ -517,9 +526,19 @@ public:
falseBlock = &falseRegion.front();
mlir::Operation *falseTerminator = falseRegion.back().getTerminator();
rewriter.setInsertionPointToEnd(&falseRegion.back());
- auto falseYieldOp = dyn_cast<cir::YieldOp>(falseTerminator);
- rewriter.replaceOpWithNewOp<cir::BrOp>(falseYieldOp, falseYieldOp.getArgs(),
- continueBlock);
+
+ // Handle both yield and unreachable terminators (throw expressions)
+ if (auto falseYieldOp = dyn_cast<cir::YieldOp>(falseTerminator)) {
+ rewriter.replaceOpWithNewOp<cir::BrOp>(
+ falseYieldOp, falseYieldOp.getArgs(), continueBlock);
+ } else if (isa<cir::UnreachableOp>(falseTerminator)) {
+      // The terminator is unreachable (e.g., from a throw); keep it as-is.
+ } else {
+ falseTerminator->emitError("unexpected terminator in ternary false "
+ "region, expected yield or unreachable, got: ")
+ << falseTerminator->getName();
+ return mlir::failure();
+ }
rewriter.inlineRegionBefore(falseRegion, continueBlock);
rewriter.setInsertionPointToEnd(condBlock);
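[Editor's note] Both arms of the flattened ternary now get the same treatment: a yield becomes a cir.br to the continue block, an unreachable terminator is left in place, and anything else is a reported error rather than a crash on a null dyn_cast. Sketch of the resulting CFG when one arm throws (CIR-like notation, illustrative only):

    //   cir.brcond %b, ^bb.true, ^bb.false
    // ^bb.true:            // the throw arm
    //   cir.unreachable    // kept; no branch to ^bb.cont emitted
    // ^bb.false:
    //   cir.br ^bb.cont(%v : !s32i)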
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 0243bf1..dc26dac 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -182,7 +182,7 @@ mlir::LogicalResult CIRToLLVMCopyOpLowering::matchAndRewrite(
rewriter, op.getLoc(), rewriter.getI32Type(), op.getLength(layout));
assert(!cir::MissingFeatures::aggValueSlotVolatile());
rewriter.replaceOpWithNewOp<mlir::LLVM::MemcpyOp>(
- op, adaptor.getDst(), adaptor.getSrc(), length, /*isVolatile=*/false);
+ op, adaptor.getDst(), adaptor.getSrc(), length, op.getIsVolatile());
return mlir::success();
}
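[Editor's note] cir.copy now forwards its own volatile flag to the llvm.memcpy intrinsic instead of hard-coding false. A C-level input that should produce a volatile copy, under the assumption that volatile aggregate assignment lowers to cir.copy:

    struct S { int a[8]; };
    volatile struct S d, s;
    void f(void) { d = s; }  // expect llvm.memcpy with isVolatile = true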
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index 5bf1816..a012581 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -86,7 +86,7 @@ namespace {
llvm::Value *StoragePtr = CGF.Builder.CreateConstGEP1_64(
CGF.Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());
StoragePtr = CGF.Builder.CreateAddrSpaceCast(
- StoragePtr, CGF.UnqualPtrTy, "atomic_bitfield_base");
+ StoragePtr, CGF.DefaultPtrTy, "atomic_bitfield_base");
BFI = OrigBFI;
BFI.Offset = Offset;
BFI.StorageSize = AtomicSizeInBits;
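[Editor's note] This is the first of many mechanical UnqualPtrTy -> DefaultPtrTy renames in this diff; the CodeGenTypeCache.h hunk further down shows why the rename is behavior-preserving: both names are members of a union holding one opaque LLVM pointer type in the target's default address space. In sketch form:

    // In CodeGenTypeCache, these all name the same llvm::PointerType:
    assert(CGM.DefaultPtrTy == CGM.VoidPtrTy && CGM.VoidPtrTy == CGM.Int8PtrTy);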
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index 597127ab..20a595e 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -1207,7 +1207,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
} else {
// Bitcast the block literal to a generic block literal.
BlockPtr =
- Builder.CreatePointerCast(BlockPtr, UnqualPtrTy, "block.literal");
+ Builder.CreatePointerCast(BlockPtr, DefaultPtrTy, "block.literal");
// Get pointer to the block invoke function
FuncPtr = Builder.CreateStructGEP(GenBlockTy, BlockPtr, 3);
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 92dba32..fd14cd6 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -1673,7 +1673,7 @@ static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
CGF.getLLVMContext(),
CGF.getContext().getTypeSize(E->getArg(1)->getType()));
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.Int8Ty, {CGF.UnqualPtrTy, IntType}, false);
+ llvm::FunctionType::get(CGF.Int8Ty, {CGF.DefaultPtrTy, IntType}, false);
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp
index cb16fe1..b463f88 100644
--- a/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/clang/lib/CodeGen/CGCUDANV.cpp
@@ -230,7 +230,7 @@ CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
IntTy = CGM.IntTy;
SizeTy = CGM.SizeTy;
VoidTy = CGM.VoidTy;
- PtrTy = CGM.UnqualPtrTy;
+ PtrTy = CGM.DefaultPtrTy;
if (CGM.getLangOpts().OffloadViaLLVM)
Prefix = "llvm";
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 8439ec7..fd73314 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -1756,7 +1756,7 @@ LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
const char *Name) {
ErrorUnsupported(E, Name);
llvm::Type *ElTy = ConvertType(E->getType());
- llvm::Type *Ty = UnqualPtrTy;
+ llvm::Type *Ty = DefaultPtrTy;
return MakeAddrLValue(
Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
}
@@ -4270,7 +4270,7 @@ void CodeGenFunction::EmitCfiCheckFail() {
llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
llvm::Value *V = Builder.CreateConstGEP2_32(
- CfiCheckFailDataTy, Builder.CreatePointerCast(Data, UnqualPtrTy), 0, 0);
+ CfiCheckFailDataTy, Builder.CreatePointerCast(Data, DefaultPtrTy), 0, 0);
Address CheckKindAddr(V, Int8Ty, getIntAlign());
llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
@@ -5711,7 +5711,7 @@ std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
CGF.EmitCXXThrowExpr(ThrowExpr);
llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
- llvm::Type *Ty = CGF.UnqualPtrTy;
+ llvm::Type *Ty = CGF.DefaultPtrTy;
return CGF.MakeAddrLValue(
Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
Dead->getType());
diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp
index c571821a..cb5bb40 100644
--- a/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/clang/lib/CodeGen/CGObjCMac.cpp
@@ -338,7 +338,7 @@ public:
/// GcReadWeakFn -- LLVM objc_read_weak (id *src) function.
llvm::FunctionCallee getGcReadWeakFn() {
// id objc_read_weak (id *)
- llvm::Type *args[] = {CGM.UnqualPtrTy};
+ llvm::Type *args[] = {CGM.DefaultPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_read_weak");
}
@@ -346,7 +346,7 @@ public:
/// GcAssignWeakFn -- LLVM objc_assign_weak function.
llvm::FunctionCallee getGcAssignWeakFn() {
// id objc_assign_weak (id, id *)
- llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy};
+ llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");
}
@@ -354,7 +354,7 @@ public:
/// GcAssignGlobalFn -- LLVM objc_assign_global function.
llvm::FunctionCallee getGcAssignGlobalFn() {
// id objc_assign_global(id, id *)
- llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy};
+ llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
}
@@ -362,7 +362,7 @@ public:
/// GcAssignThreadLocalFn -- LLVM objc_assign_threadlocal function.
llvm::FunctionCallee getGcAssignThreadLocalFn() {
// id objc_assign_threadlocal(id src, id * dest)
- llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy};
+ llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_threadlocal");
}
@@ -370,7 +370,7 @@ public:
/// GcAssignIvarFn -- LLVM objc_assign_ivar function.
llvm::FunctionCallee getGcAssignIvarFn() {
// id objc_assign_ivar(id, id *, ptrdiff_t)
- llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy, CGM.PtrDiffTy};
+ llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy, CGM.PtrDiffTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
}
@@ -386,7 +386,7 @@ public:
/// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function.
llvm::FunctionCallee getGcAssignStrongCastFn() {
// id objc_assign_strongCast(id, id *)
- llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy};
+ llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast");
}
@@ -517,7 +517,7 @@ public:
/// ExceptionTryEnterFn - LLVM objc_exception_try_enter function.
llvm::FunctionCallee getExceptionTryEnterFn() {
- llvm::Type *params[] = {CGM.UnqualPtrTy};
+ llvm::Type *params[] = {CGM.DefaultPtrTy};
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.VoidTy, params, false),
"objc_exception_try_enter");
@@ -525,7 +525,7 @@ public:
/// ExceptionTryExitFn - LLVM objc_exception_try_exit function.
llvm::FunctionCallee getExceptionTryExitFn() {
- llvm::Type *params[] = {CGM.UnqualPtrTy};
+ llvm::Type *params[] = {CGM.DefaultPtrTy};
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.VoidTy, params, false),
"objc_exception_try_exit");
@@ -533,7 +533,7 @@ public:
/// ExceptionExtractFn - LLVM objc_exception_extract function.
llvm::FunctionCallee getExceptionExtractFn() {
- llvm::Type *params[] = {CGM.UnqualPtrTy};
+ llvm::Type *params[] = {CGM.DefaultPtrTy};
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(ObjectPtrTy, params, false),
"objc_exception_extract");
@@ -550,7 +550,7 @@ public:
/// SetJmpFn - LLVM _setjmp function.
llvm::FunctionCallee getSetJmpFn() {
// This is specifically the prototype for x86.
- llvm::Type *params[] = {CGM.UnqualPtrTy};
+ llvm::Type *params[] = {CGM.DefaultPtrTy};
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.Int32Ty, params, false), "_setjmp",
llvm::AttributeList::get(CGM.getLLVMContext(),
@@ -1927,7 +1927,7 @@ CGObjCCommonMac::GenerateConstantNSString(const StringLiteral *Literal) {
// If we don't already have it, construct the type for a constant NSString.
if (!NSConstantStringType) {
NSConstantStringType =
- llvm::StructType::create({CGM.UnqualPtrTy, CGM.Int8PtrTy, CGM.IntTy},
+ llvm::StructType::create({CGM.DefaultPtrTy, CGM.Int8PtrTy, CGM.IntTy},
"struct.__builtin_NSString");
}
@@ -5959,7 +5959,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(
Int8PtrTy, PropertyListPtrTy);
// ImpnfABITy - LLVM for id (*)(id, SEL, ...)
- ImpnfABITy = CGM.UnqualPtrTy;
+ ImpnfABITy = CGM.DefaultPtrTy;
// struct _class_t {
// struct _class_t *isa;
@@ -6380,7 +6380,7 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
CGM.getModule(), ObjCTypes.ImpnfABITy, false,
llvm::GlobalValue::ExternalLinkage, nullptr, "_objc_empty_vtable");
else
- ObjCEmptyVtableVar = llvm::ConstantPointerNull::get(CGM.UnqualPtrTy);
+ ObjCEmptyVtableVar = llvm::ConstantPointerNull::get(CGM.DefaultPtrTy);
}
// FIXME: Is this correct (that meta class size is never computed)?
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 1ff2be7..85b2404 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -1714,12 +1714,12 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
// Copying constructor for the threadprivate variable.
// Must be NULL - reserved by runtime, but currently it requires that this
// parameter is always NULL. Otherwise it fires an assertion.
- CopyCtor = llvm::Constant::getNullValue(CGM.UnqualPtrTy);
+ CopyCtor = llvm::Constant::getNullValue(CGM.DefaultPtrTy);
if (Ctor == nullptr) {
- Ctor = llvm::Constant::getNullValue(CGM.UnqualPtrTy);
+ Ctor = llvm::Constant::getNullValue(CGM.DefaultPtrTy);
}
if (Dtor == nullptr) {
- Dtor = llvm::Constant::getNullValue(CGM.UnqualPtrTy);
+ Dtor = llvm::Constant::getNullValue(CGM.DefaultPtrTy);
}
if (!CGF) {
auto *InitFunctionTy =
diff --git a/clang/lib/CodeGen/CGPointerAuth.cpp b/clang/lib/CodeGen/CGPointerAuth.cpp
index 375f87a..dbb7bc9 100644
--- a/clang/lib/CodeGen/CGPointerAuth.cpp
+++ b/clang/lib/CodeGen/CGPointerAuth.cpp
@@ -426,10 +426,10 @@ CodeGenModule::getConstantSignedPointer(llvm::Constant *Pointer, unsigned Key,
llvm::ConstantInt *OtherDiscriminator) {
llvm::Constant *AddressDiscriminator;
if (StorageAddress) {
- assert(StorageAddress->getType() == UnqualPtrTy);
+ assert(StorageAddress->getType() == DefaultPtrTy);
AddressDiscriminator = StorageAddress;
} else {
- AddressDiscriminator = llvm::Constant::getNullValue(UnqualPtrTy);
+ AddressDiscriminator = llvm::Constant::getNullValue(DefaultPtrTy);
}
llvm::ConstantInt *IntegerDiscriminator;
diff --git a/clang/lib/CodeGen/CodeGenTypeCache.h b/clang/lib/CodeGen/CodeGenTypeCache.h
index e273ebe..015306b 100644
--- a/clang/lib/CodeGen/CodeGenTypeCache.h
+++ b/clang/lib/CodeGen/CodeGenTypeCache.h
@@ -53,7 +53,7 @@ struct CodeGenTypeCache {
/// void*, void** in the target's default address space (often 0)
union {
- llvm::PointerType *UnqualPtrTy;
+ llvm::PointerType *DefaultPtrTy;
llvm::PointerType *VoidPtrTy;
llvm::PointerType *Int8PtrTy;
llvm::PointerType *VoidPtrPtrTy;
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 9e195a9..65c4763 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -774,7 +774,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
} else {
llvm::Value *VFPAddr =
CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
- VirtualFn = CGF.Builder.CreateAlignedLoad(CGF.UnqualPtrTy, VFPAddr,
+ VirtualFn = CGF.Builder.CreateAlignedLoad(CGF.DefaultPtrTy, VFPAddr,
CGF.getPointerAlign(),
"memptr.virtualfn");
}
@@ -816,7 +816,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// function pointer.
CGF.EmitBlock(FnNonVirtual);
llvm::Value *NonVirtualFn =
- Builder.CreateIntToPtr(FnAsInt, CGF.UnqualPtrTy, "memptr.nonvirtualfn");
+ Builder.CreateIntToPtr(FnAsInt, CGF.DefaultPtrTy, "memptr.nonvirtualfn");
// Check the function pointer if CFI on member function pointers is enabled.
if (ShouldEmitCFICheck) {
@@ -856,7 +856,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// We're done.
CGF.EmitBlock(FnEnd);
- llvm::PHINode *CalleePtr = Builder.CreatePHI(CGF.UnqualPtrTy, 2);
+ llvm::PHINode *CalleePtr = Builder.CreatePHI(CGF.DefaultPtrTy, 2);
CalleePtr->addIncoming(VirtualFn, FnVirtual);
CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
@@ -1403,7 +1403,7 @@ void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
// Grab the vtable pointer as an intptr_t*.
auto *ClassDecl = ElementType->castAsCXXRecordDecl();
- llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.UnqualPtrTy, ClassDecl);
+ llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.DefaultPtrTy, ClassDecl);
// Track back to entry -2 and pull out the offset there.
llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
@@ -1749,7 +1749,7 @@ llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
auto AuthenticateVTable = [&](Address ThisAddr, const CXXRecordDecl *Decl) {
if (!CGF.getLangOpts().PointerAuthCalls)
return;
- (void)CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, Decl,
+ (void)CGF.GetVTablePtr(ThisAddr, CGF.DefaultPtrTy, Decl,
CodeGenFunction::VTableAuthMode::MustTrap);
};
@@ -1775,7 +1775,7 @@ llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
if (PerformPostCastAuthentication)
VTable = CGF.EmitPointerAuthAuth(StrippingAuthInfo, VTable);
} else
- VTable = CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, SrcDecl);
+ VTable = CGF.GetVTablePtr(ThisAddr, CGF.DefaultPtrTy, SrcDecl);
// Compare the vptr against the expected vptr for the destination type at
// this offset.
@@ -1828,7 +1828,7 @@ llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
// Get the vtable pointer.
llvm::Value *VTable =
- CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
+ CGF.GetVTablePtr(ThisAddr, CGF.DefaultPtrTy, ClassDecl);
// Get the offset-to-top from the vtable.
OffsetToTop =
@@ -1841,7 +1841,7 @@ llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
// Get the vtable pointer.
llvm::Value *VTable =
- CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
+ CGF.GetVTablePtr(ThisAddr, CGF.DefaultPtrTy, ClassDecl);
// Get the offset-to-top from the vtable.
OffsetToTop =
@@ -2578,7 +2578,7 @@ llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
// We can't simply ignore this load using nosanitize metadata because
// the metadata may be lost.
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.SizeTy, CGF.UnqualPtrTy, false);
+ llvm::FunctionType::get(CGF.SizeTy, CGF.DefaultPtrTy, false);
llvm::FunctionCallee F =
CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
return CGF.Builder.CreateCall(F, numElementsPtr.emitRawPointer(CGF));
@@ -2921,7 +2921,7 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
// We're assuming that the destructor function is something we can
// reasonably call with the default CC.
- llvm::Type *dtorTy = CGF.UnqualPtrTy;
+ llvm::Type *dtorTy = CGF.DefaultPtrTy;
// Preserve address space of addr.
auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
@@ -5035,7 +5035,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
auto catchRD = CatchType->getAsCXXRecordDecl();
CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
- llvm::Type *PtrTy = CGF.UnqualPtrTy; // addrspace 0 ok
+ llvm::Type *PtrTy = CGF.DefaultPtrTy;
// Check for a copy expression. If we don't have a copy expression,
// that means a trivial copy is okay.
@@ -5244,7 +5244,7 @@ void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
llvm::FunctionCallee Dtor,
llvm::Constant *Addr) {
if (D.getTLSKind() != VarDecl::TLS_None) {
- llvm::PointerType *PtrTy = CGF.UnqualPtrTy;
+ llvm::PointerType *PtrTy = CGF.DefaultPtrTy;
// extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
llvm::FunctionType *AtExitTy =
diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 19d9265..71e2449 100644
--- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -528,7 +528,7 @@ public:
CGM.IntTy,
CGM.IntTy,
CGM.IntTy,
- getImageRelativeType(CGM.UnqualPtrTy),
+ getImageRelativeType(CGM.DefaultPtrTy),
};
BaseClassDescriptorType = llvm::StructType::create(
CGM.getLLVMContext(), FieldTypes, "rtti.BaseClassDescriptor");
@@ -540,7 +540,7 @@ public:
return ClassHierarchyDescriptorType;
// Forward-declare RTTIClassHierarchyDescriptor to break a cycle.
llvm::Type *FieldTypes[] = {CGM.IntTy, CGM.IntTy, CGM.IntTy,
- getImageRelativeType(CGM.UnqualPtrTy)};
+ getImageRelativeType(CGM.DefaultPtrTy)};
ClassHierarchyDescriptorType =
llvm::StructType::create(FieldTypes, "rtti.ClassHierarchyDescriptor");
return ClassHierarchyDescriptorType;
@@ -554,7 +554,7 @@ public:
CGM.IntTy,
CGM.IntTy,
getImageRelativeType(CGM.Int8PtrTy),
- getImageRelativeType(CGM.UnqualPtrTy),
+ getImageRelativeType(CGM.DefaultPtrTy),
getImageRelativeType(CGM.VoidTy),
};
llvm::ArrayRef<llvm::Type *> FieldTypesRef(FieldTypes);
@@ -752,7 +752,7 @@ public:
llvm::SmallString<23> CTATypeName("eh.CatchableTypeArray.");
CTATypeName += llvm::utostr(NumEntries);
- llvm::Type *CTType = getImageRelativeType(CGM.UnqualPtrTy);
+ llvm::Type *CTType = getImageRelativeType(CGM.DefaultPtrTy);
llvm::Type *FieldTypes[] = {
CGM.IntTy, // NumEntries
llvm::ArrayType::get(CTType, NumEntries) // CatchableTypes
@@ -779,7 +779,7 @@ public:
llvm::FunctionCallee getThrowFn() {
// _CxxThrowException is passed an exception object and a ThrowInfo object
// which describes the exception.
- llvm::Type *Args[] = {CGM.Int8PtrTy, CGM.UnqualPtrTy};
+ llvm::Type *Args[] = {CGM.Int8PtrTy, CGM.DefaultPtrTy};
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
llvm::FunctionCallee Throw =
@@ -920,7 +920,7 @@ void MicrosoftCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
void MicrosoftCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
llvm::Value *Args[] = {llvm::ConstantPointerNull::get(CGM.Int8PtrTy),
- llvm::ConstantPointerNull::get(CGM.UnqualPtrTy)};
+ llvm::ConstantPointerNull::get(CGM.DefaultPtrTy)};
llvm::FunctionCallee Fn = getThrowFn();
if (isNoReturn)
CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, Args);
@@ -1969,13 +1969,13 @@ CGCallee MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
SourceLocation Loc) {
CGBuilderTy &Builder = CGF.Builder;
- Ty = CGF.UnqualPtrTy;
+ Ty = CGF.DefaultPtrTy;
Address VPtr =
adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
llvm::Value *VTable =
- CGF.GetVTablePtr(VPtr, CGF.UnqualPtrTy, MethodDecl->getParent());
+ CGF.GetVTablePtr(VPtr, CGF.DefaultPtrTy, MethodDecl->getParent());
MicrosoftVTableContext &VFTContext = CGM.getMicrosoftVTableContext();
MethodVFTableLocation ML = VFTContext.getMethodVFTableLocation(GD);
@@ -2136,9 +2136,9 @@ MicrosoftCXXABI::EmitVirtualMemPtrThunk(const CXXMethodDecl *MD,
// Load the vfptr and then callee from the vftable. The callee should have
// adjusted 'this' so that the vfptr is at offset zero.
- llvm::Type *ThunkPtrTy = CGF.UnqualPtrTy;
+ llvm::Type *ThunkPtrTy = CGF.DefaultPtrTy;
llvm::Value *VTable =
- CGF.GetVTablePtr(getThisAddress(CGF), CGF.UnqualPtrTy, MD->getParent());
+ CGF.GetVTablePtr(getThisAddress(CGF), CGF.DefaultPtrTy, MD->getParent());
llvm::Value *VFuncPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
ThunkPtrTy, VTable, ML.Index, "vfn");
@@ -2562,7 +2562,7 @@ static ConstantAddress getInitThreadEpochPtr(CodeGenModule &CGM) {
static llvm::FunctionCallee getInitThreadHeaderFn(CodeGenModule &CGM) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- CGM.UnqualPtrTy, /*isVarArg=*/false);
+ CGM.DefaultPtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(
FTy, "_Init_thread_header",
llvm::AttributeList::get(CGM.getLLVMContext(),
@@ -2574,7 +2574,7 @@ static llvm::FunctionCallee getInitThreadHeaderFn(CodeGenModule &CGM) {
static llvm::FunctionCallee getInitThreadFooterFn(CodeGenModule &CGM) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- CGM.UnqualPtrTy, /*isVarArg=*/false);
+ CGM.DefaultPtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(
FTy, "_Init_thread_footer",
llvm::AttributeList::get(CGM.getLLVMContext(),
@@ -2586,7 +2586,7 @@ static llvm::FunctionCallee getInitThreadFooterFn(CodeGenModule &CGM) {
static llvm::FunctionCallee getInitThreadAbortFn(CodeGenModule &CGM) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- CGM.UnqualPtrTy, /*isVarArg=*/false);
+ CGM.DefaultPtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(
FTy, "_Init_thread_abort",
llvm::AttributeList::get(CGM.getLLVMContext(),
@@ -3169,7 +3169,7 @@ MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
}
llvm::Value *VBTable =
- Builder.CreateAlignedLoad(CGM.UnqualPtrTy, VBPtr, VBPtrAlign, "vbtable");
+ Builder.CreateAlignedLoad(CGM.DefaultPtrTy, VBPtr, VBPtrAlign, "vbtable");
// Translate from byte offset to table index. It improves analyzability.
llvm::Value *VBTableIndex = Builder.CreateAShr(
@@ -3825,7 +3825,7 @@ MSRTTIBuilder::getBaseClassArray(SmallVectorImpl<MSRTTIClass> &Classes) {
// mode) bytes of padding. We provide a pointer sized amount of padding by
// adding +1 to Classes.size(). The sections have pointer alignment and are
// marked pick-any so it shouldn't matter.
- llvm::Type *PtrType = ABI.getImageRelativeType(CGM.UnqualPtrTy);
+ llvm::Type *PtrType = ABI.getImageRelativeType(CGM.DefaultPtrTy);
auto *ArrType = llvm::ArrayType::get(PtrType, Classes.size() + 1);
auto *BCA =
new llvm::GlobalVariable(Module, ArrType,
@@ -4372,7 +4372,7 @@ llvm::GlobalVariable *MicrosoftCXXABI::getCatchableTypeArray(QualType T) {
CatchableTypes.insert(getCatchableType(getContext().VoidPtrTy));
uint32_t NumEntries = CatchableTypes.size();
- llvm::Type *CTType = getImageRelativeType(CGM.UnqualPtrTy);
+ llvm::Type *CTType = getImageRelativeType(CGM.DefaultPtrTy);
llvm::ArrayType *AT = llvm::ArrayType::get(CTType, NumEntries);
llvm::StructType *CTAType = getCatchableTypeArrayType(NumEntries);
llvm::Constant *Fields[] = {
diff --git a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
index 2429a43..60f9b86 100644
--- a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
@@ -2037,7 +2037,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vld1q_x3_v:
case NEON::BI__builtin_neon_vld1_x4_v:
case NEON::BI__builtin_neon_vld1q_x4_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
@@ -2263,11 +2263,11 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
// in AArch64 it comes last. We may want to stick to one or the other.
if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
Arch == llvm::Triple::aarch64_32) {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
}
- llvm::Type *Tys[2] = {UnqualPtrTy, VTy};
+ llvm::Type *Tys[2] = {DefaultPtrTy, VTy};
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
}
case NEON::BI__builtin_neon_vsubhn_v: {
@@ -2858,7 +2858,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(
BuiltinID == clang::ARM::BI__builtin_arm_ldaex ? Intrinsic::arm_ldaex
: Intrinsic::arm_ldrex,
- UnqualPtrTy);
+ DefaultPtrTy);
CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
Val->addParamAttr(
0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
@@ -5225,7 +5225,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
? Intrinsic::aarch64_ldaxr
: Intrinsic::aarch64_ldxr,
- UnqualPtrTy);
+ DefaultPtrTy);
CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
Val->addParamAttr(
0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
@@ -7482,42 +7482,42 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_v:
case NEON::BI__builtin_neon_vld3q_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_v:
case NEON::BI__builtin_neon_vld4q_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld2_dup_v:
case NEON::BI__builtin_neon_vld2q_dup_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_dup_v:
case NEON::BI__builtin_neon_vld3q_dup_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_dup_v:
case NEON::BI__builtin_neon_vld4q_dup_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
diff --git a/clang/lib/CodeGen/TargetBuiltins/PPC.cpp b/clang/lib/CodeGen/TargetBuiltins/PPC.cpp
index e71dc9e..44d5938 100644
--- a/clang/lib/CodeGen/TargetBuiltins/PPC.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/PPC.cpp
@@ -59,7 +59,7 @@ static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,
Constraints += MachineClobbers;
}
- llvm::Type *PtrType = CGF.UnqualPtrTy;
+ llvm::Type *PtrType = CGF.DefaultPtrTy;
llvm::FunctionType *FTy = llvm::FunctionType::get(RetType, {PtrType}, false);
llvm::InlineAsm *IA =
diff --git a/clang/lib/CodeGen/Targets/PPC.cpp b/clang/lib/CodeGen/Targets/PPC.cpp
index 380e8c0..35e7655 100644
--- a/clang/lib/CodeGen/Targets/PPC.cpp
+++ b/clang/lib/CodeGen/Targets/PPC.cpp
@@ -490,7 +490,7 @@ RValue PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
llvm::Type *DirectTy = CGF.ConvertType(Ty), *ElementTy = DirectTy;
if (isIndirect)
- DirectTy = CGF.UnqualPtrTy;
+ DirectTy = CGF.DefaultPtrTy;
// Case 1: consume registers.
Address RegAddr = Address::invalid();
diff --git a/clang/lib/Driver/Distro.cpp b/clang/lib/Driver/Distro.cpp
index 838e087..df10458 100644
--- a/clang/lib/Driver/Distro.cpp
+++ b/clang/lib/Driver/Distro.cpp
@@ -61,10 +61,6 @@ static Distro::DistroType DetectLsbRelease(llvm::vfs::FileSystem &VFS) {
if (Version == Distro::UnknownDistro &&
Line.starts_with("DISTRIB_CODENAME="))
Version = llvm::StringSwitch<Distro::DistroType>(Line.substr(17))
- .Case("maverick", Distro::UbuntuMaverick)
- .Case("natty", Distro::UbuntuNatty)
- .Case("oneiric", Distro::UbuntuOneiric)
- .Case("precise", Distro::UbuntuPrecise)
.Case("quantal", Distro::UbuntuQuantal)
.Case("raring", Distro::UbuntuRaring)
.Case("saucy", Distro::UbuntuSaucy)
@@ -120,13 +116,17 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
if (Data.starts_with("Fedora release"))
return Distro::Fedora;
if (Data.starts_with("Red Hat Enterprise Linux") ||
- Data.starts_with("CentOS") || Data.starts_with("Scientific Linux")) {
+ Data.starts_with("CentOS") || Data.starts_with("AlmaLinux") ||
+ Data.starts_with("Rocky Linux") ||
+ Data.starts_with("Scientific Linux")) {
+ if (Data.contains("release 10"))
+ return Distro::RHEL10;
+ if (Data.contains("release 9"))
+ return Distro::RHEL9;
+ if (Data.contains("release 8"))
+ return Distro::RHEL8;
if (Data.contains("release 7"))
return Distro::RHEL7;
- else if (Data.contains("release 6"))
- return Distro::RHEL6;
- else if (Data.contains("release 5"))
- return Distro::RHEL5;
}
return Distro::UnknownDistro;
}
@@ -139,12 +139,6 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
int MajorVersion;
if (!Data.split('.').first.getAsInteger(10, MajorVersion)) {
switch (MajorVersion) {
- case 5:
- return Distro::DebianLenny;
- case 6:
- return Distro::DebianSqueeze;
- case 7:
- return Distro::DebianWheezy;
case 8:
return Distro::DebianJessie;
case 9:
@@ -166,8 +160,6 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
}
}
return llvm::StringSwitch<Distro::DistroType>(Data.split("\n").first)
- .Case("squeeze/sid", Distro::DebianSqueeze)
- .Case("wheezy/sid", Distro::DebianWheezy)
.Case("jessie/sid", Distro::DebianJessie)
.Case("stretch/sid", Distro::DebianStretch)
.Case("buster/sid", Distro::DebianBuster)
diff --git a/clang/lib/Driver/ToolChains/Arch/Mips.cpp b/clang/lib/Driver/ToolChains/Arch/Mips.cpp
index bac8681..227c6a0 100644
--- a/clang/lib/Driver/ToolChains/Arch/Mips.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/Mips.cpp
@@ -482,9 +482,9 @@ bool mips::isFPXXDefault(const llvm::Triple &Triple, StringRef CPUName,
return false;
return llvm::StringSwitch<bool>(CPUName)
- .Cases("mips2", "mips3", "mips4", "mips5", true)
- .Cases("mips32", "mips32r2", "mips32r3", "mips32r5", true)
- .Cases("mips64", "mips64r2", "mips64r3", "mips64r5", true)
+ .Cases({"mips2", "mips3", "mips4", "mips5"}, true)
+ .Cases({"mips32", "mips32r2", "mips32r3", "mips32r5"}, true)
+ .Cases({"mips64", "mips64r2", "mips64r3", "mips64r5"}, true)
.Default(false);
}
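[Editor's note] Several call sites in this diff migrate llvm::StringSwitch from the variadic Cases(...) overload to the initializer-list form, which takes all the strings as one braced list before the value; the same change recurs below in Darwin.cpp, FrontendAction.cpp, PPLexerChange.cpp, ParsePragma.cpp, and SemaChecking.cpp. The pattern, as used here:

    return llvm::StringSwitch<bool>(CPUName)
        .Cases({"mips2", "mips3", "mips4", "mips5"}, true)
        .Default(false);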
diff --git a/clang/lib/Driver/ToolChains/Darwin.cpp b/clang/lib/Driver/ToolChains/Darwin.cpp
index d2356eb..cc5bcd1 100644
--- a/clang/lib/Driver/ToolChains/Darwin.cpp
+++ b/clang/lib/Driver/ToolChains/Darwin.cpp
@@ -51,15 +51,15 @@ llvm::Triple::ArchType darwin::getArchTypeForMachOArchName(StringRef Str) {
// translation.
return llvm::StringSwitch<llvm::Triple::ArchType>(Str)
- .Cases("i386", "i486", "i486SX", "i586", "i686", llvm::Triple::x86)
- .Cases("pentium", "pentpro", "pentIIm3", "pentIIm5", "pentium4",
+ .Cases({"i386", "i486", "i486SX", "i586", "i686"}, llvm::Triple::x86)
+ .Cases({"pentium", "pentpro", "pentIIm3", "pentIIm5", "pentium4"},
llvm::Triple::x86)
- .Cases("x86_64", "x86_64h", llvm::Triple::x86_64)
+ .Cases({"x86_64", "x86_64h"}, llvm::Triple::x86_64)
// This is derived from the driver.
- .Cases("arm", "armv4t", "armv5", "armv6", "armv6m", llvm::Triple::arm)
- .Cases("armv7", "armv7em", "armv7k", "armv7m", llvm::Triple::arm)
- .Cases("armv7s", "xscale", llvm::Triple::arm)
- .Cases("arm64", "arm64e", llvm::Triple::aarch64)
+ .Cases({"arm", "armv4t", "armv5", "armv6", "armv6m"}, llvm::Triple::arm)
+ .Cases({"armv7", "armv7em", "armv7k", "armv7m"}, llvm::Triple::arm)
+ .Cases({"armv7s", "xscale"}, llvm::Triple::arm)
+ .Cases({"arm64", "arm64e"}, llvm::Triple::aarch64)
.Case("arm64_32", llvm::Triple::aarch64_32)
.Case("r600", llvm::Triple::r600)
.Case("amdgcn", llvm::Triple::amdgcn)
diff --git a/clang/lib/Format/WhitespaceManager.cpp b/clang/lib/Format/WhitespaceManager.cpp
index aae2f3e..65fc65e 100644
--- a/clang/lib/Format/WhitespaceManager.cpp
+++ b/clang/lib/Format/WhitespaceManager.cpp
@@ -506,15 +506,15 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
MatchedIndices.clear();
};
- unsigned i = StartAt;
- for (unsigned e = Changes.size(); i != e; ++i) {
- auto &CurrentChange = Changes[i];
+ unsigned I = StartAt;
+ for (unsigned E = Changes.size(); I != E; ++I) {
+ auto &CurrentChange = Changes[I];
if (CurrentChange.indentAndNestingLevel() < IndentAndNestingLevel)
break;
if (CurrentChange.NewlinesBefore != 0) {
CommasBeforeMatch = 0;
- EndOfSequence = i;
+ EndOfSequence = I;
// Whether to break the alignment sequence because of an empty line.
bool EmptyLineBreak =
@@ -530,8 +530,8 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
// A new line starts; re-initialize the line-status tracking bools.
// Keep the match state if a string literal is continued on this line.
- if (i == 0 || CurrentChange.Tok->isNot(tok::string_literal) ||
- Changes[i - 1].Tok->isNot(tok::string_literal)) {
+ if (I == 0 || CurrentChange.Tok->isNot(tok::string_literal) ||
+ Changes[I - 1].Tok->isNot(tok::string_literal)) {
FoundMatchOnLine = false;
}
LineIsComment = true;
@@ -547,8 +547,8 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
IndentAndNestingLevel) {
// Call AlignTokens recursively, skipping over this scope block.
const auto StoppedAt =
- AlignTokens(Style, Matches, Changes, i, ACS, RightJustify);
- i = StoppedAt - 1;
+ AlignTokens(Style, Matches, Changes, I, ACS, RightJustify);
+ I = StoppedAt - 1;
continue;
}
}
@@ -559,7 +559,7 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
// If there is more than one matching token per line, or if the number of
// preceding commas does not match anymore, end the sequence.
if (FoundMatchOnLine || CommasBeforeMatch != CommasBeforeLastMatch) {
- MatchedIndices.push_back(i);
+ MatchedIndices.push_back(I);
AlignCurrentSequence();
}
@@ -567,29 +567,69 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
FoundMatchOnLine = true;
if (StartOfSequence == 0)
- StartOfSequence = i;
+ StartOfSequence = I;
unsigned ChangeWidthLeft = CurrentChange.StartOfTokenColumn;
unsigned ChangeWidthAnchor = 0;
unsigned ChangeWidthRight = 0;
+ unsigned CurrentChangeWidthRight = 0;
if (RightJustify)
if (ACS.PadOperators)
ChangeWidthAnchor = CurrentChange.TokenLength;
else
ChangeWidthLeft += CurrentChange.TokenLength;
else
- ChangeWidthRight = CurrentChange.TokenLength;
- for (unsigned j = i + 1; j != e && Changes[j].NewlinesBefore == 0; ++j) {
- ChangeWidthRight += Changes[j].Spaces;
+ CurrentChangeWidthRight = CurrentChange.TokenLength;
+ const FormatToken *MatchingParenToEncounter = nullptr;
+ for (unsigned J = I + 1;
+ J != E && (Changes[J].NewlinesBefore == 0 || MatchingParenToEncounter);
+ ++J) {
+ const auto &Change = Changes[J];
+ const auto *Tok = Change.Tok;
+
+ if (Tok->MatchingParen) {
+ if (Tok->isOneOf(tok::l_paren, tok::l_brace, tok::l_square,
+ TT_TemplateOpener) &&
+ !MatchingParenToEncounter) {
+ // If the next token is on the next line, we probably don't need to
+ // check the following lengths, because it most likely isn't aligned
+ // with the rest.
+ if (J + 1 != E && Changes[J + 1].NewlinesBefore == 0)
+ MatchingParenToEncounter = Tok->MatchingParen;
+ } else if (MatchingParenToEncounter == Tok->MatchingParen) {
+ MatchingParenToEncounter = nullptr;
+ }
+ }
+
+ if (Change.NewlinesBefore != 0) {
+ ChangeWidthRight = std::max(ChangeWidthRight, CurrentChangeWidthRight);
+ const auto ChangeWidthStart = ChangeWidthLeft + ChangeWidthAnchor;
+      // If the position of the current token is columnwise before the beginning
+ // of the alignment, we drop out here, because the next line does not
+ // have to be moved with the previous one(s) for the alignment. E.g.:
+ // int i1 = 1; | <- ColumnLimit | int i1 = 1;
+ // int j = 0; | Without the break -> | int j = 0;
+ // int k = bar( | We still want to align the = | int k = bar(
+ // argument1, | here, even if we can't move | argument1,
+ // argument2); | the following lines. | argument2);
+ if (static_cast<unsigned>(Change.Spaces) < ChangeWidthStart)
+ break;
+ CurrentChangeWidthRight = Change.Spaces - ChangeWidthStart;
+ } else {
+ CurrentChangeWidthRight += Change.Spaces;
+ }
+
// Changes are generally 1:1 with the tokens, but a change could also be
// inside of a token, in which case it's counted more than once: once for
// the whitespace surrounding the token (!IsInsideToken) and once for
// each whitespace change within it (IsInsideToken).
// Therefore, changes inside of a token should only count the space.
- if (!Changes[j].IsInsideToken)
- ChangeWidthRight += Changes[j].TokenLength;
+ if (!Change.IsInsideToken)
+ CurrentChangeWidthRight += Change.TokenLength;
}
+ ChangeWidthRight = std::max(ChangeWidthRight, CurrentChangeWidthRight);
+
// If we are restricted by the maximum column width, end the sequence.
unsigned NewLeft = std::max(ChangeWidthLeft, WidthLeft);
unsigned NewAnchor = std::max(ChangeWidthAnchor, WidthAnchor);
@@ -598,7 +638,7 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
if (Style.ColumnLimit != 0 &&
Style.ColumnLimit < NewLeft + NewAnchor + NewRight) {
AlignCurrentSequence();
- StartOfSequence = i;
+ StartOfSequence = I;
WidthLeft = ChangeWidthLeft;
WidthAnchor = ChangeWidthAnchor;
WidthRight = ChangeWidthRight;
@@ -607,12 +647,12 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
WidthAnchor = NewAnchor;
WidthRight = NewRight;
}
- MatchedIndices.push_back(i);
+ MatchedIndices.push_back(I);
}
- EndOfSequence = i;
+ EndOfSequence = I;
AlignCurrentSequence();
- return i;
+ return I;
}
// Aligns a sequence of matching tokens, on the MinColumn column.
@@ -663,7 +703,7 @@ void WhitespaceManager::alignConsecutiveMacros() {
auto AlignMacrosMatches = [](const Change &C) {
const FormatToken *Current = C.Tok;
- unsigned SpacesRequiredBefore = 1;
+ assert(Current);
if (Current->SpacesRequiredBefore == 0 || !Current->Previous)
return false;
@@ -672,22 +712,22 @@ void WhitespaceManager::alignConsecutiveMacros() {
// If token is a ")", skip over the parameter list, to the
// token that precedes the "("
- if (Current->is(tok::r_paren) && Current->MatchingParen) {
- Current = Current->MatchingParen->Previous;
- SpacesRequiredBefore = 0;
- }
-
- if (!Current || Current->isNot(tok::identifier))
- return false;
-
- if (!Current->Previous || Current->Previous->isNot(tok::pp_define))
+ if (Current->is(tok::r_paren)) {
+ const auto *MatchingParen = Current->MatchingParen;
+ // For a macro function, 0 spaces are required between the
+ // identifier and the lparen that opens the parameter list.
+ if (!MatchingParen || MatchingParen->SpacesRequiredBefore > 0 ||
+ !MatchingParen->Previous) {
+ return false;
+ }
+ Current = MatchingParen->Previous;
+ } else if (Current->Next->SpacesRequiredBefore != 1) {
+ // For a simple macro, 1 space is required between the
+ // identifier and the first token of the defined value.
return false;
+ }
- // For a macro function, 0 spaces are required between the
- // identifier and the lparen that opens the parameter list.
- // For a simple macro, 1 space is required between the
- // identifier and the first token of the defined value.
- return Current->Next->SpacesRequiredBefore == SpacesRequiredBefore;
+ return Current->endsSequence(tok::identifier, tok::pp_define);
};
AlignTokens<decltype(AlignMacrosMatches) &, /*SimpleCheck=*/true>(
diff --git a/clang/lib/Frontend/FrontendAction.cpp b/clang/lib/Frontend/FrontendAction.cpp
index 1b63c40..0daa20a 100644
--- a/clang/lib/Frontend/FrontendAction.cpp
+++ b/clang/lib/Frontend/FrontendAction.cpp
@@ -629,7 +629,7 @@ static std::error_code collectModuleHeaderIncludes(
// Check whether this entry has an extension typically associated with
// headers.
if (!llvm::StringSwitch<bool>(llvm::sys::path::extension(Dir->path()))
- .Cases(".h", ".H", ".hh", ".hpp", true)
+ .Cases({".h", ".H", ".hh", ".hpp"}, true)
.Default(false))
continue;
diff --git a/clang/lib/Headers/__clang_hip_runtime_wrapper.h b/clang/lib/Headers/__clang_hip_runtime_wrapper.h
index fb0ece9..19ce7a5 100644
--- a/clang/lib/Headers/__clang_hip_runtime_wrapper.h
+++ b/clang/lib/Headers/__clang_hip_runtime_wrapper.h
@@ -26,6 +26,7 @@
#define __managed__ __attribute__((managed))
#define __cluster_dims__(...) __attribute__((cluster_dims(__VA_ARGS__)))
+#define __no_cluster__ __attribute__((no_cluster))
#if !defined(__cplusplus) || __cplusplus < 201103L
#define nullptr NULL;
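[Editor's note] __no_cluster__ joins __cluster_dims__ as a convenience macro over the corresponding attribute. Hypothetical usage on a HIP kernel, assuming the attribute applies to kernel functions the way cluster_dims does:

    __global__ __no_cluster__ void kern() {}  // opt the kernel out of clustering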
diff --git a/clang/lib/Headers/avx2intrin.h b/clang/lib/Headers/avx2intrin.h
index d35bc0e..fdb825f 100644
--- a/clang/lib/Headers/avx2intrin.h
+++ b/clang/lib/Headers/avx2intrin.h
@@ -1298,9 +1298,8 @@ _mm256_min_epu32(__m256i __a, __m256i __b) {
/// \param __a
/// A 256-bit integer vector containing the source bytes.
/// \returns The 32-bit integer mask.
-static __inline__ int __DEFAULT_FN_ATTRS256
-_mm256_movemask_epi8(__m256i __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_movemask_epi8(__m256i __a) {
return __builtin_ia32_pmovmskb256((__v32qi)__a);
}
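[Editor's note] _mm256_movemask_epi8 becomes usable in constant expressions via the *_CONSTEXPR attribute macro; the movemask/minpos intrinsics in avxintrin.h, emmintrin.h, smmintrin.h, and xmmintrin.h below get the same treatment. A compile-time check this enables (C++ sketch, using _mm256_movemask_pd from the avxintrin.h hunk below):

    constexpr __m256d v = {-1.0, 2.0, -3.0, 4.0};
    static_assert(_mm256_movemask_pd(v) == 0b0101);  // sign bits of lanes 0 and 2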
diff --git a/clang/lib/Headers/avx512dqintrin.h b/clang/lib/Headers/avx512dqintrin.h
index 3681cca..fef1a2d 100644
--- a/clang/lib/Headers/avx512dqintrin.h
+++ b/clang/lib/Headers/avx512dqintrin.h
@@ -1200,10 +1200,10 @@ _mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) {
(__v8di)_mm512_setzero_si512());
}
-#define _mm512_extractf32x8_ps(A, imm) \
- ((__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
- (__v8sf)_mm256_undefined_ps(), \
- (__mmask8)-1))
+#define _mm512_extractf32x8_ps(A, imm) \
+ ((__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8) - 1))
#define _mm512_mask_extractf32x8_ps(W, U, A, imm) \
((__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
@@ -1215,11 +1215,10 @@ _mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) {
(__v8sf)_mm256_setzero_ps(), \
(__mmask8)(U)))
-#define _mm512_extractf64x2_pd(A, imm) \
- ((__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
- (int)(imm), \
- (__v2df)_mm_undefined_pd(), \
- (__mmask8)-1))
+#define _mm512_extractf64x2_pd(A, imm) \
+ ((__m128d)__builtin_ia32_extractf64x2_512_mask( \
+ (__v8df)(__m512d)(A), (int)(imm), (__v2df)_mm_setzero_pd(), \
+ (__mmask8) - 1))
#define _mm512_mask_extractf64x2_pd(W, U, A, imm) \
((__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
@@ -1233,10 +1232,10 @@ _mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) {
(__v2df)_mm_setzero_pd(), \
(__mmask8)(U)))
-#define _mm512_extracti32x8_epi32(A, imm) \
- ((__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
- (__v8si)_mm256_undefined_si256(), \
- (__mmask8)-1))
+#define _mm512_extracti32x8_epi32(A, imm) \
+ ((__m256i)__builtin_ia32_extracti32x8_mask( \
+ (__v16si)(__m512i)(A), (int)(imm), (__v8si)_mm256_setzero_si256(), \
+ (__mmask8) - 1))
#define _mm512_mask_extracti32x8_epi32(W, U, A, imm) \
((__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
@@ -1248,11 +1247,10 @@ _mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) {
(__v8si)_mm256_setzero_si256(), \
(__mmask8)(U)))
-#define _mm512_extracti64x2_epi64(A, imm) \
- ((__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
- (int)(imm), \
- (__v2di)_mm_undefined_si128(), \
- (__mmask8)-1))
+#define _mm512_extracti64x2_epi64(A, imm) \
+ ((__m128i)__builtin_ia32_extracti64x2_512_mask( \
+ (__v8di)(__m512i)(A), (int)(imm), (__v2di)_mm_setzero_si128(), \
+ (__mmask8) - 1))
#define _mm512_mask_extracti64x2_epi64(W, U, A, imm) \
((__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
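[Editor's note] The unmasked extract macros switch their passthrough operand from an undefined vector to a zero vector. Since the mask is all-ones, every result lane comes from the source, so nothing changes at run time, but undef values stay out of the constant evaluator; the avx512f and avx512vl variants below follow suit. In short:

    __m512d v  = _mm512_set1_pd(1.0);
    __m128d lo = _mm512_extractf64x2_pd(v, 0);  // passthrough (now zero) is fully
                                                // overwritten by the -1 mask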
diff --git a/clang/lib/Headers/avx512fintrin.h b/clang/lib/Headers/avx512fintrin.h
index 07de036..18c4a44 100644
--- a/clang/lib/Headers/avx512fintrin.h
+++ b/clang/lib/Headers/avx512fintrin.h
@@ -3156,10 +3156,10 @@ _mm512_maskz_permutex2var_epi64(__mmask8 __U, __m512i __A, __m512i __I,
(__v16si)_mm512_setzero_si512()))
/* Vector Extract */
-#define _mm512_extractf64x4_pd(A, I) \
- ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
- (__v4df)_mm256_undefined_pd(), \
- (__mmask8)-1))
+#define _mm512_extractf64x4_pd(A, I) \
+ ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8) - 1))
#define _mm512_mask_extractf64x4_pd(W, U, A, imm) \
((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
@@ -3171,10 +3171,10 @@ _mm512_maskz_permutex2var_epi64(__mmask8 __U, __m512i __A, __m512i __I,
(__v4df)_mm256_setzero_pd(), \
(__mmask8)(U)))
-#define _mm512_extractf32x4_ps(A, I) \
- ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \
- (__v4sf)_mm_undefined_ps(), \
- (__mmask8)-1))
+#define _mm512_extractf32x4_ps(A, I) \
+ ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8) - 1))
#define _mm512_mask_extractf32x4_ps(W, U, A, imm) \
((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
@@ -7089,10 +7089,10 @@ _mm512_mask_cvtepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
__builtin_ia32_pmovqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M);
}
-#define _mm512_extracti32x4_epi32(A, imm) \
- ((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
- (__v4si)_mm_undefined_si128(), \
- (__mmask8)-1))
+#define _mm512_extracti32x4_epi32(A, imm) \
+ ((__m128i)__builtin_ia32_extracti32x4_mask( \
+ (__v16si)(__m512i)(A), (int)(imm), (__v4si)_mm_setzero_si128(), \
+ (__mmask8) - 1))
#define _mm512_mask_extracti32x4_epi32(W, U, A, imm) \
((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
@@ -7104,10 +7104,10 @@ _mm512_mask_cvtepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
(__v4si)_mm_setzero_si128(), \
(__mmask8)(U)))
-#define _mm512_extracti64x4_epi64(A, imm) \
+#define _mm512_extracti64x4_epi64(A, imm) \
((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
- (__v4di)_mm256_undefined_si256(), \
- (__mmask8)-1))
+ (__v4di)_mm256_setzero_si256(), \
+ (__mmask8) - 1))
#define _mm512_mask_extracti64x4_epi64(W, U, A, imm) \
((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
diff --git a/clang/lib/Headers/avx512vldqintrin.h b/clang/lib/Headers/avx512vldqintrin.h
index ee7974e..707d039 100644
--- a/clang/lib/Headers/avx512vldqintrin.h
+++ b/clang/lib/Headers/avx512vldqintrin.h
@@ -1062,11 +1062,10 @@ _mm256_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) {
(__v4di)_mm256_setzero_si256());
}
-#define _mm256_extractf64x2_pd(A, imm) \
- ((__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
- (int)(imm), \
- (__v2df)_mm_undefined_pd(), \
- (__mmask8)-1))
+#define _mm256_extractf64x2_pd(A, imm) \
+ ((__m128d)__builtin_ia32_extractf64x2_256_mask( \
+ (__v4df)(__m256d)(A), (int)(imm), (__v2df)_mm_setzero_pd(), \
+ (__mmask8) - 1))
#define _mm256_mask_extractf64x2_pd(W, U, A, imm) \
((__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
@@ -1080,11 +1079,10 @@ _mm256_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) {
(__v2df)_mm_setzero_pd(), \
(__mmask8)(U)))
-#define _mm256_extracti64x2_epi64(A, imm) \
- ((__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
- (int)(imm), \
- (__v2di)_mm_undefined_si128(), \
- (__mmask8)-1))
+#define _mm256_extracti64x2_epi64(A, imm) \
+ ((__m128i)__builtin_ia32_extracti64x2_256_mask( \
+ (__v4di)(__m256i)(A), (int)(imm), (__v2di)_mm_setzero_si128(), \
+ (__mmask8) - 1))
#define _mm256_mask_extracti64x2_epi64(W, U, A, imm) \
((__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
diff --git a/clang/lib/Headers/avx512vlintrin.h b/clang/lib/Headers/avx512vlintrin.h
index 676b5a0..92bb444 100644
--- a/clang/lib/Headers/avx512vlintrin.h
+++ b/clang/lib/Headers/avx512vlintrin.h
@@ -7545,11 +7545,10 @@ _mm256_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
__builtin_ia32_pmovqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
}
-#define _mm256_extractf32x4_ps(A, imm) \
- ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
- (int)(imm), \
- (__v4sf)_mm_undefined_ps(), \
- (__mmask8)-1))
+#define _mm256_extractf32x4_ps(A, imm) \
+ ((__m128)__builtin_ia32_extractf32x4_256_mask( \
+ (__v8sf)(__m256)(A), (int)(imm), (__v4sf)_mm_setzero_ps(), \
+ (__mmask8) - 1))
#define _mm256_mask_extractf32x4_ps(W, U, A, imm) \
((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
@@ -7563,11 +7562,10 @@ _mm256_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
(__v4sf)_mm_setzero_ps(), \
(__mmask8)(U)))
-#define _mm256_extracti32x4_epi32(A, imm) \
- ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
- (int)(imm), \
- (__v4si)_mm_undefined_si128(), \
- (__mmask8)-1))
+#define _mm256_extracti32x4_epi32(A, imm) \
+ ((__m128i)__builtin_ia32_extracti32x4_256_mask( \
+ (__v8si)(__m256i)(A), (int)(imm), (__v4si)_mm_setzero_si128(), \
+ (__mmask8) - 1))
#define _mm256_mask_extracti32x4_epi32(W, U, A, imm) \
((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
diff --git a/clang/lib/Headers/avxintrin.h b/clang/lib/Headers/avxintrin.h
index 696ec31..4aef924 100644
--- a/clang/lib/Headers/avxintrin.h
+++ b/clang/lib/Headers/avxintrin.h
@@ -2941,9 +2941,8 @@ _mm256_testnzc_si256(__m256i __a, __m256i __b) {
/// A 256-bit vector of [4 x double] containing the double-precision
/// floating point values with sign bits to be extracted.
/// \returns The sign bits from the operand, written to bits [3:0].
-static __inline int __DEFAULT_FN_ATTRS
-_mm256_movemask_pd(__m256d __a)
-{
+static __inline int __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_movemask_pd(__m256d __a) {
return __builtin_ia32_movmskpd256((__v4df)__a);
}
@@ -2959,9 +2958,8 @@ _mm256_movemask_pd(__m256d __a)
/// A 256-bit vector of [8 x float] containing the single-precision floating
/// point values with sign bits to be extracted.
/// \returns The sign bits from the operand, written to bits [7:0].
-static __inline int __DEFAULT_FN_ATTRS
-_mm256_movemask_ps(__m256 __a)
-{
+static __inline int __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_movemask_ps(__m256 __a) {
return __builtin_ia32_movmskps256((__v8sf)__a);
}
diff --git a/clang/lib/Headers/emmintrin.h b/clang/lib/Headers/emmintrin.h
index 454e9a2..dbe5ca0 100644
--- a/clang/lib/Headers/emmintrin.h
+++ b/clang/lib/Headers/emmintrin.h
@@ -4280,7 +4280,8 @@ _mm_packus_epi16(__m128i __a, __m128i __b) {
/// A 128-bit integer vector containing the values with bits to be extracted.
/// \returns The most significant bits from each 8-bit element in \a __a,
/// written to bits [15:0]. The other bits are assigned zeros.
-static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_epi8(__m128i __a) {
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_movemask_epi8(__m128i __a) {
return __builtin_ia32_pmovmskb128((__v16qi)__a);
}
@@ -4699,7 +4700,8 @@ _mm_unpacklo_pd(__m128d __a, __m128d __b) {
/// be extracted.
/// \returns The sign bits from each of the double-precision elements in \a __a,
/// written to bits [1:0]. The remaining bits are assigned values of zero.
-static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_pd(__m128d __a) {
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_movemask_pd(__m128d __a) {
return __builtin_ia32_movmskpd((__v2df)__a);
}
diff --git a/clang/lib/Headers/smmintrin.h b/clang/lib/Headers/smmintrin.h
index 4f197d5..511a135 100644
--- a/clang/lib/Headers/smmintrin.h
+++ b/clang/lib/Headers/smmintrin.h
@@ -1524,7 +1524,8 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2) {
/// \returns A 128-bit value where bits [15:0] contain the minimum value found
/// in parameter \a __V, bits [18:16] contain the index of the minimum value
/// and the remaining bits are set to 0.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_minpos_epu16(__m128i __V) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_minpos_epu16(__m128i __V) {
return (__m128i)__builtin_ia32_phminposuw128((__v8hi)__V);
}
diff --git a/clang/lib/Headers/xmmintrin.h b/clang/lib/Headers/xmmintrin.h
index 605409c..fe6afdc 100644
--- a/clang/lib/Headers/xmmintrin.h
+++ b/clang/lib/Headers/xmmintrin.h
@@ -3014,9 +3014,7 @@ _mm_cvtps_pi8(__m128 __a)
/// \returns A 32-bit integer value. Bits [3:0] contain the sign bits from each
/// single-precision floating-point element of the parameter. Bits [31:4] are
/// set to zero.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_movemask_ps(__m128 __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR _mm_movemask_ps(__m128 __a) {
return __builtin_ia32_movmskps((__v4sf)__a);
}
diff --git a/clang/lib/Lex/PPLexerChange.cpp b/clang/lib/Lex/PPLexerChange.cpp
index d8f61c0..b014124 100644
--- a/clang/lib/Lex/PPLexerChange.cpp
+++ b/clang/lib/Lex/PPLexerChange.cpp
@@ -302,7 +302,7 @@ void Preprocessor::diagnoseMissingHeaderInUmbrellaDir(const Module &Mod) {
// Check whether this entry has an extension typically associated with
// headers.
if (!StringSwitch<bool>(llvm::sys::path::extension(Entry->path()))
- .Cases(".h", ".H", ".hh", ".hpp", true)
+ .Cases({".h", ".H", ".hh", ".hpp"}, true)
.Default(false))
continue;
diff --git a/clang/lib/Parse/ParsePragma.cpp b/clang/lib/Parse/ParsePragma.cpp
index 9893381..7c2b928 100644
--- a/clang/lib/Parse/ParsePragma.cpp
+++ b/clang/lib/Parse/ParsePragma.cpp
@@ -1419,10 +1419,11 @@ bool Parser::HandlePragmaLoopHint(LoopHint &Hint) {
// Return a valid hint if pragma unroll or nounroll were specified
// without an argument.
- auto IsLoopHint = llvm::StringSwitch<bool>(PragmaNameInfo->getName())
- .Cases("unroll", "nounroll", "unroll_and_jam",
- "nounroll_and_jam", true)
- .Default(false);
+ auto IsLoopHint =
+ llvm::StringSwitch<bool>(PragmaNameInfo->getName())
+ .Cases({"unroll", "nounroll", "unroll_and_jam", "nounroll_and_jam"},
+ true)
+ .Default(false);
if (Toks.empty() && IsLoopHint) {
ConsumeAnnotationToken();
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index ef1be23..2990fd6 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -6926,13 +6926,13 @@ StringRef Sema::GetFormatStringTypeName(FormatStringType FST) {
FormatStringType Sema::GetFormatStringType(StringRef Flavor) {
return llvm::StringSwitch<FormatStringType>(Flavor)
- .Cases("gnu_scanf", "scanf", FormatStringType::Scanf)
- .Cases("gnu_printf", "printf", "printf0", "syslog",
+ .Cases({"gnu_scanf", "scanf"}, FormatStringType::Scanf)
+ .Cases({"gnu_printf", "printf", "printf0", "syslog"},
FormatStringType::Printf)
- .Cases("NSString", "CFString", FormatStringType::NSString)
- .Cases("gnu_strftime", "strftime", FormatStringType::Strftime)
- .Cases("gnu_strfmon", "strfmon", FormatStringType::Strfmon)
- .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err",
+ .Cases({"NSString", "CFString"}, FormatStringType::NSString)
+ .Cases({"gnu_strftime", "strftime"}, FormatStringType::Strftime)
+ .Cases({"gnu_strfmon", "strfmon"}, FormatStringType::Strfmon)
+ .Cases({"kprintf", "cmn_err", "vcmn_err", "zcmn_err"},
FormatStringType::Kprintf)
.Case("freebsd_kprintf", FormatStringType::FreeBSDKPrintf)
.Case("os_trace", FormatStringType::OSLog)
diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp
index 829bd87..54cbfe4 100644
--- a/clang/lib/Sema/SemaConcept.cpp
+++ b/clang/lib/Sema/SemaConcept.cpp
@@ -570,6 +570,11 @@ ConstraintSatisfactionChecker::SubstitutionInTemplateArguments(
if (!Constraint.hasParameterMapping())
return std::move(MLTAL);
+ // The mapping is empty, meaning no template arguments are needed for
+ // evaluation.
+ if (Constraint.getParameterMapping().empty())
+ return MultiLevelTemplateArgumentList();
+
TemplateDeductionInfo Info(Constraint.getBeginLoc());
Sema::InstantiatingTemplate Inst(
S, Constraint.getBeginLoc(),
@@ -736,8 +741,9 @@ ExprResult ConstraintSatisfactionChecker::Evaluate(
UnsubstitutedConstraintSatisfactionCacheResult Cache;
Cache.Satisfaction.ContainsErrors = Satisfaction.ContainsErrors;
Cache.Satisfaction.IsSatisfied = Satisfaction.IsSatisfied;
- std::copy(Satisfaction.Details.begin() + Size, Satisfaction.Details.end(),
- std::back_inserter(Cache.Satisfaction.Details));
+ Cache.Satisfaction.Details.insert(Cache.Satisfaction.Details.end(),
+ Satisfaction.Details.begin() + Size,
+ Satisfaction.Details.end());
Cache.SubstExpr = E;
S.UnsubstitutedConstraintSatisfactionCache.insert({ID, std::move(Cache)});
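
The ranged insert used here (and repeated in the two hunks below) appends the new detail records in one call, letting the vector size itself for the whole range instead of growing element by element through a back_inserter. A generic sketch of the pattern, with int standing in for the detail type (AppendTail is a hypothetical name):

    #include <vector>
    void AppendTail(std::vector<int> &Cache, const std::vector<int> &Details,
                    unsigned Size) {
      // Single call; at most one reallocation for the whole tail.
      Cache.insert(Cache.end(), Details.begin() + Size, Details.end());
    }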
@@ -868,8 +874,9 @@ ExprResult ConstraintSatisfactionChecker::Evaluate(
UnsubstitutedConstraintSatisfactionCacheResult Cache;
Cache.Satisfaction.ContainsErrors = Satisfaction.ContainsErrors;
Cache.Satisfaction.IsSatisfied = Satisfaction.IsSatisfied;
- std::copy(Satisfaction.Details.begin() + Size, Satisfaction.Details.end(),
- std::back_inserter(Cache.Satisfaction.Details));
+ Cache.Satisfaction.Details.insert(Cache.Satisfaction.Details.end(),
+ Satisfaction.Details.begin() + Size,
+ Satisfaction.Details.end());
Cache.SubstExpr = E;
S.UnsubstitutedConstraintSatisfactionCache.insert({ID, std::move(Cache)});
return E;
@@ -1012,8 +1019,9 @@ ExprResult ConstraintSatisfactionChecker::Evaluate(
UnsubstitutedConstraintSatisfactionCacheResult Cache;
Cache.Satisfaction.ContainsErrors = Satisfaction.ContainsErrors;
Cache.Satisfaction.IsSatisfied = Satisfaction.IsSatisfied;
- std::copy(Satisfaction.Details.begin() + Size, Satisfaction.Details.end(),
- std::back_inserter(Cache.Satisfaction.Details));
+ Cache.Satisfaction.Details.insert(Cache.Satisfaction.Details.end(),
+ Satisfaction.Details.begin() + Size,
+ Satisfaction.Details.end());
Cache.SubstExpr = CE;
S.UnsubstitutedConstraintSatisfactionCache.insert({ID, std::move(Cache)});
return CE;
@@ -1217,10 +1225,10 @@ bool Sema::CheckConstraintSatisfaction(
return false;
}
-static const ExprResult
-SubstituteConceptsInConstrainExpression(Sema &S, const NamedDecl *D,
- const ConceptSpecializationExpr *CSE,
- UnsignedOrNone SubstIndex) {
+static ExprResult
+SubstituteConceptsInConstraintExpression(Sema &S, const NamedDecl *D,
+ const ConceptSpecializationExpr *CSE,
+ UnsignedOrNone SubstIndex) {
// [C++2c] [temp.constr.normal]
// Otherwise, to form CE, any non-dependent concept template argument Ai
@@ -1255,7 +1263,7 @@ bool Sema::CheckConstraintSatisfaction(
const ConceptSpecializationExpr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction) {
- ExprResult Res = SubstituteConceptsInConstrainExpression(
+ ExprResult Res = SubstituteConceptsInConstraintExpression(
*this, nullptr, ConstraintExpr, ArgPackSubstIndex);
if (!Res.isUsable())
return true;
@@ -2017,8 +2025,13 @@ void SubstituteParameterMappings::buildParameterMapping(
SemaRef.MarkUsedTemplateParameters(Args->arguments(),
/*Depth=*/0, OccurringIndices);
}
+ unsigned Size = OccurringIndices.count();
+ // When the constraint is independent of any template parameters,
+ // we build an empty mapping so that we can distinguish these cases
+ // from cases where no mapping exists at all, e.g. when there are only atomic
+ // constraints.
TemplateArgumentLoc *TempArgs =
- new (SemaRef.Context) TemplateArgumentLoc[OccurringIndices.count()];
+ new (SemaRef.Context) TemplateArgumentLoc[Size];
llvm::SmallVector<NamedDecl *> UsedParams;
for (unsigned I = 0, J = 0, C = TemplateParams->size(); I != C; ++I) {
SourceLocation Loc = ArgsAsWritten->NumTemplateArgs > I
@@ -2039,7 +2052,6 @@ void SubstituteParameterMappings::buildParameterMapping(
TemplateParams->getLAngleLoc(), UsedParams,
/*RAngleLoc=*/SourceLocation(),
/*RequiresClause=*/nullptr);
- unsigned Size = OccurringIndices.count();
N.updateParameterMapping(
std::move(OccurringIndices), std::move(OccurringIndicesForSubsumption),
MutableArrayRef<TemplateArgumentLoc>{TempArgs, Size}, UsedList);
@@ -2050,6 +2062,10 @@ bool SubstituteParameterMappings::substitute(
if (!N.hasParameterMapping())
buildParameterMapping(N);
+ // If the parameter mapping is empty, there is nothing to substitute.
+ if (N.getParameterMapping().empty())
+ return false;
+
SourceLocation InstLocBegin, InstLocEnd;
llvm::ArrayRef Arguments = ArgsAsWritten->arguments();
if (Arguments.empty()) {
@@ -2289,7 +2305,7 @@ NormalizedConstraint *NormalizedConstraint::fromConstraintExpr(
ConceptDecl *CD = CSE->getNamedConcept()->getCanonicalDecl();
ExprResult Res =
- SubstituteConceptsInConstrainExpression(S, D, CSE, SubstIndex);
+ SubstituteConceptsInConstraintExpression(S, D, CSE, SubstIndex);
if (!Res.isUsable())
return nullptr;
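
The early returns added in this file distinguish a parameter mapping that was never built from one that was built and is empty. A generic illustration of that absent-versus-empty distinction, not clang internals (NeedsSubstitution is a hypothetical name):

    #include <optional>
    #include <vector>
    bool NeedsSubstitution(const std::optional<std::vector<int>> &Mapping) {
      if (!Mapping)             // no mapping built yet (cf. hasParameterMapping())
        return true;            // caller must build it first
      return !Mapping->empty(); // built but empty: nothing to substitute
    }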
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index dca9d6e..a50c276 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -12811,7 +12811,7 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
if (auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(DC)) {
if (CTSD->isInStdNamespace() &&
llvm::StringSwitch<bool>(CTSD->getName())
- .Cases("less", "less_equal", "greater", "greater_equal", true)
+ .Cases({"less", "less_equal", "greater", "greater_equal"}, true)
.Default(false)) {
if (RHSType->isNullPtrType())
RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer);
diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp
index f7974eb..7debe33 100644
--- a/clang/lib/Sema/SemaInit.cpp
+++ b/clang/lib/Sema/SemaInit.cpp
@@ -672,11 +672,12 @@ ExprResult InitListChecker::PerformEmptyInit(SourceLocation Loc,
IsInStd = true;
}
- if (IsInStd && llvm::StringSwitch<bool>(R->getName())
- .Cases("basic_string", "deque", "forward_list", true)
- .Cases("list", "map", "multimap", "multiset", true)
- .Cases("priority_queue", "queue", "set", "stack", true)
- .Cases("unordered_map", "unordered_set", "vector", true)
+ if (IsInStd &&
+ llvm::StringSwitch<bool>(R->getName())
+ .Cases({"basic_string", "deque", "forward_list"}, true)
+ .Cases({"list", "map", "multimap", "multiset"}, true)
+ .Cases({"priority_queue", "queue", "set", "stack"}, true)
+ .Cases({"unordered_map", "unordered_set", "vector"}, true)
.Default(false)) {
InitSeq.InitializeFrom(
SemaRef, Entity,
diff --git a/clang/lib/Sema/SemaOpenACC.cpp b/clang/lib/Sema/SemaOpenACC.cpp
index ca99834..3bb8080 100644
--- a/clang/lib/Sema/SemaOpenACC.cpp
+++ b/clang/lib/Sema/SemaOpenACC.cpp
@@ -2996,6 +2996,8 @@ bool SemaOpenACC::CreateReductionCombinerRecipe(
case OpenACCReductionOperator::Max:
case OpenACCReductionOperator::Min:
+ BinOp = BinaryOperatorKind::BO_LT;
+ break;
case OpenACCReductionOperator::And:
case OpenACCReductionOperator::Or:
// We just want a 'NYI' error in the backend, so leave an empty combiner
@@ -3011,26 +3013,80 @@ bool SemaOpenACC::CreateReductionCombinerRecipe(
assert(!VarTy->isArrayType() && "Only 1 level of array allowed");
+ enum class CombinerFailureKind {
+ None = 0,
+ BinOp = 1,
+ Conditional = 2,
+ Assignment = 3,
+ };
+
+ auto genCombiner = [&, this](DeclRefExpr *LHSDRE, DeclRefExpr *RHSDRE)
+ -> std::pair<ExprResult, CombinerFailureKind> {
+ ExprResult BinOpRes =
+ SemaRef.BuildBinOp(SemaRef.getCurScope(), Loc, BinOp, LHSDRE, RHSDRE,
+ /*ForFoldExpr=*/false);
+ switch (ReductionOperator) {
+ case OpenACCReductionOperator::Addition:
+ case OpenACCReductionOperator::Multiplication:
+ case OpenACCReductionOperator::BitwiseAnd:
+ case OpenACCReductionOperator::BitwiseOr:
+ case OpenACCReductionOperator::BitwiseXOr:
+      // These five are simple and are handled as compound operators, so we
+      // can return immediately here.
+ return {BinOpRes, BinOpRes.isUsable() ? CombinerFailureKind::None
+ : CombinerFailureKind::BinOp};
+ case OpenACCReductionOperator::Max:
+ case OpenACCReductionOperator::Min: {
+      // These are done as:
+      //   Min: LHS = (LHS < RHS) ? LHS : RHS;
+      //   Max: LHS = (LHS < RHS) ? RHS : LHS;
+ //
+ // The BinOpRes should have been created with the less-than, so we just
+ // have to build the conditional and assignment.
+ if (!BinOpRes.isUsable())
+ return {BinOpRes, CombinerFailureKind::BinOp};
+
+ // Create the correct conditional operator, swapping the results
+ // (true/false value) depending on min/max.
+ ExprResult CondRes;
+ if (ReductionOperator == OpenACCReductionOperator::Min)
+ CondRes = SemaRef.ActOnConditionalOp(Loc, Loc, BinOpRes.get(), LHSDRE,
+ RHSDRE);
+ else
+ CondRes = SemaRef.ActOnConditionalOp(Loc, Loc, BinOpRes.get(), RHSDRE,
+ LHSDRE);
+
+ if (!CondRes.isUsable())
+ return {CondRes, CombinerFailureKind::Conditional};
+
+ // Build assignment.
+ ExprResult Assignment = SemaRef.BuildBinOp(SemaRef.getCurScope(), Loc,
+ BinaryOperatorKind::BO_Assign,
+ LHSDRE, CondRes.get(),
+ /*ForFoldExpr=*/false);
+ return {Assignment, Assignment.isUsable()
+ ? CombinerFailureKind::None
+ : CombinerFailureKind::Assignment};
+ }
+ case OpenACCReductionOperator::And:
+ case OpenACCReductionOperator::Or:
+ llvm_unreachable("And/Or not implemented, but should fail earlier");
+ case OpenACCReductionOperator::Invalid:
+ llvm_unreachable("Invalid should have been caught above");
+ }
+ };
+
auto tryCombiner = [&, this](DeclRefExpr *LHSDRE, DeclRefExpr *RHSDRE,
bool IncludeTrap) {
- // TODO: OpenACC: we have to figure out based on the bin-op how to do the
- // ones that we can't just use compound operators for. So &&, ||, max, and
- // min aren't really clear what we could do here.
if (IncludeTrap) {
      // Trap all of the errors here; we'll emit our own at the end.
Sema::TentativeAnalysisScope Trap{SemaRef};
-
- return SemaRef.BuildBinOp(SemaRef.getCurScope(), Loc, BinOp, LHSDRE,
- RHSDRE,
- /*ForFoldExpr=*/false);
- } else {
- return SemaRef.BuildBinOp(SemaRef.getCurScope(), Loc, BinOp, LHSDRE,
- RHSDRE,
- /*ForFoldExpr=*/false);
+ return genCombiner(LHSDRE, RHSDRE);
}
+ return genCombiner(LHSDRE, RHSDRE);
};
struct CombinerAttemptTy {
+ CombinerFailureKind FailKind;
VarDecl *LHS;
DeclRefExpr *LHSDRE;
VarDecl *RHS;
@@ -3058,9 +3114,11 @@ bool SemaOpenACC::CreateReductionCombinerRecipe(
RHSDecl->getBeginLoc()},
Ty, clang::VK_LValue, RHSDecl, nullptr, NOUR_None);
- ExprResult BinOpResult = tryCombiner(LHSDRE, RHSDRE, /*IncludeTrap=*/true);
+ std::pair<ExprResult, CombinerFailureKind> BinOpResult =
+ tryCombiner(LHSDRE, RHSDRE, /*IncludeTrap=*/true);
- return {LHSDecl, LHSDRE, RHSDecl, RHSDRE, BinOpResult.get()};
+ return {BinOpResult.second, LHSDecl, LHSDRE, RHSDecl, RHSDRE,
+ BinOpResult.first.get()};
};
CombinerAttemptTy TopLevelCombinerInfo = formCombiner(VarTy);
@@ -3081,12 +3139,20 @@ bool SemaOpenACC::CreateReductionCombinerRecipe(
}
}
+ auto EmitFailureNote = [&](CombinerFailureKind CFK) {
+ if (CFK == CombinerFailureKind::BinOp)
+ return Diag(Loc, diag::note_acc_reduction_combiner_forming)
+ << CFK << BinaryOperator::getOpcodeStr(BinOp);
+ return Diag(Loc, diag::note_acc_reduction_combiner_forming) << CFK;
+ };
+
  // Since the 'root' level attempt didn't succeed, the only thing that could
  // still succeed is a struct that we decompose into its individual fields.
RecordDecl *RD = VarTy->getAsRecordDecl();
if (!RD) {
Diag(Loc, diag::err_acc_reduction_recipe_no_op) << VarTy;
+ EmitFailureNote(TopLevelCombinerInfo.FailKind);
tryCombiner(TopLevelCombinerInfo.LHSDRE, TopLevelCombinerInfo.RHSDRE,
/*IncludeTrap=*/false);
return true;
@@ -3098,6 +3164,7 @@ bool SemaOpenACC::CreateReductionCombinerRecipe(
if (!FieldCombinerInfo.Op || FieldCombinerInfo.Op->containsErrors()) {
Diag(Loc, diag::err_acc_reduction_recipe_no_op) << FD->getType();
Diag(FD->getBeginLoc(), diag::note_acc_reduction_recipe_noop_field) << RD;
+ EmitFailureNote(FieldCombinerInfo.FailKind);
tryCombiner(FieldCombinerInfo.LHSDRE, FieldCombinerInfo.RHSDRE,
/*IncludeTrap=*/false);
return true;
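
The Min/Max paths above build one less-than comparison and differ only in which operand each arm of the conditional selects before the final assignment. The intended combiner shapes, written as plain C++ with scalar stand-ins rather than Sema-built AST nodes (MinCombine/MaxCombine are illustrative names):

    void MinCombine(int &LHS, int RHS) { LHS = (LHS < RHS) ? LHS : RHS; }
    void MaxCombine(int &LHS, int RHS) { LHS = (LHS < RHS) ? RHS : LHS; }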
diff --git a/clang/lib/Sema/SemaStmtAttr.cpp b/clang/lib/Sema/SemaStmtAttr.cpp
index 50acc83..27fd556 100644
--- a/clang/lib/Sema/SemaStmtAttr.cpp
+++ b/clang/lib/Sema/SemaStmtAttr.cpp
@@ -81,7 +81,7 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const ParsedAttr &A,
StringRef PragmaName =
llvm::StringSwitch<StringRef>(
PragmaNameLoc->getIdentifierInfo()->getName())
- .Cases("unroll", "nounroll", "unroll_and_jam", "nounroll_and_jam",
+ .Cases({"unroll", "nounroll", "unroll_and_jam", "nounroll_and_jam"},
PragmaNameLoc->getIdentifierInfo()->getName())
.Default("clang loop");
diff --git a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index f6a3e79..871400e 100644
--- a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -723,6 +723,7 @@ AnalysisConsumer::getModeForDecl(Decl *D, AnalysisMode Mode) {
}
static UnsignedEPStat PathRunningTime("PathRunningTime");
+static UnsignedEPStat SyntaxRunningTime("SyntaxRunningTime");
void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
ExprEngine::InliningModes IMode,
@@ -761,6 +762,8 @@ void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
SyntaxCheckTimer->stopTimer();
llvm::TimeRecord CheckerEndTime = SyntaxCheckTimer->getTotalTime();
CheckerEndTime -= CheckerStartTime;
+ FunctionSummaries.findOrInsertSummary(D)->second.SyntaxRunningTime =
+ std::lround(CheckerEndTime.getWallTime() * 1000);
DisplayTime(CheckerEndTime);
if (AnalyzerTimers && ShouldClearTimersToPreventDisplayingThem) {
AnalyzerTimers->clear();
@@ -792,11 +795,23 @@ void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
if (!CFG)
return;
+ CFGSize.set(CFG->size());
+
+ auto *DeclContext = Mgr->getAnalysisDeclContext(D);
// See if the LiveVariables analysis scales.
- if (!Mgr->getAnalysisDeclContext(D)->getAnalysis<RelaxedLiveVariables>())
+ if (!DeclContext->getAnalysis<RelaxedLiveVariables>())
return;
- CFGSize.set(CFG->size());
+  // The DeclContext's declaration is the redeclaration of D that has a body.
+ const Decl *DefDecl = DeclContext->getDecl();
+
+  // Get the SyntaxRunningTime from the function summary: it is computed
+  // during the AM_Syntax analysis, which runs at a different point in time
+  // and in a different order, but always before AM_Path.
+ if (const auto *Summary = FunctionSummaries.findSummary(DefDecl);
+ Summary && Summary->SyntaxRunningTime.has_value()) {
+ SyntaxRunningTime.set(*Summary->SyntaxRunningTime);
+ }
ExprEngine Eng(CTU, *Mgr, VisitedCallees, &FunctionSummaries, IMode);
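
The summary field set in HandleCode stores the syntax checker's wall-clock time rounded to whole milliseconds, which is what RunPathSensitiveChecks reads back above. A small sketch of just that unit conversion, assuming only that the timer reports seconds as a double (ToMillis is a hypothetical name):

    #include <cmath>
    long ToMillis(double WallSeconds) {
      return std::lround(WallSeconds * 1000.0); // e.g. 0.0123 s -> 12 ms
    }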