Diffstat (limited to 'clang/lib')
-rw-r--r-- clang/lib/AST/ASTConcept.cpp | 2
-rw-r--r-- clang/lib/AST/ByteCode/Context.cpp | 7
-rw-r--r-- clang/lib/AST/ByteCode/InterpBuiltin.cpp | 184
-rw-r--r-- clang/lib/AST/ByteCode/Program.cpp | 5
-rw-r--r-- clang/lib/AST/Decl.cpp | 10
-rw-r--r-- clang/lib/AST/ExprConstant.cpp | 139
-rw-r--r-- clang/lib/Analysis/ExprMutationAnalyzer.cpp | 20
-rw-r--r-- clang/lib/Analysis/FlowSensitive/Models/CMakeLists.txt | 1
-rw-r--r-- clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp | 6
-rw-r--r-- clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp | 290
-rw-r--r-- clang/lib/Basic/Targets/Mips.cpp | 10
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 10
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 182
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp | 165
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 5
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 6
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 25
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp | 7
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 4
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenValue.h | 11
-rw-r--r-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 10
-rw-r--r-- clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp | 31
-rw-r--r-- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 2
-rw-r--r-- clang/lib/CodeGen/CGAtomic.cpp | 155
-rw-r--r-- clang/lib/CodeGen/CGBlocks.cpp | 2
-rw-r--r-- clang/lib/CodeGen/CGBuiltin.cpp | 2
-rw-r--r-- clang/lib/CodeGen/CGCUDANV.cpp | 2
-rw-r--r-- clang/lib/CodeGen/CGExpr.cpp | 6
-rw-r--r-- clang/lib/CodeGen/CGObjCMac.cpp | 26
-rw-r--r-- clang/lib/CodeGen/CGOpenMPRuntime.cpp | 6
-rw-r--r-- clang/lib/CodeGen/CGPointerAuth.cpp | 4
-rw-r--r-- clang/lib/CodeGen/CodeGenModule.cpp | 12
-rw-r--r-- clang/lib/CodeGen/CodeGenTypeCache.h | 2
-rw-r--r-- clang/lib/CodeGen/ItaniumCXXABI.cpp | 24
-rw-r--r-- clang/lib/CodeGen/MicrosoftCXXABI.cpp | 32
-rw-r--r-- clang/lib/CodeGen/TargetBuiltins/ARM.cpp | 22
-rw-r--r-- clang/lib/CodeGen/TargetBuiltins/PPC.cpp | 2
-rw-r--r-- clang/lib/CodeGen/Targets/PPC.cpp | 2
-rw-r--r-- clang/lib/Driver/Distro.cpp | 26
-rw-r--r-- clang/lib/Driver/ToolChains/Arch/Mips.cpp | 6
-rw-r--r-- clang/lib/Driver/ToolChains/Darwin.cpp | 14
-rw-r--r-- clang/lib/Format/BreakableToken.cpp | 6
-rw-r--r-- clang/lib/Format/ContinuationIndenter.cpp | 23
-rw-r--r-- clang/lib/Format/DefinitionBlockSeparator.cpp | 2
-rw-r--r-- clang/lib/Format/Format.cpp | 22
-rw-r--r-- clang/lib/Format/FormatToken.cpp | 13
-rw-r--r-- clang/lib/Format/FormatToken.h | 3
-rw-r--r-- clang/lib/Format/TokenAnnotator.cpp | 92
-rw-r--r-- clang/lib/Format/WhitespaceManager.cpp | 198
-rw-r--r-- clang/lib/Frontend/FrontendAction.cpp | 2
-rw-r--r-- clang/lib/Headers/__clang_hip_runtime_wrapper.h | 1
-rw-r--r-- clang/lib/Headers/avx2intrin.h | 13
-rw-r--r-- clang/lib/Headers/avx512bwintrin.h | 15
-rw-r--r-- clang/lib/Headers/avx512dqintrin.h | 34
-rw-r--r-- clang/lib/Headers/avx512fintrin.h | 30
-rw-r--r-- clang/lib/Headers/avx512vlbwintrin.h | 8
-rw-r--r-- clang/lib/Headers/avx512vldqintrin.h | 18
-rw-r--r-- clang/lib/Headers/avx512vlintrin.h | 18
-rw-r--r-- clang/lib/Headers/avxintrin.h | 10
-rw-r--r-- clang/lib/Headers/emmintrin.h | 6
-rw-r--r-- clang/lib/Headers/smmintrin.h | 3
-rw-r--r-- clang/lib/Headers/tmmintrin.h | 13
-rw-r--r-- clang/lib/Headers/xmmintrin.h | 4
-rw-r--r-- clang/lib/Lex/PPLexerChange.cpp | 2
-rw-r--r-- clang/lib/Parse/ParsePragma.cpp | 9
-rw-r--r-- clang/lib/Sema/SemaChecking.cpp | 21
-rw-r--r-- clang/lib/Sema/SemaConcept.cpp | 78
-rw-r--r-- clang/lib/Sema/SemaExpr.cpp | 2
-rw-r--r-- clang/lib/Sema/SemaInit.cpp | 11
-rw-r--r-- clang/lib/Sema/SemaOpenACC.cpp | 93
-rw-r--r-- clang/lib/Sema/SemaRISCV.cpp | 3
-rw-r--r-- clang/lib/Sema/SemaStmtAttr.cpp | 2
-rw-r--r-- clang/lib/Sema/SemaTemplateInstantiate.cpp | 93
-rw-r--r-- clang/lib/Sema/SemaTemplateInstantiateDecl.cpp | 36
-rw-r--r-- clang/lib/Sema/TreeTransform.h | 12
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp | 2
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp | 2
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp | 8
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp | 4
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h | 2
-rw-r--r-- clang/lib/StaticAnalyzer/Core/CheckerManager.cpp | 11
-rw-r--r-- clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp | 62
-rw-r--r-- clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp | 19
-rw-r--r-- clang/lib/Support/RISCVVIntrinsicUtils.cpp | 5
84 files changed, 1837 insertions, 651 deletions
diff --git a/clang/lib/AST/ASTConcept.cpp b/clang/lib/AST/ASTConcept.cpp
index fd12bc4..9ea104c 100644
--- a/clang/lib/AST/ASTConcept.cpp
+++ b/clang/lib/AST/ASTConcept.cpp
@@ -86,7 +86,7 @@ void ConstraintSatisfaction::Profile(llvm::FoldingSetNodeID &ID,
ID.AddPointer(ConstraintOwner);
ID.AddInteger(TemplateArgs.size());
for (auto &Arg : TemplateArgs)
- Arg.Profile(ID, C);
+ C.getCanonicalTemplateArgument(Arg).Profile(ID, C);
}
ConceptReference *
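The fix above canonicalizes template arguments before profiling, so differently sugared spellings of the same argument hash to the same constraint-satisfaction cache key. A minimal sketch of the situation this addresses (the concept and alias below are illustrative, not from the patch):

```cpp
template <typename T>
concept Small = sizeof(T) <= 4;

using Int = int; // sugared spelling of 'int'

// Both checks name the same canonical argument, so they must produce (and
// reuse) the same cached satisfaction result.
static_assert(Small<int>);
static_assert(Small<Int>);
```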
diff --git a/clang/lib/AST/ByteCode/Context.cpp b/clang/lib/AST/ByteCode/Context.cpp
index 683e916..4f4b122 100644
--- a/clang/lib/AST/ByteCode/Context.cpp
+++ b/clang/lib/AST/ByteCode/Context.cpp
@@ -566,10 +566,15 @@ const Function *Context::getOrCreateFunction(const FunctionDecl *FuncDecl) {
// Assign descriptors to all parameters.
// Composite objects are lowered to pointers.
- for (const ParmVarDecl *PD : FuncDecl->parameters()) {
+ const auto *FuncProto = FuncDecl->getType()->getAs<FunctionProtoType>();
+ for (auto [ParamIndex, PD] : llvm::enumerate(FuncDecl->parameters())) {
bool IsConst = PD->getType().isConstQualified();
bool IsVolatile = PD->getType().isVolatileQualified();
+ if (!getASTContext().hasSameType(PD->getType(),
+ FuncProto->getParamType(ParamIndex)))
+ return nullptr;
+
OptPrimType T = classify(PD->getType());
PrimType PT = T.value_or(PT_Ptr);
Descriptor *Desc = P->createDescriptor(PD, PT, nullptr, std::nullopt,
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 5838cf8..39b991c 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -2899,6 +2899,35 @@ static bool interp__builtin_ia32_test_op(
return true;
}
+static bool interp__builtin_ia32_movmsk_op(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call) {
+ assert(Call->getNumArgs() == 1);
+
+ const Pointer &Source = S.Stk.pop<Pointer>();
+
+ unsigned SourceLen = Source.getNumElems();
+ QualType ElemQT = getElemType(Source);
+ OptPrimType ElemT = S.getContext().classify(ElemQT);
+ unsigned ResultLen =
+ S.getASTContext().getTypeSize(Call->getType()); // Always 32-bit integer.
+ APInt Result(ResultLen, 0);
+
+ for (unsigned I = 0; I != SourceLen; ++I) {
+ APInt Elem;
+ if (ElemQT->isIntegerType()) {
+ INT_TYPE_SWITCH_NO_BOOL(*ElemT, { Elem = Source.elem<T>(I).toAPSInt(); });
+ } else if (ElemQT->isRealFloatingType()) {
+ using T = PrimConv<PT_Float>::T;
+ Elem = Source.elem<T>(I).getAPFloat().bitcastToAPInt();
+ } else {
+ return false;
+ }
+ Result.setBitVal(I, Elem.isNegative());
+ }
+ pushInteger(S, Result, Call->getType());
+ return true;
+}
+
static bool interp__builtin_elementwise_triop(
InterpState &S, CodePtr OpPC, const CallExpr *Call,
llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &)>
@@ -2962,6 +2991,82 @@ static bool interp__builtin_elementwise_triop(
return true;
}
+static bool interp__builtin_x86_extract_vector(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call,
+ unsigned ID) {
+ assert(Call->getNumArgs() == 2);
+
+ APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
+ uint64_t Index = ImmAPS.getZExtValue();
+
+ const Pointer &Src = S.Stk.pop<Pointer>();
+ if (!Src.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ if (!Dst.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ unsigned SrcElems = Src.getNumElems();
+ unsigned DstElems = Dst.getNumElems();
+
+ unsigned NumLanes = SrcElems / DstElems;
+ unsigned Lane = static_cast<unsigned>(Index % NumLanes);
+ unsigned ExtractPos = Lane * DstElems;
+
+ PrimType ElemT = Src.getFieldDesc()->getPrimType();
+
+ TYPE_SWITCH(ElemT, {
+ for (unsigned I = 0; I != DstElems; ++I) {
+ Dst.elem<T>(I) = Src.elem<T>(ExtractPos + I);
+ }
+ });
+
+ Dst.initializeAllElements();
+ return true;
+}
+
+static bool interp__builtin_x86_extract_vector_masked(InterpState &S,
+ CodePtr OpPC,
+ const CallExpr *Call,
+ unsigned ID) {
+ assert(Call->getNumArgs() == 4);
+
+ APSInt MaskAPS = popToAPSInt(S, Call->getArg(3));
+ const Pointer &Merge = S.Stk.pop<Pointer>();
+ APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
+ const Pointer &Src = S.Stk.pop<Pointer>();
+
+ if (!Src.getFieldDesc()->isPrimitiveArray() ||
+ !Merge.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ const Pointer &Dst = S.Stk.peek<Pointer>();
+ if (!Dst.getFieldDesc()->isPrimitiveArray())
+ return false;
+
+ unsigned SrcElems = Src.getNumElems();
+ unsigned DstElems = Dst.getNumElems();
+
+ unsigned NumLanes = SrcElems / DstElems;
+ unsigned Lane = static_cast<unsigned>(ImmAPS.getZExtValue() % NumLanes);
+ unsigned Base = Lane * DstElems;
+
+ PrimType ElemT = Src.getFieldDesc()->getPrimType();
+
+ TYPE_SWITCH(ElemT, {
+ for (unsigned I = 0; I != DstElems; ++I) {
+ if (MaskAPS[I])
+ Dst.elem<T>(I) = Src.elem<T>(Base + I);
+ else
+ Dst.elem<T>(I) = Merge.elem<T>(I);
+ }
+ });
+
+ Dst.initializeAllElements();
+ return true;
+}
+
static bool interp__builtin_x86_insert_subvector(InterpState &S, CodePtr OpPC,
const CallExpr *Call,
unsigned ID) {
@@ -3003,6 +3108,45 @@ static bool interp__builtin_x86_insert_subvector(InterpState &S, CodePtr OpPC,
return true;
}
+static bool interp__builtin_ia32_phminposuw(InterpState &S, CodePtr OpPC,
+ const CallExpr *Call) {
+ assert(Call->getNumArgs() == 1);
+
+ const Pointer &Source = S.Stk.pop<Pointer>();
+ const Pointer &Dest = S.Stk.peek<Pointer>();
+
+ unsigned SourceLen = Source.getNumElems();
+ QualType ElemQT = getElemType(Source);
+ OptPrimType ElemT = S.getContext().classify(ElemQT);
+ unsigned ElemBitWidth = S.getASTContext().getTypeSize(ElemQT);
+
+ bool DestUnsigned = Call->getCallReturnType(S.getASTContext())
+ ->castAs<VectorType>()
+ ->getElementType()
+ ->isUnsignedIntegerOrEnumerationType();
+
+ INT_TYPE_SWITCH_NO_BOOL(*ElemT, {
+ APSInt MinIndex(ElemBitWidth, DestUnsigned);
+ APSInt MinVal = Source.elem<T>(0).toAPSInt();
+
+ for (unsigned I = 1; I != SourceLen; ++I) {
+ APSInt Val = Source.elem<T>(I).toAPSInt();
+ if (MinVal.ugt(Val)) {
+ MinVal = Val;
+ MinIndex = I;
+ }
+ }
+
+ Dest.elem<T>(0) = static_cast<T>(MinVal);
+ Dest.elem<T>(1) = static_cast<T>(MinIndex);
+ for (unsigned I = 2; I != SourceLen; ++I) {
+ Dest.elem<T>(I) = static_cast<T>(APSInt(ElemBitWidth, DestUnsigned));
+ }
+ });
+ Dest.initializeAllElements();
+ return true;
+}
+
static bool interp__builtin_ia32_pternlog(InterpState &S, CodePtr OpPC,
const CallExpr *Call, bool MaskZ) {
assert(Call->getNumArgs() == 5);
@@ -3620,6 +3764,43 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
});
+ case X86::BI__builtin_ia32_extract128i256:
+ case X86::BI__builtin_ia32_vextractf128_pd256:
+ case X86::BI__builtin_ia32_vextractf128_ps256:
+ case X86::BI__builtin_ia32_vextractf128_si256:
+ return interp__builtin_x86_extract_vector(S, OpPC, Call, BuiltinID);
+
+ case X86::BI__builtin_ia32_extractf32x4_256_mask:
+ case X86::BI__builtin_ia32_extractf32x4_mask:
+ case X86::BI__builtin_ia32_extractf32x8_mask:
+ case X86::BI__builtin_ia32_extractf64x2_256_mask:
+ case X86::BI__builtin_ia32_extractf64x2_512_mask:
+ case X86::BI__builtin_ia32_extractf64x4_mask:
+ case X86::BI__builtin_ia32_extracti32x4_256_mask:
+ case X86::BI__builtin_ia32_extracti32x4_mask:
+ case X86::BI__builtin_ia32_extracti32x8_mask:
+ case X86::BI__builtin_ia32_extracti64x2_256_mask:
+ case X86::BI__builtin_ia32_extracti64x2_512_mask:
+ case X86::BI__builtin_ia32_extracti64x4_mask:
+ return interp__builtin_x86_extract_vector_masked(S, OpPC, Call, BuiltinID);
+
+ case clang::X86::BI__builtin_ia32_pmulhrsw128:
+ case clang::X86::BI__builtin_ia32_pmulhrsw256:
+ case clang::X86::BI__builtin_ia32_pmulhrsw512:
+ return interp__builtin_elementwise_int_binop(
+ S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
+ return (llvm::APIntOps::mulsExtended(LHS, RHS).ashr(14) + 1)
+ .extractBits(16, 1);
+ });
+
+ case clang::X86::BI__builtin_ia32_movmskps:
+ case clang::X86::BI__builtin_ia32_movmskpd:
+ case clang::X86::BI__builtin_ia32_pmovmskb128:
+ case clang::X86::BI__builtin_ia32_pmovmskb256:
+ case clang::X86::BI__builtin_ia32_movmskps256:
+ case clang::X86::BI__builtin_ia32_movmskpd256: {
+ return interp__builtin_ia32_movmsk_op(S, OpPC, Call);
+ }
case clang::X86::BI__builtin_ia32_pavgb128:
case clang::X86::BI__builtin_ia32_pavgw128:
@@ -4078,6 +4259,9 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call,
S, OpPC, Call,
[](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
+ case X86::BI__builtin_ia32_phminposuw128:
+ return interp__builtin_ia32_phminposuw(S, OpPC, Call);
+
case X86::BI__builtin_ia32_pternlogd128_mask:
case X86::BI__builtin_ia32_pternlogd256_mask:
case X86::BI__builtin_ia32_pternlogd512_mask:
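For reference, the pmulhrsw cases above model the SSSE3/AVX "multiply high with round and scale" operation per 16-bit lane: widen to 32 bits, multiply, arithmetic-shift right by 14, add 1, then drop the low rounding bit (the `extractBits(16, 1)`). A scalar sketch with a hypothetical helper name:

```cpp
#include <cstdint>

// One PMULHRSW lane: (((a * b) >> 14) + 1) >> 1 on a 32-bit intermediate.
int16_t pmulhrswLane(int16_t a, int16_t b) {
  int32_t wide = int32_t(a) * int32_t(b); // cannot overflow 32 bits
  int32_t rounded = (wide >> 14) + 1;     // keep one extra bit for rounding
  return int16_t(rounded >> 1);           // same as extractBits(16, 1)
}
```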
diff --git a/clang/lib/AST/ByteCode/Program.cpp b/clang/lib/AST/ByteCode/Program.cpp
index e653782..e0b2852 100644
--- a/clang/lib/AST/ByteCode/Program.cpp
+++ b/clang/lib/AST/ByteCode/Program.cpp
@@ -226,7 +226,10 @@ UnsignedOrNone Program::createGlobal(const ValueDecl *VD, const Expr *Init) {
Globals[PIdx] = NewGlobal;
// All pointers pointing to the previous extern decl now point to the
// new decl.
- RedeclBlock->movePointersTo(NewGlobal->block());
+ // A previous iteration might've already fixed up the pointers for this
+ // global.
+ if (RedeclBlock != NewGlobal->block())
+ RedeclBlock->movePointersTo(NewGlobal->block());
}
}
PIdx = *Idx;
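The guard added above matters when the bytecode interpreter sees the same global through several redeclarations. An illustrative constant-evaluated input that exercises the pointer fixup (the exact double-visit scenario the new comment guards against involves revisiting the redeclaration chain):

```cpp
extern const int g;          // first seen as an extern declaration
constexpr const int *p = &g; // pointer created against the extern block
const int g = 7;             // definition replaces the global's block
static_assert(*p == 7);      // p must now point at the new block
```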
diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp
index f048076..8579e51 100644
--- a/clang/lib/AST/Decl.cpp
+++ b/clang/lib/AST/Decl.cpp
@@ -3380,11 +3380,11 @@ bool FunctionDecl::isMSVCRTEntryPoint() const {
return false;
return llvm::StringSwitch<bool>(getName())
- .Cases("main", // an ANSI console app
- "wmain", // a Unicode console App
- "WinMain", // an ANSI GUI app
- "wWinMain", // a Unicode GUI app
- "DllMain", // a DLL
+ .Cases({"main", // an ANSI console app
+ "wmain", // a Unicode console App
+ "WinMain", // an ANSI GUI app
+ "wWinMain", // a Unicode GUI app
+ "DllMain"}, // a DLL
true)
.Default(false);
}
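This hunk (and the Mips.cpp hunk later in this diff) migrates `llvm::StringSwitch::Cases` from variadic string arguments to an initializer-list overload, as shown in the changed lines. The shape of the new API in a self-contained sketch:

```cpp
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

// The matched strings move into a braced list; the shared result stays last.
static bool isEntryPointName(llvm::StringRef Name) {
  return llvm::StringSwitch<bool>(Name)
      .Cases({"main", "wmain", "WinMain", "wWinMain", "DllMain"}, true)
      .Default(false);
}
```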
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 16141b2..00aaaab 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -11811,6 +11811,73 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
});
+ case X86::BI__builtin_ia32_extract128i256:
+ case X86::BI__builtin_ia32_vextractf128_pd256:
+ case X86::BI__builtin_ia32_vextractf128_ps256:
+ case X86::BI__builtin_ia32_vextractf128_si256: {
+ APValue SourceVec, SourceImm;
+ if (!EvaluateAsRValue(Info, E->getArg(0), SourceVec) ||
+ !EvaluateAsRValue(Info, E->getArg(1), SourceImm))
+ return false;
+
+ if (!SourceVec.isVector())
+ return false;
+
+ const auto *RetVT = E->getType()->castAs<VectorType>();
+ unsigned RetLen = RetVT->getNumElements();
+ unsigned Idx = SourceImm.getInt().getZExtValue() & 1;
+
+ SmallVector<APValue, 32> ResultElements;
+ ResultElements.reserve(RetLen);
+
+ for (unsigned I = 0; I < RetLen; I++)
+ ResultElements.push_back(SourceVec.getVectorElt(Idx * RetLen + I));
+
+ return Success(APValue(ResultElements.data(), RetLen), E);
+ }
+
+ case X86::BI__builtin_ia32_extracti32x4_256_mask:
+ case X86::BI__builtin_ia32_extractf32x4_256_mask:
+ case X86::BI__builtin_ia32_extracti32x4_mask:
+ case X86::BI__builtin_ia32_extractf32x4_mask:
+ case X86::BI__builtin_ia32_extracti32x8_mask:
+ case X86::BI__builtin_ia32_extractf32x8_mask:
+ case X86::BI__builtin_ia32_extracti64x2_256_mask:
+ case X86::BI__builtin_ia32_extractf64x2_256_mask:
+ case X86::BI__builtin_ia32_extracti64x2_512_mask:
+ case X86::BI__builtin_ia32_extractf64x2_512_mask:
+ case X86::BI__builtin_ia32_extracti64x4_mask:
+ case X86::BI__builtin_ia32_extractf64x4_mask: {
+ APValue SourceVec, MergeVec;
+ APSInt Imm, MaskImm;
+
+ if (!EvaluateAsRValue(Info, E->getArg(0), SourceVec) ||
+ !EvaluateInteger(E->getArg(1), Imm, Info) ||
+ !EvaluateAsRValue(Info, E->getArg(2), MergeVec) ||
+ !EvaluateInteger(E->getArg(3), MaskImm, Info))
+ return false;
+
+ const auto *RetVT = E->getType()->castAs<VectorType>();
+ unsigned RetLen = RetVT->getNumElements();
+
+ if (!SourceVec.isVector() || !MergeVec.isVector())
+ return false;
+ unsigned SrcLen = SourceVec.getVectorLength();
+ unsigned Lanes = SrcLen / RetLen;
+ unsigned Lane = static_cast<unsigned>(Imm.getZExtValue() % Lanes);
+ unsigned Base = Lane * RetLen;
+
+ SmallVector<APValue, 32> ResultElements;
+ ResultElements.reserve(RetLen);
+ for (unsigned I = 0; I < RetLen; ++I) {
+ if (MaskImm[I])
+ ResultElements.push_back(SourceVec.getVectorElt(Base + I));
+ else
+ ResultElements.push_back(MergeVec.getVectorElt(I));
+ }
+ return Success(APValue(ResultElements.data(), ResultElements.size()), E);
+ }
+
case clang::X86::BI__builtin_ia32_pavgb128:
case clang::X86::BI__builtin_ia32_pavgw128:
case clang::X86::BI__builtin_ia32_pavgb256:
@@ -11819,6 +11886,14 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
case clang::X86::BI__builtin_ia32_pavgw512:
return EvaluateBinOpExpr(llvm::APIntOps::avgCeilU);
+ case clang::X86::BI__builtin_ia32_pmulhrsw128:
+ case clang::X86::BI__builtin_ia32_pmulhrsw256:
+ case clang::X86::BI__builtin_ia32_pmulhrsw512:
+ return EvaluateBinOpExpr([](const APSInt &LHS, const APSInt &RHS) {
+ return (llvm::APIntOps::mulsExtended(LHS, RHS).ashr(14) + 1)
+ .extractBits(16, 1);
+ });
+
case clang::X86::BI__builtin_ia32_pmaddubsw128:
case clang::X86::BI__builtin_ia32_pmaddubsw256:
case clang::X86::BI__builtin_ia32_pmaddubsw512:
@@ -12345,6 +12420,40 @@ bool VectorExprEvaluator::VisitCallExpr(const CallExpr *E) {
return Success(R, E);
}
+ case X86::BI__builtin_ia32_phminposuw128: {
+ APValue Source;
+ if (!Evaluate(Source, Info, E->getArg(0)))
+ return false;
+ unsigned SourceLen = Source.getVectorLength();
+ const VectorType *VT = E->getArg(0)->getType()->castAs<VectorType>();
+ QualType ElemQT = VT->getElementType();
+ unsigned ElemBitWidth = Info.Ctx.getTypeSize(ElemQT);
+
+ APInt MinIndex(ElemBitWidth, 0);
+ APInt MinVal = Source.getVectorElt(0).getInt();
+ for (unsigned I = 1; I != SourceLen; ++I) {
+ APInt Val = Source.getVectorElt(I).getInt();
+ if (MinVal.ugt(Val)) {
+ MinVal = Val;
+ MinIndex = I;
+ }
+ }
+
+ bool ResultUnsigned = E->getCallReturnType(Info.Ctx)
+ ->castAs<VectorType>()
+ ->getElementType()
+ ->isUnsignedIntegerOrEnumerationType();
+
+ SmallVector<APValue, 8> Result;
+ Result.reserve(SourceLen);
+ Result.emplace_back(APSInt(MinVal, ResultUnsigned));
+ Result.emplace_back(APSInt(MinIndex, ResultUnsigned));
+ for (unsigned I = 0; I != SourceLen - 2; ++I) {
+ Result.emplace_back(APSInt(APInt(ElemBitWidth, 0), ResultUnsigned));
+ }
+ return Success(APValue(Result.data(), Result.size()), E);
+ }
+
case X86::BI__builtin_ia32_pternlogd128_mask:
case X86::BI__builtin_ia32_pternlogd256_mask:
case X86::BI__builtin_ia32_pternlogd512_mask:
@@ -15260,6 +15369,36 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return Success(CarryOut, E);
}
+ case clang::X86::BI__builtin_ia32_movmskps:
+ case clang::X86::BI__builtin_ia32_movmskpd:
+ case clang::X86::BI__builtin_ia32_pmovmskb128:
+ case clang::X86::BI__builtin_ia32_pmovmskb256:
+ case clang::X86::BI__builtin_ia32_movmskps256:
+ case clang::X86::BI__builtin_ia32_movmskpd256: {
+ APValue Source;
+ if (!Evaluate(Source, Info, E->getArg(0)))
+ return false;
+ unsigned SourceLen = Source.getVectorLength();
+ const VectorType *VT = E->getArg(0)->getType()->castAs<VectorType>();
+ QualType ElemQT = VT->getElementType();
+ unsigned ResultLen = Info.Ctx.getTypeSize(
+ E->getCallReturnType(Info.Ctx)); // Always 32-bit integer.
+ APInt Result(ResultLen, 0);
+
+ for (unsigned I = 0; I != SourceLen; ++I) {
+ APInt Elem;
+ if (ElemQT->isIntegerType()) {
+ Elem = Source.getVectorElt(I).getInt();
+ } else if (ElemQT->isRealFloatingType()) {
+ Elem = Source.getVectorElt(I).getFloat().bitcastToAPInt();
+ } else {
+ return false;
+ }
+ Result.setBitVal(I, Elem.isNegative());
+ }
+ return Success(Result, E);
+ }
+
case clang::X86::BI__builtin_ia32_bextr_u32:
case clang::X86::BI__builtin_ia32_bextr_u64:
case clang::X86::BI__builtin_ia32_bextri_u32:
diff --git a/clang/lib/Analysis/ExprMutationAnalyzer.cpp b/clang/lib/Analysis/ExprMutationAnalyzer.cpp
index 1e376da..75b17c54 100644
--- a/clang/lib/Analysis/ExprMutationAnalyzer.cpp
+++ b/clang/lib/Analysis/ExprMutationAnalyzer.cpp
@@ -140,7 +140,8 @@ class ExprPointeeResolve {
// explicit cast will be checked in `findPointeeToNonConst`
const CastKind kind = ICE->getCastKind();
if (kind == CK_LValueToRValue || kind == CK_DerivedToBase ||
- kind == CK_UncheckedDerivedToBase)
+ kind == CK_UncheckedDerivedToBase ||
+ (kind == CK_NoOp && (ICE->getType() == ICE->getSubExpr()->getType())))
return resolveExpr(ICE->getSubExpr());
return false;
}
@@ -788,13 +789,16 @@ ExprMutationAnalyzer::Analyzer::findPointeeToNonConst(const Expr *Exp) {
// FIXME: false positive if the pointee does not change in lambda
const auto CaptureNoConst = lambdaExpr(hasCaptureInit(Exp));
- const auto Matches =
- match(stmt(anyOf(forEachDescendant(
- stmt(anyOf(AssignToNonConst, PassAsNonConstArg,
- CastToNonConst, CaptureNoConst))
- .bind("stmt")),
- forEachDescendant(InitToNonConst))),
- Stm, Context);
+ const auto ReturnNoConst =
+ returnStmt(hasReturnValue(canResolveToExprPointee(Exp)));
+
+ const auto Matches = match(
+ stmt(anyOf(forEachDescendant(
+ stmt(anyOf(AssignToNonConst, PassAsNonConstArg,
+ CastToNonConst, CaptureNoConst, ReturnNoConst))
+ .bind("stmt")),
+ forEachDescendant(InitToNonConst))),
+ Stm, Context);
return selectFirst<Stmt>("stmt", Matches);
}
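The new `ReturnNoConst` matcher treats a return statement whose value can resolve to the analyzed pointee as a potential mutation, since the caller receives non-const access. A hypothetical example of code it is meant to cover:

```cpp
// Returning a non-const pointer derived from 'p' hands the pointee to the
// caller for mutation, so the pointee can no longer be treated as unmodified.
int *passThrough(int *p) {
  return p;
}
```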
diff --git a/clang/lib/Analysis/FlowSensitive/Models/CMakeLists.txt b/clang/lib/Analysis/FlowSensitive/Models/CMakeLists.txt
index 89bbe87..d1236f5 100644
--- a/clang/lib/Analysis/FlowSensitive/Models/CMakeLists.txt
+++ b/clang/lib/Analysis/FlowSensitive/Models/CMakeLists.txt
@@ -1,6 +1,7 @@
add_clang_library(clangAnalysisFlowSensitiveModels
ChromiumCheckModel.cpp
UncheckedOptionalAccessModel.cpp
+ UncheckedStatusOrAccessModel.cpp
LINK_LIBS
clangAnalysis
diff --git a/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp b/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
index bb703ef..0fa333e 100644
--- a/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
+++ b/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
@@ -241,9 +241,9 @@ auto nulloptTypeDecl() {
auto hasNulloptType() { return hasType(nulloptTypeDecl()); }
auto inPlaceClass() {
- return recordDecl(hasAnyName("std::in_place_t", "absl::in_place_t",
- "base::in_place_t", "folly::in_place_t",
- "bsl::in_place_t"));
+ return namedDecl(hasAnyName("std::in_place_t", "absl::in_place_t",
+ "base::in_place_t", "folly::in_place_t",
+ "bsl::in_place_t"));
}
auto isOptionalNulloptConstructor() {
diff --git a/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp b/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp
new file mode 100644
index 0000000..c88a470
--- /dev/null
+++ b/clang/lib/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.cpp
@@ -0,0 +1,290 @@
+//===- UncheckedStatusOrAccessModel.cpp -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/Models/UncheckedStatusOrAccessModel.h"
+
+#include <cassert>
+#include <utility>
+
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/TypeBase.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/ASTMatchers/ASTMatchersInternal.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/FlowSensitive/CFGMatchSwitch.h"
+#include "clang/Analysis/FlowSensitive/DataflowAnalysis.h"
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Analysis/FlowSensitive/MatchSwitch.h"
+#include "clang/Analysis/FlowSensitive/StorageLocation.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/StringMap.h"
+
+namespace clang::dataflow::statusor_model {
+namespace {
+
+using ::clang::ast_matchers::MatchFinder;
+using ::clang::ast_matchers::StatementMatcher;
+
+} // namespace
+
+static bool namespaceEquals(const NamespaceDecl *NS,
+ clang::ArrayRef<clang::StringRef> NamespaceNames) {
+ while (!NamespaceNames.empty() && NS) {
+ if (NS->getName() != NamespaceNames.consume_back())
+ return false;
+ NS = dyn_cast_or_null<NamespaceDecl>(NS->getParent());
+ }
+ return NamespaceNames.empty() && !NS;
+}
+
+// TODO: move this to a proper place to share with the rest of clang
+static bool isTypeNamed(QualType Type, clang::ArrayRef<clang::StringRef> NS,
+ StringRef Name) {
+ if (Type.isNull())
+ return false;
+ if (auto *RD = Type->getAsRecordDecl())
+ if (RD->getName() == Name)
+ if (const auto *N = dyn_cast_or_null<NamespaceDecl>(RD->getDeclContext()))
+ return namespaceEquals(N, NS);
+ return false;
+}
+
+static bool isStatusOrOperatorBaseType(QualType Type) {
+ return isTypeNamed(Type, {"absl", "internal_statusor"}, "OperatorBase");
+}
+
+static bool isSafeUnwrap(RecordStorageLocation *StatusOrLoc,
+ const Environment &Env) {
+ if (!StatusOrLoc)
+ return false;
+ auto &StatusLoc = locForStatus(*StatusOrLoc);
+ auto *OkVal = Env.get<BoolValue>(locForOk(StatusLoc));
+ return OkVal != nullptr && Env.proves(OkVal->formula());
+}
+
+static ClassTemplateSpecializationDecl *
+getStatusOrBaseClass(const QualType &Ty) {
+ auto *RD = Ty->getAsCXXRecordDecl();
+ if (RD == nullptr)
+ return nullptr;
+ if (isStatusOrType(Ty) ||
+ // In case we are analyzing code under OperatorBase itself that uses
+ // operator* (e.g. to implement operator->).
+ isStatusOrOperatorBaseType(Ty))
+ return cast<ClassTemplateSpecializationDecl>(RD);
+ if (!RD->hasDefinition())
+ return nullptr;
+ for (const auto &Base : RD->bases())
+ if (auto *QT = getStatusOrBaseClass(Base.getType()))
+ return QT;
+ return nullptr;
+}
+
+static QualType getStatusOrValueType(ClassTemplateSpecializationDecl *TRD) {
+ return TRD->getTemplateArgs().get(0).getAsType();
+}
+
+static auto isStatusOrMemberCallWithName(llvm::StringRef member_name) {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return cxxMemberCallExpr(
+ on(expr(unless(cxxThisExpr()))),
+ callee(cxxMethodDecl(
+ hasName(member_name),
+ ofClass(anyOf(statusOrClass(), statusOrOperatorBaseClass())))));
+}
+
+static auto isStatusOrOperatorCallWithName(llvm::StringRef operator_name) {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return cxxOperatorCallExpr(
+ hasOverloadedOperatorName(operator_name),
+ callee(cxxMethodDecl(
+ ofClass(anyOf(statusOrClass(), statusOrOperatorBaseClass())))));
+}
+
+static auto valueCall() {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return anyOf(isStatusOrMemberCallWithName("value"),
+ isStatusOrMemberCallWithName("ValueOrDie"));
+}
+
+static auto valueOperatorCall() {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return expr(anyOf(isStatusOrOperatorCallWithName("*"),
+ isStatusOrOperatorCallWithName("->")));
+}
+
+static auto
+buildDiagnoseMatchSwitch(const UncheckedStatusOrAccessModelOptions &Options) {
+ return CFGMatchSwitchBuilder<const Environment,
+ llvm::SmallVector<SourceLocation>>()
+ // StatusOr::value, StatusOr::ValueOrDie
+ .CaseOfCFGStmt<CXXMemberCallExpr>(
+ valueCall(),
+ [](const CXXMemberCallExpr *E,
+ const ast_matchers::MatchFinder::MatchResult &,
+ const Environment &Env) {
+ if (!isSafeUnwrap(getImplicitObjectLocation(*E, Env), Env))
+ return llvm::SmallVector<SourceLocation>({E->getExprLoc()});
+ return llvm::SmallVector<SourceLocation>();
+ })
+
+ // StatusOr::operator*, StatusOr::operator->
+ .CaseOfCFGStmt<CXXOperatorCallExpr>(
+ valueOperatorCall(),
+ [](const CXXOperatorCallExpr *E,
+ const ast_matchers::MatchFinder::MatchResult &,
+ const Environment &Env) {
+ RecordStorageLocation *StatusOrLoc =
+ Env.get<RecordStorageLocation>(*E->getArg(0));
+ if (!isSafeUnwrap(StatusOrLoc, Env))
+ return llvm::SmallVector<SourceLocation>({E->getOperatorLoc()});
+ return llvm::SmallVector<SourceLocation>();
+ })
+ .Build();
+}
+
+UncheckedStatusOrAccessDiagnoser::UncheckedStatusOrAccessDiagnoser(
+ UncheckedStatusOrAccessModelOptions Options)
+ : DiagnoseMatchSwitch(buildDiagnoseMatchSwitch(Options)) {}
+
+llvm::SmallVector<SourceLocation> UncheckedStatusOrAccessDiagnoser::operator()(
+ const CFGElement &Elt, ASTContext &Ctx,
+ const TransferStateForDiagnostics<UncheckedStatusOrAccessModel::Lattice>
+ &State) {
+ return DiagnoseMatchSwitch(Elt, Ctx, State.Env);
+}
+
+BoolValue &initializeStatus(RecordStorageLocation &StatusLoc,
+ Environment &Env) {
+ auto &OkVal = Env.makeAtomicBoolValue();
+ Env.setValue(locForOk(StatusLoc), OkVal);
+ return OkVal;
+}
+
+BoolValue &initializeStatusOr(RecordStorageLocation &StatusOrLoc,
+ Environment &Env) {
+ return initializeStatus(locForStatus(StatusOrLoc), Env);
+}
+
+clang::ast_matchers::DeclarationMatcher statusOrClass() {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return classTemplateSpecializationDecl(
+ hasName("absl::StatusOr"),
+ hasTemplateArgument(0, refersToType(type().bind("T"))));
+}
+
+clang::ast_matchers::DeclarationMatcher statusClass() {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return cxxRecordDecl(hasName("absl::Status"));
+}
+
+clang::ast_matchers::DeclarationMatcher statusOrOperatorBaseClass() {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return classTemplateSpecializationDecl(
+ hasName("absl::internal_statusor::OperatorBase"));
+}
+
+clang::ast_matchers::TypeMatcher statusOrType() {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return hasCanonicalType(qualType(hasDeclaration(statusOrClass())));
+}
+
+bool isStatusOrType(QualType Type) {
+ return isTypeNamed(Type, {"absl"}, "StatusOr");
+}
+
+bool isStatusType(QualType Type) {
+ return isTypeNamed(Type, {"absl"}, "Status");
+}
+
+llvm::StringMap<QualType> getSyntheticFields(QualType Ty, QualType StatusType,
+ const CXXRecordDecl &RD) {
+ if (auto *TRD = getStatusOrBaseClass(Ty))
+ return {{"status", StatusType}, {"value", getStatusOrValueType(TRD)}};
+ if (isStatusType(Ty) || (RD.hasDefinition() &&
+ RD.isDerivedFrom(StatusType->getAsCXXRecordDecl())))
+ return {{"ok", RD.getASTContext().BoolTy}};
+ return {};
+}
+
+RecordStorageLocation &locForStatus(RecordStorageLocation &StatusOrLoc) {
+ return cast<RecordStorageLocation>(StatusOrLoc.getSyntheticField("status"));
+}
+
+StorageLocation &locForOk(RecordStorageLocation &StatusLoc) {
+ return StatusLoc.getSyntheticField("ok");
+}
+
+BoolValue &valForOk(RecordStorageLocation &StatusLoc, Environment &Env) {
+ if (auto *Val = Env.get<BoolValue>(locForOk(StatusLoc)))
+ return *Val;
+ return initializeStatus(StatusLoc, Env);
+}
+
+static void transferStatusOrOkCall(const CXXMemberCallExpr *Expr,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ RecordStorageLocation *StatusOrLoc =
+ getImplicitObjectLocation(*Expr, State.Env);
+ if (StatusOrLoc == nullptr)
+ return;
+
+ auto &OkVal = valForOk(locForStatus(*StatusOrLoc), State.Env);
+ State.Env.setValue(*Expr, OkVal);
+}
+
+CFGMatchSwitch<LatticeTransferState>
+buildTransferMatchSwitch(ASTContext &Ctx,
+ CFGMatchSwitchBuilder<LatticeTransferState> Builder) {
+ using namespace ::clang::ast_matchers; // NOLINT: Too many names
+ return std::move(Builder)
+ .CaseOfCFGStmt<CXXMemberCallExpr>(isStatusOrMemberCallWithName("ok"),
+ transferStatusOrOkCall)
+ .Build();
+}
+
+QualType findStatusType(const ASTContext &Ctx) {
+ for (Type *Ty : Ctx.getTypes())
+ if (isStatusType(QualType(Ty, 0)))
+ return QualType(Ty, 0);
+
+ return QualType();
+}
+
+UncheckedStatusOrAccessModel::UncheckedStatusOrAccessModel(ASTContext &Ctx,
+ Environment &Env)
+ : DataflowAnalysis<UncheckedStatusOrAccessModel,
+ UncheckedStatusOrAccessModel::Lattice>(Ctx),
+ TransferMatchSwitch(buildTransferMatchSwitch(Ctx, {})) {
+ QualType StatusType = findStatusType(Ctx);
+ Env.getDataflowAnalysisContext().setSyntheticFieldCallback(
+ [StatusType](QualType Ty) -> llvm::StringMap<QualType> {
+ CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+ if (RD == nullptr)
+ return {};
+
+ if (auto Fields = getSyntheticFields(Ty, StatusType, *RD);
+ !Fields.empty())
+ return Fields;
+ return {};
+ });
+}
+
+void UncheckedStatusOrAccessModel::transfer(const CFGElement &Elt, Lattice &L,
+ Environment &Env) {
+ LatticeTransferState State(L, Env);
+ TransferMatchSwitch(Elt, getASTContext(), State);
+}
+
+} // namespace clang::dataflow::statusor_model
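As a usage sketch, the new model distinguishes unwraps that are dominated by a positive `ok()` check from those that are not (the function and variable names below are illustrative):

```cpp
#include "absl/status/statusor.h"

int demo(const absl::StatusOr<int> &sor) {
  if (sor.ok())
    return *sor;       // safe: the environment proves ok() on this path
  return sor.value();  // diagnosed: unwrap without a proven ok()
}
```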
diff --git a/clang/lib/Basic/Targets/Mips.cpp b/clang/lib/Basic/Targets/Mips.cpp
index de6ccff..a999d14 100644
--- a/clang/lib/Basic/Targets/Mips.cpp
+++ b/clang/lib/Basic/Targets/Mips.cpp
@@ -68,11 +68,11 @@ void MipsTargetInfo::fillValidCPUList(
unsigned MipsTargetInfo::getISARev() const {
return llvm::StringSwitch<unsigned>(getCPU())
- .Cases("mips32", "mips64", 1)
- .Cases("mips32r2", "mips64r2", "octeon", "octeon+", 2)
- .Cases("mips32r3", "mips64r3", 3)
- .Cases("mips32r5", "mips64r5", "p5600", 5)
- .Cases("mips32r6", "mips64r6", "i6400", "i6500", 6)
+ .Cases({"mips32", "mips64"}, 1)
+ .Cases({"mips32r2", "mips64r2", "octeon", "octeon+"}, 2)
+ .Cases({"mips32r3", "mips64r3"}, 3)
+ .Cases({"mips32r5", "mips64r5", "p5600"}, 5)
+ .Cases({"mips32r6", "mips64r6", "i6400", "i6500"}, 6)
.Default(0);
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index 4a19d91..5667273 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -740,6 +740,16 @@ struct CallStackRestore final : EHScopeStack::Cleanup {
};
} // namespace
+/// Push the standard destructor for the given type as
+/// at least a normal cleanup.
+void CIRGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
+ Address addr, QualType type) {
+ assert(dtorKind && "cannot push destructor for trivial type");
+
+ CleanupKind cleanupKind = getCleanupKind(dtorKind);
+ pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind));
+}
+
void CIRGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
QualType type, Destroyer *destroyer) {
pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type, destroyer);
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 4897c29..9732c9c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -1626,14 +1626,15 @@ LValue CIRGenFunction::emitBinaryOperatorLValue(const BinaryOperator *e) {
/// Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
-RValue CIRGenFunction::emitAnyExpr(const Expr *e, AggValueSlot aggSlot) {
+RValue CIRGenFunction::emitAnyExpr(const Expr *e, AggValueSlot aggSlot,
+ bool ignoreResult) {
switch (CIRGenFunction::getEvaluationKind(e->getType())) {
case cir::TEK_Scalar:
return RValue::get(emitScalarExpr(e));
case cir::TEK_Complex:
return RValue::getComplex(emitComplexExpr(e));
case cir::TEK_Aggregate: {
- if (aggSlot.isIgnored())
+ if (!ignoreResult && aggSlot.isIgnored())
aggSlot = createAggTemp(e->getType(), getLoc(e->getSourceRange()),
getCounterAggTmpAsString());
emitAggExpr(e, aggSlot);
@@ -1869,8 +1870,7 @@ RValue CIRGenFunction::emitCallExpr(const clang::CallExpr *e,
/// Emit code to compute the specified expression, ignoring the result.
void CIRGenFunction::emitIgnoredExpr(const Expr *e) {
if (e->isPRValue()) {
- assert(!cir::MissingFeatures::aggValueSlot());
- emitAnyExpr(e);
+ emitAnyExpr(e, AggValueSlot::ignored(), /*ignoreResult=*/true);
return;
}
@@ -2394,6 +2394,180 @@ LValue CIRGenFunction::emitPredefinedLValue(const PredefinedExpr *e) {
return emitStringLiteralLValue(sl, gvName);
}
+LValue CIRGenFunction::emitOpaqueValueLValue(const OpaqueValueExpr *e) {
+ assert(OpaqueValueMappingData::shouldBindAsLValue(e));
+ return getOrCreateOpaqueLValueMapping(e);
+}
+
+namespace {
+// Handle the case where the condition constant-folds to a simple integer,
+// which means we don't have to separately handle the true/false blocks.
+std::optional<LValue> handleConditionalOperatorLValueSimpleCase(
+ CIRGenFunction &cgf, const AbstractConditionalOperator *e) {
+ const Expr *condExpr = e->getCond();
+ llvm::APSInt condExprVal;
+ if (!cgf.constantFoldsToSimpleInteger(condExpr, condExprVal))
+ return std::nullopt;
+
+ const Expr *live = e->getTrueExpr(), *dead = e->getFalseExpr();
+ if (!condExprVal.getBoolValue())
+ std::swap(live, dead);
+
+ if (cgf.containsLabel(dead))
+ return std::nullopt;
+
+ // If the true case is live, we need to track its region.
+ assert(!cir::MissingFeatures::incrementProfileCounter());
+ assert(!cir::MissingFeatures::pgoUse());
+ // If the live arm is a throw expression, we emit it and return an
+ // undefined lvalue because its result can't be used.
+ if (auto *throwExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) {
+ cgf.emitCXXThrowExpr(throwExpr);
+ // Return an undefined lvalue; the throw terminates execution, so this
+ // value will never actually be used.
+ mlir::Type elemTy = cgf.convertType(dead->getType());
+ mlir::Value undefPtr =
+ cgf.getBuilder().getNullPtr(cgf.getBuilder().getPointerTo(elemTy),
+ cgf.getLoc(throwExpr->getSourceRange()));
+ return cgf.makeAddrLValue(Address(undefPtr, elemTy, CharUnits::One()),
+ dead->getType());
+ }
+ return cgf.emitLValue(live);
+}
+
+/// Emit the operand of a glvalue conditional operator. This is either a glvalue
+/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
+/// LValue is returned and the current block has been terminated.
+static std::optional<LValue> emitLValueOrThrowExpression(CIRGenFunction &cgf,
+ const Expr *operand) {
+ if (auto *throwExpr = dyn_cast<CXXThrowExpr>(operand->IgnoreParens())) {
+ cgf.emitCXXThrowExpr(throwExpr);
+ return std::nullopt;
+ }
+
+ return cgf.emitLValue(operand);
+}
+} // namespace
+
+// Create and generate the 3 blocks for a conditional operator.
+// Leaves the 'current block' in the continuation basic block.
+template <typename FuncTy>
+CIRGenFunction::ConditionalInfo
+CIRGenFunction::emitConditionalBlocks(const AbstractConditionalOperator *e,
+ const FuncTy &branchGenFunc) {
+ ConditionalInfo info;
+ ConditionalEvaluation eval(*this);
+ mlir::Location loc = getLoc(e->getSourceRange());
+ CIRGenBuilderTy &builder = getBuilder();
+
+ mlir::Value condV = emitOpOnBoolExpr(loc, e->getCond());
+ SmallVector<mlir::OpBuilder::InsertPoint, 2> insertPoints{};
+ mlir::Type yieldTy{};
+
+ auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc,
+ const Expr *expr, std::optional<LValue> &resultLV) {
+ CIRGenFunction::LexicalScope lexScope{*this, loc, b.getInsertionBlock()};
+ curLexScope->setAsTernary();
+
+ assert(!cir::MissingFeatures::incrementProfileCounter());
+ eval.beginEvaluation();
+ resultLV = branchGenFunc(*this, expr);
+ mlir::Value resultPtr = resultLV ? resultLV->getPointer() : mlir::Value();
+ eval.endEvaluation();
+
+ if (resultPtr) {
+ yieldTy = resultPtr.getType();
+ cir::YieldOp::create(b, loc, resultPtr);
+ } else {
+ // If the LHS or RHS is a void expression, we need to patch the arms so
+ // that the yield types match properly.
+ // If the current block's terminator is an UnreachableOp (from a throw),
+ // we don't need a yield.
+ if (builder.getInsertionBlock()->mightHaveTerminator()) {
+ mlir::Operation *terminator =
+ builder.getInsertionBlock()->getTerminator();
+ if (isa_and_nonnull<cir::UnreachableOp>(terminator))
+ insertPoints.push_back(b.saveInsertionPoint());
+ }
+ }
+ };
+
+ info.result = cir::TernaryOp::create(
+ builder, loc, condV,
+ /*trueBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ emitBranch(b, loc, e->getTrueExpr(), info.lhs);
+ },
+ /*falseBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ emitBranch(b, loc, e->getFalseExpr(), info.rhs);
+ })
+ .getResult();
+
+ // If both arms are void, so be it.
+ if (!yieldTy)
+ yieldTy = VoidTy;
+
+ // Insert required yields.
+ for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.restoreInsertionPoint(toInsert);
+
+ // Block does not return: build empty yield.
+ if (!yieldTy) {
+ cir::YieldOp::create(builder, loc);
+ } else { // Block returns: set null yield value.
+ mlir::Value op0 = builder.getNullValue(yieldTy, loc);
+ cir::YieldOp::create(builder, loc, op0);
+ }
+ }
+
+ return info;
+}
+
+LValue CIRGenFunction::emitConditionalOperatorLValue(
+ const AbstractConditionalOperator *expr) {
+ if (!expr->isGLValue()) {
+ // ?: here should be an aggregate.
+ assert(hasAggregateEvaluationKind(expr->getType()) &&
+ "Unexpected conditional operator!");
+ return emitAggExprToLValue(expr);
+ }
+
+ OpaqueValueMapping binding(*this, expr);
+ if (std::optional<LValue> res =
+ handleConditionalOperatorLValueSimpleCase(*this, expr))
+ return *res;
+
+ ConditionalInfo info =
+ emitConditionalBlocks(expr, [](CIRGenFunction &cgf, const Expr *e) {
+ return emitLValueOrThrowExpression(cgf, e);
+ });
+
+ if ((info.lhs && !info.lhs->isSimple()) ||
+ (info.rhs && !info.rhs->isSimple())) {
+ cgm.errorNYI(expr->getSourceRange(),
+ "unsupported conditional operator with non-simple lvalue");
+ return LValue();
+ }
+
+ if (info.lhs && info.rhs) {
+ Address lhsAddr = info.lhs->getAddress();
+ Address rhsAddr = info.rhs->getAddress();
+ Address result(info.result, lhsAddr.getElementType(),
+ std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
+ AlignmentSource alignSource =
+ std::max(info.lhs->getBaseInfo().getAlignmentSource(),
+ info.rhs->getBaseInfo().getAlignmentSource());
+ assert(!cir::MissingFeatures::opTBAA());
+ return makeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource));
+ }
+
+ assert((info.lhs || info.rhs) &&
+ "both operands of glvalue conditional are throw-expressions?");
+ return info.lhs ? *info.lhs : *info.rhs;
+}
+
/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// performing such an operation can be performed without a libcall.
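The new `emitConditionalOperatorLValue` handles glvalue conditionals, including arms that are throw-expressions (which terminate their block with an unreachable instead of a yield). Illustrative C++ inputs:

```cpp
// Both arms are lvalues: lowered as a ternary whose result is the
// selected address.
void assignThrough(bool c, int &a, int &b) {
  (c ? a : b) = 42;
}

// One arm throws: per [expr.cond], the result is an lvalue of the other
// arm's type, and the throwing branch ends in unreachable.
int &pickOrThrow(bool c, int &a) {
  return c ? a : throw 0;
}
```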
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
index 901b937..568cbdb 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
@@ -24,6 +24,73 @@ using namespace clang;
using namespace clang::CIRGen;
namespace {
+// FIXME(cir): This should be a common helper between CIRGen
+// and traditional CodeGen
+/// Is the value of the given expression possibly a reference to or
+/// into a __block variable?
+static bool isBlockVarRef(const Expr *e) {
+ // Make sure we look through parens.
+ e = e->IgnoreParens();
+
+ // Check for a direct reference to a __block variable.
+ if (const DeclRefExpr *dre = dyn_cast<DeclRefExpr>(e)) {
+ const VarDecl *var = dyn_cast<VarDecl>(dre->getDecl());
+ return (var && var->hasAttr<BlocksAttr>());
+ }
+
+ // More complicated stuff.
+
+ // Binary operators.
+ if (const BinaryOperator *op = dyn_cast<BinaryOperator>(e)) {
+ // For an assignment or pointer-to-member operation, just care
+ // about the LHS.
+ if (op->isAssignmentOp() || op->isPtrMemOp())
+ return isBlockVarRef(op->getLHS());
+
+ // For a comma, just care about the RHS.
+ if (op->getOpcode() == BO_Comma)
+ return isBlockVarRef(op->getRHS());
+
+ // FIXME: pointer arithmetic?
+ return false;
+
+ // Check both sides of a conditional operator.
+ } else if (const AbstractConditionalOperator *op =
+ dyn_cast<AbstractConditionalOperator>(e)) {
+ return isBlockVarRef(op->getTrueExpr()) ||
+ isBlockVarRef(op->getFalseExpr());
+
+ // OVEs are required to support BinaryConditionalOperators.
+ } else if (const OpaqueValueExpr *op = dyn_cast<OpaqueValueExpr>(e)) {
+ if (const Expr *src = op->getSourceExpr())
+ return isBlockVarRef(src);
+
+ // Casts are necessary to get things like (*(int*)&var) = foo().
+ // We don't really care about the kind of cast here, except
+ // we don't want to look through l2r casts, because it's okay
+ // to get the *value* in a __block variable.
+ } else if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
+ if (cast->getCastKind() == CK_LValueToRValue)
+ return false;
+ return isBlockVarRef(cast->getSubExpr());
+
+ // Handle unary operators. Again, just aggressively look through
+ // it, ignoring the operation.
+ } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(e)) {
+ return isBlockVarRef(uop->getSubExpr());
+
+ // Look into the base of a field access.
+ } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(e)) {
+ return isBlockVarRef(mem->getBase());
+
+ // Look into the base of a subscript.
+ } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(e)) {
+ return isBlockVarRef(sub->getBase());
+ }
+
+ return false;
+}
+
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
CIRGenFunction &cgf;
@@ -41,9 +108,7 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
AggValueSlot ensureSlot(mlir::Location loc, QualType t) {
if (!dest.isIgnored())
return dest;
-
- cgf.cgm.errorNYI(loc, "Slot for ignored address");
- return dest;
+ return cgf.createAggTemp(t, loc, "agg.tmp.ensured");
}
void ensureDest(mlir::Location loc, QualType ty) {
@@ -89,6 +154,47 @@ public:
(void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca, dest);
}
+ void VisitBinAssign(const BinaryOperator *e) {
+ // For an assignment to work, the value on the right has
+ // to be compatible with the value on the left.
+ assert(cgf.getContext().hasSameUnqualifiedType(e->getLHS()->getType(),
+ e->getRHS()->getType()) &&
+ "Invalid assignment");
+
+ if (isBlockVarRef(e->getLHS()) &&
+ e->getRHS()->HasSideEffects(cgf.getContext())) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "block var reference with side effects");
+ return;
+ }
+
+ LValue lhs = cgf.emitLValue(e->getLHS());
+
+ // If we have an atomic type, evaluate into the destination and then
+ // do an atomic copy.
+ assert(!cir::MissingFeatures::atomicTypes());
+
+ // Codegen the RHS so that it stores directly into the LHS.
+ assert(!cir::MissingFeatures::aggValueSlotGC());
+ AggValueSlot lhsSlot = AggValueSlot::forLValue(
+ lhs, AggValueSlot::IsDestructed, AggValueSlot::IsAliased,
+ AggValueSlot::MayOverlap);
+
+ // A non-volatile aggregate destination might have a volatile member.
+ if (!lhsSlot.isVolatile() && cgf.hasVolatileMember(e->getLHS()->getType()))
+ lhsSlot.setVolatile(true);
+
+ cgf.emitAggExpr(e->getRHS(), lhsSlot);
+
+ // Copy into the destination if the assignment isn't ignored.
+ emitFinalDestCopy(e->getType(), lhs);
+
+ if (!dest.isIgnored() && !dest.isExternallyDestructed() &&
+ e->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
+ cgf.pushDestroy(QualType::DK_nontrivial_c_struct, dest.getAddress(),
+ e->getType());
+ }
+
void VisitDeclRefExpr(DeclRefExpr *e) { emitAggLoadOfLValue(e); }
void VisitInitListExpr(InitListExpr *e);
@@ -170,19 +276,10 @@ public:
void VisitConstantExpr(ConstantExpr *e) {
cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitConstantExpr");
}
- void VisitMemberExpr(MemberExpr *e) {
- cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitMemberExpr");
- }
- void VisitUnaryDeref(UnaryOperator *e) {
- cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitUnaryDeref");
- }
- void VisitStringLiteral(StringLiteral *e) {
- cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitStringLiteral");
- }
- void VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
- cgf.cgm.errorNYI(e->getSourceRange(),
- "AggExprEmitter: VisitCompoundLiteralExpr");
- }
+ void VisitMemberExpr(MemberExpr *e) { emitAggLoadOfLValue(e); }
+ void VisitUnaryDeref(UnaryOperator *e) { emitAggLoadOfLValue(e); }
+ void VisitStringLiteral(StringLiteral *e) { emitAggLoadOfLValue(e); }
+ void VisitCompoundLiteralExpr(CompoundLiteralExpr *e);
void VisitPredefinedExpr(const PredefinedExpr *e) {
cgf.cgm.errorNYI(e->getSourceRange(),
"AggExprEmitter: VisitPredefinedExpr");
@@ -195,9 +292,6 @@ public:
cgf.cgm.errorNYI(e->getSourceRange(),
"AggExprEmitter: VisitPointerToDataMemberBinaryOperator");
}
- void VisitBinAssign(const BinaryOperator *e) {
- cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitBinAssign");
- }
void VisitBinComma(const BinaryOperator *e) {
cgf.emitIgnoredExpr(e->getLHS());
Visit(e->getRHS());
@@ -325,6 +419,31 @@ void AggExprEmitter::emitAggLoadOfLValue(const Expr *e) {
emitFinalDestCopy(e->getType(), lv);
}
+void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
+ if (dest.isPotentiallyAliased() && e->getType().isPODType(cgf.getContext())) {
+ // For a POD type, just emit a load of the lvalue + a copy, because our
+ // compound literal might alias the destination.
+ emitAggLoadOfLValue(e);
+ return;
+ }
+
+ AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
+
+ // Block-scope compound literals are destroyed at the end of the enclosing
+ // scope in C.
+ bool destruct =
+ !cgf.getLangOpts().CPlusPlus && !slot.isExternallyDestructed();
+ if (destruct)
+ slot.setExternallyDestructed();
+
+ cgf.emitAggExpr(e->getInitializer(), slot);
+
+ if (destruct)
+ if ([[maybe_unused]] QualType::DestructionKind dtorKind =
+ e->getType().isDestructedType())
+ cgf.cgm.errorNYI(e->getSourceRange(), "compound literal with destructor");
+}
+
void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
QualType arrayQTy, Expr *e,
ArrayRef<Expr *> args, Expr *arrayFiller) {
@@ -487,7 +606,8 @@ void AggExprEmitter::emitCopy(QualType type, const AggValueSlot &dest,
LValue destLV = cgf.makeAddrLValue(dest.getAddress(), type);
LValue srcLV = cgf.makeAddrLValue(src.getAddress(), type);
assert(!cir::MissingFeatures::aggValueSlotVolatile());
- cgf.emitAggregateCopy(destLV, srcLV, type, dest.mayOverlap());
+ cgf.emitAggregateCopy(destLV, srcLV, type, dest.mayOverlap(),
+ dest.isVolatile() || src.isVolatile());
}
void AggExprEmitter::emitInitializationToLValue(Expr *e, LValue lv) {
@@ -788,7 +908,8 @@ void CIRGenFunction::emitAggExpr(const Expr *e, AggValueSlot slot) {
}
void CIRGenFunction::emitAggregateCopy(LValue dest, LValue src, QualType ty,
- AggValueSlot::Overlap_t mayOverlap) {
+ AggValueSlot::Overlap_t mayOverlap,
+ bool isVolatile) {
// TODO(cir): this function needs improvements, commented code for now since
// this will be touched again soon.
assert(!ty->isAnyComplexType() && "Unexpected copy of complex");
@@ -844,7 +965,7 @@ void CIRGenFunction::emitAggregateCopy(LValue dest, LValue src, QualType ty,
cgm.errorNYI("emitAggregateCopy: GC");
[[maybe_unused]] cir::CopyOp copyOp =
- builder.createCopy(destPtr.getPointer(), srcPtr.getPointer());
+ builder.createCopy(destPtr.getPointer(), srcPtr.getPointer(), isVolatile);
assert(!cir::MissingFeatures::opTBAA());
}
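The aggregate-emitter additions above cover, among other things, aggregate assignment (`VisitBinAssign`) and compound literals. A small C input of the kind now handled (compound literals are a C feature, though clang also accepts them in C++ as an extension):

```c
struct Point { int x, y; };

void use(struct Point p);

void demo(void) {
  struct Point p;
  p = (struct Point){1, 2};  /* aggregate assignment via VisitBinAssign */
  use((struct Point){3, 4}); /* compound literal evaluated into a temp slot */
}
```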
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index fcde487..d8f4943 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -196,9 +196,8 @@ public:
return Visit(e->getSubExpr());
}
mlir::Value VisitCXXDefaultArgExpr(CXXDefaultArgExpr *dae) {
- cgf.cgm.errorNYI(dae->getExprLoc(),
- "ComplexExprEmitter VisitCXXDefaultArgExpr");
- return {};
+ CIRGenFunction::CXXDefaultArgExprScope scope(cgf, dae);
+ return Visit(dae->getExpr());
}
mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
CIRGenFunction::CXXDefaultInitExprScope scope(cgf, die);
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index ba36cbe..25a46df 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -822,6 +822,10 @@ LValue CIRGenFunction::emitLValue(const Expr *e) {
std::string("l-value not implemented for '") +
e->getStmtClassName() + "'");
return LValue();
+ case Expr::ConditionalOperatorClass:
+ return emitConditionalOperatorLValue(cast<ConditionalOperator>(e));
+ case Expr::BinaryConditionalOperatorClass:
+ return emitConditionalOperatorLValue(cast<BinaryConditionalOperator>(e));
case Expr::ArraySubscriptExprClass:
return emitArraySubscriptExpr(cast<ArraySubscriptExpr>(e));
case Expr::UnaryOperatorClass:
@@ -866,6 +870,8 @@ LValue CIRGenFunction::emitLValue(const Expr *e) {
return emitCastLValue(cast<CastExpr>(e));
case Expr::MaterializeTemporaryExprClass:
return emitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(e));
+ case Expr::OpaqueValueExprClass:
+ return emitOpaqueValueLValue(cast<OpaqueValueExpr>(e));
case Expr::ChooseExprClass:
return emitLValue(cast<ChooseExpr>(e)->getChosenSubExpr());
}
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 3c36f5c..5a71126 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -733,6 +733,11 @@ public:
SourceLocExprScopeGuard sourceLocScope;
};
+ struct CXXDefaultArgExprScope : SourceLocExprScopeGuard {
+ CXXDefaultArgExprScope(CIRGenFunction &cfg, const CXXDefaultArgExpr *e)
+ : SourceLocExprScopeGuard(e, cfg.curSourceLocExprScope) {}
+ };
+
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t);
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty);
@@ -853,6 +858,13 @@ public:
FunctionArgList args, clang::SourceLocation loc,
clang::SourceLocation startLoc);
+ /// Returns true if the aggregate type has a volatile member.
+ bool hasVolatileMember(QualType t) {
+ if (const auto *rd = t->getAsRecordDecl())
+ return rd->hasVolatileMember();
+ return false;
+ }
+
/// The cleanup depth enclosing all the cleanups associated with the
/// parameters.
EHScopeStack::stable_iterator prologueCleanupDepth;
@@ -1077,6 +1089,9 @@ public:
static Destroyer destroyCXXObject;
+ void pushDestroy(QualType::DestructionKind dtorKind, Address addr,
+ QualType type);
+
void pushDestroy(CleanupKind kind, Address addr, QualType type,
Destroyer *destroyer);
@@ -1131,14 +1146,16 @@ public:
/// occupied by some other object. More efficient code can often be
/// generated if not.
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy,
- AggValueSlot::Overlap_t mayOverlap);
+ AggValueSlot::Overlap_t mayOverlap,
+ bool isVolatile = false);
/// Emit code to compute the specified expression which can have any type. The
/// result is returned as an RValue struct. If this is an aggregate
/// expression, the aggloc/agglocvolatile arguments indicate where the result
/// should be returned.
RValue emitAnyExpr(const clang::Expr *e,
- AggValueSlot aggSlot = AggValueSlot::ignored());
+ AggValueSlot aggSlot = AggValueSlot::ignored(),
+ bool ignoreResult = false);
/// Emits the code necessary to evaluate an arbitrary expression into the
/// given memory location.
@@ -1518,6 +1535,10 @@ public:
LValue emitMemberExpr(const MemberExpr *e);
+ LValue emitOpaqueValueLValue(const OpaqueValueExpr *e);
+
+ LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr);
+
/// Given an expression with a pointer type, emit the value and compute our
/// best estimate of the alignment of the pointee.
///
diff --git a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
index f638d39..be063033 100644
--- a/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenOpenACCRecipe.cpp
@@ -590,15 +590,18 @@ void OpenACCRecipeBuilderBase::createReductionRecipeCombiner(
} else {
// else we have to handle each individual field after a
// get-element.
+ const CIRGenRecordLayout &layout =
+ cgf.cgm.getTypes().getCIRGenRecordLayout(rd);
for (const auto &[field, combiner] :
llvm::zip_equal(rd->fields(), combinerRecipes)) {
mlir::Type fieldType = cgf.convertType(field->getType());
auto fieldPtr = cir::PointerType::get(fieldType);
+ unsigned fieldIndex = layout.getCIRFieldNo(field);
mlir::Value lhsField = builder.createGetMember(
- loc, fieldPtr, lhsArg, field->getName(), field->getFieldIndex());
+ loc, fieldPtr, lhsArg, field->getName(), fieldIndex);
mlir::Value rhsField = builder.createGetMember(
- loc, fieldPtr, rhsArg, field->getName(), field->getFieldIndex());
+ loc, fieldPtr, rhsArg, field->getName(), fieldIndex);
emitSingleCombiner(lhsField, rhsField, combiner);
}
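
Why the layout lookup matters, as a hypothetical illustration (exactly which fields get no CIR layout slot is an assumption):

struct S {
  int a;   // AST field index 0, CIR layout index 0
  int : 0; // unnamed zero-width bit-field: consumes AST index 1,
           // but may get no slot in the CIR record layout
  int b;   // AST field index 2, CIR layout index 1
};

With such a record, field->getFieldIndex() (the AST index) would address the wrong member in createGetMember, while layout.getCIRFieldNo(field) returns the index the lowered record actually uses.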
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index 5ba64dd..ad8c4d0 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -475,8 +475,8 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
}
break;
case cir::TEK_Complex:
- getCIRGenModule().errorNYI(s.getSourceRange(),
- "complex function return type");
+ emitComplexExprIntoLValue(rv, makeAddrLValue(returnValue, rv->getType()),
+ /*isInit=*/true);
break;
case cir::TEK_Aggregate:
assert(!cir::MissingFeatures::aggValueSlotGC());
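
The shape this unblocks, as a small illustration (previously this hit the errorNYI path):

_Complex double retComplex(_Complex double z) {
  return z; // complex-valued return, now emitted via emitComplexExprIntoLValue
}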
diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h b/clang/lib/CIR/CodeGen/CIRGenValue.h
index c05142e..ab245a77 100644
--- a/clang/lib/CIR/CodeGen/CIRGenValue.h
+++ b/clang/lib/CIR/CodeGen/CIRGenValue.h
@@ -380,6 +380,15 @@ public:
clang::Qualifiers getQualifiers() const { return quals; }
+ bool isVolatile() const { return quals.hasVolatile(); }
+
+ void setVolatile(bool flag) {
+ if (flag)
+ quals.addVolatile();
+ else
+ quals.removeVolatile();
+ }
+
Address getAddress() const { return addr; }
bool isIgnored() const { return !addr.isValid(); }
@@ -390,6 +399,8 @@ public:
IsZeroed_t isZeroed() const { return IsZeroed_t(zeroedFlag); }
+ IsAliased_t isPotentiallyAliased() const { return IsAliased_t(aliasedFlag); }
+
RValue asRValue() const {
if (isIgnored())
return RValue::getIgnored();
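
A sketch of how the new slot accessors might compose (an assumed helper, not from the patch; the Overlap_t enumerators are assumed to mirror classic CodeGen):

void copySlot(CIRGenFunction &cgf, LValue dst, LValue src, QualType ty,
              AggValueSlot slot) {
  // Volatility and aliasing now round-trip through the slot itself.
  cgf.emitAggregateCopy(dst, src, ty,
                        slot.isPotentiallyAliased()
                            ? AggValueSlot::MayOverlap
                            : AggValueSlot::DoesNotOverlap,
                        slot.isVolatile());
}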
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index b4c3704..ed606b7 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -1978,13 +1978,19 @@ void cir::TernaryOp::build(
result.addOperands(cond);
OpBuilder::InsertionGuard guard(builder);
Region *trueRegion = result.addRegion();
- Block *block = builder.createBlock(trueRegion);
+ builder.createBlock(trueRegion);
trueBuilder(builder, result.location);
Region *falseRegion = result.addRegion();
builder.createBlock(falseRegion);
falseBuilder(builder, result.location);
- auto yield = dyn_cast<YieldOp>(block->getTerminator());
+ // Get the result type from whichever branch has a yield (the other may
+ // end in an unreachable from a throw expression).
+ auto yield =
+ dyn_cast_or_null<cir::YieldOp>(trueRegion->back().getTerminator());
+ if (!yield)
+ yield = dyn_cast_or_null<cir::YieldOp>(falseRegion->back().getTerminator());
+
assert((yield && yield.getNumOperands() <= 1) &&
"expected zero or one result type");
if (yield.getNumOperands() == 1)
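
The source pattern that motivates this, illustratively:

#include <stdexcept>

int pick(bool ok, int v) {
  // The false arm ends in a throw, so its region terminates with
  // cir.unreachable instead of cir.yield; the result type must then
  // come from the true arm's yield.
  return ok ? v : throw std::runtime_error("no value");
}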
diff --git a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
index 26e5c05..8589a2e 100644
--- a/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/FlattenCFG.cpp
@@ -505,10 +505,19 @@ public:
Block *trueBlock = &trueRegion.front();
mlir::Operation *trueTerminator = trueRegion.back().getTerminator();
rewriter.setInsertionPointToEnd(&trueRegion.back());
- auto trueYieldOp = dyn_cast<cir::YieldOp>(trueTerminator);
- rewriter.replaceOpWithNewOp<cir::BrOp>(trueYieldOp, trueYieldOp.getArgs(),
- continueBlock);
+ // Handle both yield and unreachable terminators (throw expressions)
+ if (auto trueYieldOp = dyn_cast<cir::YieldOp>(trueTerminator)) {
+ rewriter.replaceOpWithNewOp<cir::BrOp>(trueYieldOp, trueYieldOp.getArgs(),
+ continueBlock);
+ } else if (isa<cir::UnreachableOp>(trueTerminator)) {
+ // The terminator is unreachable (e.g., from a throw); keep it as-is.
+ } else {
+ trueTerminator->emitError("unexpected terminator in ternary true region, "
+ "expected yield or unreachable, got: ")
+ << trueTerminator->getName();
+ return mlir::failure();
+ }
rewriter.inlineRegionBefore(trueRegion, continueBlock);
Block *falseBlock = continueBlock;
@@ -517,9 +526,19 @@ public:
falseBlock = &falseRegion.front();
mlir::Operation *falseTerminator = falseRegion.back().getTerminator();
rewriter.setInsertionPointToEnd(&falseRegion.back());
- auto falseYieldOp = dyn_cast<cir::YieldOp>(falseTerminator);
- rewriter.replaceOpWithNewOp<cir::BrOp>(falseYieldOp, falseYieldOp.getArgs(),
- continueBlock);
+
+ // Handle both yield and unreachable terminators (throw expressions)
+ if (auto falseYieldOp = dyn_cast<cir::YieldOp>(falseTerminator)) {
+ rewriter.replaceOpWithNewOp<cir::BrOp>(
+ falseYieldOp, falseYieldOp.getArgs(), continueBlock);
+ } else if (isa<cir::UnreachableOp>(falseTerminator)) {
+ // The terminator is unreachable (e.g., from a throw); keep it as-is.
+ } else {
+ falseTerminator->emitError("unexpected terminator in ternary false "
+ "region, expected yield or unreachable, got: ")
+ << falseTerminator->getName();
+ return mlir::failure();
+ }
rewriter.inlineRegionBefore(falseRegion, continueBlock);
rewriter.setInsertionPointToEnd(condBlock);
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 0243bf1..dc26dac 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -182,7 +182,7 @@ mlir::LogicalResult CIRToLLVMCopyOpLowering::matchAndRewrite(
rewriter, op.getLoc(), rewriter.getI32Type(), op.getLength(layout));
assert(!cir::MissingFeatures::aggValueSlotVolatile());
rewriter.replaceOpWithNewOp<mlir::LLVM::MemcpyOp>(
- op, adaptor.getDst(), adaptor.getSrc(), length, /*isVolatile=*/false);
+ op, adaptor.getDst(), adaptor.getSrc(), length, op.getIsVolatile());
return mlir::success();
}
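
Illustrative C++ that should now reach this lowering with op.getIsVolatile() set (assuming cir.copy carries the volatile flag end-to-end):

struct Buffer {
  volatile int words[8];
};

void snapshot(Buffer *dst, const Buffer *src) {
  *dst = *src; // volatile member => volatile aggregate copy => volatile memcpy
}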
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index d95dab3..a012581 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -86,7 +86,7 @@ namespace {
llvm::Value *StoragePtr = CGF.Builder.CreateConstGEP1_64(
CGF.Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());
StoragePtr = CGF.Builder.CreateAddrSpaceCast(
- StoragePtr, CGF.UnqualPtrTy, "atomic_bitfield_base");
+ StoragePtr, CGF.DefaultPtrTy, "atomic_bitfield_base");
BFI = OrigBFI;
BFI.Offset = Offset;
BFI.StorageSize = AtomicSizeInBits;
@@ -374,10 +374,9 @@ bool AtomicInfo::emitMemSetZeroIfNecessary() const {
}
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
- Address Dest, Address Ptr,
- Address Val1, Address Val2,
- uint64_t Size,
- llvm::AtomicOrdering SuccessOrder,
+ Address Dest, Address Ptr, Address Val1,
+ Address Val2, Address ExpectedResult,
+ uint64_t Size, llvm::AtomicOrdering SuccessOrder,
llvm::AtomicOrdering FailureOrder,
llvm::SyncScope::ID Scope) {
// Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
@@ -411,8 +410,30 @@ static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
CGF.Builder.SetInsertPoint(StoreExpectedBB);
// Update the memory at Expected with Old's value.
- auto *I = CGF.Builder.CreateStore(Old, Val1);
- CGF.addInstToCurrentSourceAtom(I, Old);
+ llvm::Type *ExpectedType = ExpectedResult.getElementType();
+ const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
+ uint64_t ExpectedSizeInBytes = DL.getTypeStoreSize(ExpectedType);
+
+ if (ExpectedSizeInBytes == Size) {
+ // Sizes match: store directly
+ auto *I = CGF.Builder.CreateStore(Old, ExpectedResult);
+ CGF.addInstToCurrentSourceAtom(I, Old);
+ } else {
+ // Store only the first ExpectedSizeInBytes bytes of Old.
+ llvm::Type *OldType = Old->getType();
+
+ // Allocate temporary storage for Old value
+ Address OldTmp =
+ CGF.CreateTempAlloca(OldType, Ptr.getAlignment(), "old.tmp");
+
+ // Store Old into this temporary
+ auto *I = CGF.Builder.CreateStore(Old, OldTmp);
+ CGF.addInstToCurrentSourceAtom(I, Old);
+
+ // Perform a memcpy of the first ExpectedSizeInBytes bytes.
+ CGF.Builder.CreateMemCpy(ExpectedResult, OldTmp, ExpectedSizeInBytes,
+ /*isVolatile=*/false);
+ }
// Finally, branch to the exit point.
CGF.Builder.CreateBr(ContinueBB);
@@ -425,13 +446,11 @@ static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
-static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
- bool IsWeak, Address Dest, Address Ptr,
- Address Val1, Address Val2,
- llvm::Value *FailureOrderVal,
- uint64_t Size,
- llvm::AtomicOrdering SuccessOrder,
- llvm::SyncScope::ID Scope) {
+static void emitAtomicCmpXchgFailureSet(
+ CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr,
+ Address Val1, Address Val2, Address ExpectedResult,
+ llvm::Value *FailureOrderVal, uint64_t Size,
+ llvm::AtomicOrdering SuccessOrder, llvm::SyncScope::ID Scope) {
llvm::AtomicOrdering FailureOrder;
if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
auto FOS = FO->getSExtValue();
@@ -458,8 +477,8 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
// success argument". This condition has been lifted and the only
// precondition is 31.7.2.18. Effectively treat this as a DR and skip
// language version checks.
- emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
- FailureOrder, Scope);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult,
+ Size, SuccessOrder, FailureOrder, Scope);
return;
}
@@ -483,18 +502,19 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
// Emit all the different atomics
CGF.Builder.SetInsertPoint(MonotonicBB);
- emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
- Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
+ SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(AcquireBB);
- emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
- llvm::AtomicOrdering::Acquire, Scope);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
+ SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(SeqCstBB);
- emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
- llvm::AtomicOrdering::SequentiallyConsistent, Scope);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
+ SuccessOrder, llvm::AtomicOrdering::SequentiallyConsistent,
+ Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(ContBB);
@@ -538,8 +558,9 @@ static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
Address Ptr, Address Val1, Address Val2,
- llvm::Value *IsWeak, llvm::Value *FailureOrder,
- uint64_t Size, llvm::AtomicOrdering Order,
+ Address ExpectedResult, llvm::Value *IsWeak,
+ llvm::Value *FailureOrder, uint64_t Size,
+ llvm::AtomicOrdering Order,
llvm::SyncScope::ID Scope) {
llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
bool PostOpMinMax = false;
@@ -554,13 +575,15 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Order, Scope);
+ ExpectedResult, FailureOrder, Size, Order,
+ Scope);
return;
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Order, Scope);
+ ExpectedResult, FailureOrder, Size, Order,
+ Scope);
return;
case AtomicExpr::AO__atomic_compare_exchange:
case AtomicExpr::AO__atomic_compare_exchange_n:
@@ -568,7 +591,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
- Val1, Val2, FailureOrder, Size, Order, Scope);
+ Val1, Val2, ExpectedResult, FailureOrder,
+ Size, Order, Scope);
} else {
// Create all the relevant BB's
llvm::BasicBlock *StrongBB =
@@ -582,12 +606,14 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
CGF.Builder.SetInsertPoint(StrongBB);
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Order, Scope);
+ ExpectedResult, FailureOrder, Size, Order,
+ Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(WeakBB);
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
- FailureOrder, Size, Order, Scope);
+ ExpectedResult, FailureOrder, Size, Order,
+ Scope);
CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(ContBB);
@@ -797,9 +823,9 @@ EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
Address Ptr, Address Val1, Address Val2,
- llvm::Value *IsWeak, llvm::Value *FailureOrder,
- uint64_t Size, llvm::AtomicOrdering Order,
- llvm::Value *Scope) {
+ Address OriginalVal1, llvm::Value *IsWeak,
+ llvm::Value *FailureOrder, uint64_t Size,
+ llvm::AtomicOrdering Order, llvm::Value *Scope) {
auto ScopeModel = Expr->getScopeModel();
// LLVM atomic instructions always have sync scope. If clang atomic
@@ -816,8 +842,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
Order, CGF.getLLVMContext());
else
SS = llvm::SyncScope::System;
- EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
- Order, SS);
+ EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ FailureOrder, Size, Order, SS);
return;
}
@@ -826,8 +852,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
Order, CGF.CGM.getLLVMContext());
- EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
- Order, SCID);
+ EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ FailureOrder, Size, Order, SCID);
return;
}
@@ -852,12 +878,11 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
SI->addCase(Builder.getInt32(S), B);
Builder.SetInsertPoint(B);
- EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
- Order,
- CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
- ScopeModel->map(S),
- Order,
- CGF.getLLVMContext()));
+ EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ FailureOrder, Size, Order,
+ CGF.getTargetHooks().getLLVMSyncScopeID(
+ CGF.CGM.getLangOpts(), ScopeModel->map(S), Order,
+ CGF.getLLVMContext()));
Builder.CreateBr(ContBB);
}
@@ -1058,6 +1083,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
AtomicInfo Atomics(*this, AtomicVal);
+ Address OriginalVal1 = Val1;
if (ShouldCastToIntPtrTy) {
Ptr = Atomics.castToAtomicIntPointer(Ptr);
if (Val1.isValid())
@@ -1301,30 +1327,32 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
if (llvm::isValidAtomicOrderingCABI(ord))
switch ((llvm::AtomicOrderingCABI)ord) {
case llvm::AtomicOrderingCABI::relaxed:
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Monotonic, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::Monotonic, Scope);
break;
case llvm::AtomicOrderingCABI::consume:
case llvm::AtomicOrderingCABI::acquire:
if (IsStore)
break; // Avoid crashing on code with undefined behavior
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Acquire, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::Acquire, Scope);
break;
case llvm::AtomicOrderingCABI::release:
if (IsLoad)
break; // Avoid crashing on code with undefined behavior
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Release, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::Release, Scope);
break;
case llvm::AtomicOrderingCABI::acq_rel:
if (IsLoad || IsStore)
break; // Avoid crashing on code with undefined behavior
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::AcquireRelease, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::AcquireRelease,
+ Scope);
break;
case llvm::AtomicOrderingCABI::seq_cst:
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size,
llvm::AtomicOrdering::SequentiallyConsistent, Scope);
break;
}
@@ -1360,13 +1388,13 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// Emit all the different atomics
Builder.SetInsertPoint(MonotonicBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Monotonic, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail,
+ Size, llvm::AtomicOrdering::Monotonic, Scope);
Builder.CreateBr(ContBB);
if (!IsStore) {
Builder.SetInsertPoint(AcquireBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Acquire, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::Acquire, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
AcquireBB);
@@ -1375,23 +1403,23 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
}
if (!IsLoad) {
Builder.SetInsertPoint(ReleaseBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::Release, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::Release, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
ReleaseBB);
}
if (!IsLoad && !IsStore) {
Builder.SetInsertPoint(AcqRelBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::AcquireRelease, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
+ OrderFail, Size, llvm::AtomicOrdering::AcquireRelease, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
AcqRelBB);
}
Builder.SetInsertPoint(SeqCstBB);
- EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
- llvm::AtomicOrdering::SequentiallyConsistent, Scope);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail,
+ Size, llvm::AtomicOrdering::SequentiallyConsistent, Scope);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
SeqCstBB);
@@ -1417,6 +1445,11 @@ Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
if (SourceSizeInBits != AtomicSizeInBits) {
Address Tmp = CreateTempAlloca();
+ CGF.Builder.CreateMemSet(
+ Tmp.emitRawPointer(CGF), llvm::ConstantInt::get(CGF.Int8Ty, 0),
+ CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
+ Tmp.getAlignment().getAsAlign());
+
CGF.Builder.CreateMemCpy(Tmp, Addr,
std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
Addr = Tmp;
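
A plausible trigger for the expected-size mismatch handled above (assumption: x86 long double, whose 10-byte store size is smaller than its 16-byte padded atomic size, so blindly storing the whole padded value into *expected would write past the object):

bool cas80(long double *p, long double *expected, long double *desired) {
  return __atomic_compare_exchange(p, expected, desired, /*weak=*/false,
                                   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}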
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index 597127ab..20a595e 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -1207,7 +1207,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
} else {
// Bitcast the block literal to a generic block literal.
BlockPtr =
- Builder.CreatePointerCast(BlockPtr, UnqualPtrTy, "block.literal");
+ Builder.CreatePointerCast(BlockPtr, DefaultPtrTy, "block.literal");
// Get pointer to the block invoke function
FuncPtr = Builder.CreateStructGEP(GenBlockTy, BlockPtr, 3);
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 92dba32..fd14cd6 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -1673,7 +1673,7 @@ static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
CGF.getLLVMContext(),
CGF.getContext().getTypeSize(E->getArg(1)->getType()));
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.Int8Ty, {CGF.UnqualPtrTy, IntType}, false);
+ llvm::FunctionType::get(CGF.Int8Ty, {CGF.DefaultPtrTy, IntType}, false);
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp
index cb16fe1..b463f88 100644
--- a/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/clang/lib/CodeGen/CGCUDANV.cpp
@@ -230,7 +230,7 @@ CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
IntTy = CGM.IntTy;
SizeTy = CGM.SizeTy;
VoidTy = CGM.VoidTy;
- PtrTy = CGM.UnqualPtrTy;
+ PtrTy = CGM.DefaultPtrTy;
if (CGM.getLangOpts().OffloadViaLLVM)
Prefix = "llvm";
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 8439ec7..fd73314 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -1756,7 +1756,7 @@ LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
const char *Name) {
ErrorUnsupported(E, Name);
llvm::Type *ElTy = ConvertType(E->getType());
- llvm::Type *Ty = UnqualPtrTy;
+ llvm::Type *Ty = DefaultPtrTy;
return MakeAddrLValue(
Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
}
@@ -4270,7 +4270,7 @@ void CodeGenFunction::EmitCfiCheckFail() {
llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
llvm::Value *V = Builder.CreateConstGEP2_32(
- CfiCheckFailDataTy, Builder.CreatePointerCast(Data, UnqualPtrTy), 0, 0);
+ CfiCheckFailDataTy, Builder.CreatePointerCast(Data, DefaultPtrTy), 0, 0);
Address CheckKindAddr(V, Int8Ty, getIntAlign());
llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
@@ -5711,7 +5711,7 @@ std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
CGF.EmitCXXThrowExpr(ThrowExpr);
llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
- llvm::Type *Ty = CGF.UnqualPtrTy;
+ llvm::Type *Ty = CGF.DefaultPtrTy;
return CGF.MakeAddrLValue(
Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
Dead->getType());
diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp
index c571821a..cb5bb40 100644
--- a/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/clang/lib/CodeGen/CGObjCMac.cpp
@@ -338,7 +338,7 @@ public:
/// GcReadWeakFn -- LLVM objc_read_weak (id *src) function.
llvm::FunctionCallee getGcReadWeakFn() {
// id objc_read_weak (id *)
- llvm::Type *args[] = {CGM.UnqualPtrTy};
+ llvm::Type *args[] = {CGM.DefaultPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_read_weak");
}
@@ -346,7 +346,7 @@ public:
/// GcAssignWeakFn -- LLVM objc_assign_weak function.
llvm::FunctionCallee getGcAssignWeakFn() {
// id objc_assign_weak (id, id *)
- llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy};
+ llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");
}
@@ -354,7 +354,7 @@ public:
/// GcAssignGlobalFn -- LLVM objc_assign_global function.
llvm::FunctionCallee getGcAssignGlobalFn() {
// id objc_assign_global(id, id *)
- llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy};
+ llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
}
@@ -362,7 +362,7 @@ public:
/// GcAssignThreadLocalFn -- LLVM objc_assign_threadlocal function.
llvm::FunctionCallee getGcAssignThreadLocalFn() {
// id objc_assign_threadlocal(id src, id * dest)
- llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy};
+ llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_threadlocal");
}
@@ -370,7 +370,7 @@ public:
/// GcAssignIvarFn -- LLVM objc_assign_ivar function.
llvm::FunctionCallee getGcAssignIvarFn() {
// id objc_assign_ivar(id, id *, ptrdiff_t)
- llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy, CGM.PtrDiffTy};
+ llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy, CGM.PtrDiffTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
}
@@ -386,7 +386,7 @@ public:
/// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function.
llvm::FunctionCallee getGcAssignStrongCastFn() {
// id objc_assign_strongCast(id, id *)
- llvm::Type *args[] = {ObjectPtrTy, CGM.UnqualPtrTy};
+ llvm::Type *args[] = {ObjectPtrTy, CGM.DefaultPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false);
return CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast");
}
@@ -517,7 +517,7 @@ public:
/// ExceptionTryEnterFn - LLVM objc_exception_try_enter function.
llvm::FunctionCallee getExceptionTryEnterFn() {
- llvm::Type *params[] = {CGM.UnqualPtrTy};
+ llvm::Type *params[] = {CGM.DefaultPtrTy};
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.VoidTy, params, false),
"objc_exception_try_enter");
@@ -525,7 +525,7 @@ public:
/// ExceptionTryExitFn - LLVM objc_exception_try_exit function.
llvm::FunctionCallee getExceptionTryExitFn() {
- llvm::Type *params[] = {CGM.UnqualPtrTy};
+ llvm::Type *params[] = {CGM.DefaultPtrTy};
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.VoidTy, params, false),
"objc_exception_try_exit");
@@ -533,7 +533,7 @@ public:
/// ExceptionExtractFn - LLVM objc_exception_extract function.
llvm::FunctionCallee getExceptionExtractFn() {
- llvm::Type *params[] = {CGM.UnqualPtrTy};
+ llvm::Type *params[] = {CGM.DefaultPtrTy};
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(ObjectPtrTy, params, false),
"objc_exception_extract");
@@ -550,7 +550,7 @@ public:
/// SetJmpFn - LLVM _setjmp function.
llvm::FunctionCallee getSetJmpFn() {
// This is specifically the prototype for x86.
- llvm::Type *params[] = {CGM.UnqualPtrTy};
+ llvm::Type *params[] = {CGM.DefaultPtrTy};
return CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGM.Int32Ty, params, false), "_setjmp",
llvm::AttributeList::get(CGM.getLLVMContext(),
@@ -1927,7 +1927,7 @@ CGObjCCommonMac::GenerateConstantNSString(const StringLiteral *Literal) {
// If we don't already have it, construct the type for a constant NSString.
if (!NSConstantStringType) {
NSConstantStringType =
- llvm::StructType::create({CGM.UnqualPtrTy, CGM.Int8PtrTy, CGM.IntTy},
+ llvm::StructType::create({CGM.DefaultPtrTy, CGM.Int8PtrTy, CGM.IntTy},
"struct.__builtin_NSString");
}
@@ -5959,7 +5959,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(
Int8PtrTy, PropertyListPtrTy);
// ImpnfABITy - LLVM for id (*)(id, SEL, ...)
- ImpnfABITy = CGM.UnqualPtrTy;
+ ImpnfABITy = CGM.DefaultPtrTy;
// struct _class_t {
// struct _class_t *isa;
@@ -6380,7 +6380,7 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
CGM.getModule(), ObjCTypes.ImpnfABITy, false,
llvm::GlobalValue::ExternalLinkage, nullptr, "_objc_empty_vtable");
else
- ObjCEmptyVtableVar = llvm::ConstantPointerNull::get(CGM.UnqualPtrTy);
+ ObjCEmptyVtableVar = llvm::ConstantPointerNull::get(CGM.DefaultPtrTy);
}
// FIXME: Is this correct (that meta class size is never computed)?
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 1ff2be7..85b2404 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -1714,12 +1714,12 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
// Copying constructor for the threadprivate variable.
// Must be NULL - reserved by runtime, but currently it requires that this
// parameter is always NULL. Otherwise it fires assertion.
- CopyCtor = llvm::Constant::getNullValue(CGM.UnqualPtrTy);
+ CopyCtor = llvm::Constant::getNullValue(CGM.DefaultPtrTy);
if (Ctor == nullptr) {
- Ctor = llvm::Constant::getNullValue(CGM.UnqualPtrTy);
+ Ctor = llvm::Constant::getNullValue(CGM.DefaultPtrTy);
}
if (Dtor == nullptr) {
- Dtor = llvm::Constant::getNullValue(CGM.UnqualPtrTy);
+ Dtor = llvm::Constant::getNullValue(CGM.DefaultPtrTy);
}
if (!CGF) {
auto *InitFunctionTy =
diff --git a/clang/lib/CodeGen/CGPointerAuth.cpp b/clang/lib/CodeGen/CGPointerAuth.cpp
index 375f87a..dbb7bc9 100644
--- a/clang/lib/CodeGen/CGPointerAuth.cpp
+++ b/clang/lib/CodeGen/CGPointerAuth.cpp
@@ -426,10 +426,10 @@ CodeGenModule::getConstantSignedPointer(llvm::Constant *Pointer, unsigned Key,
llvm::ConstantInt *OtherDiscriminator) {
llvm::Constant *AddressDiscriminator;
if (StorageAddress) {
- assert(StorageAddress->getType() == UnqualPtrTy);
+ assert(StorageAddress->getType() == DefaultPtrTy);
AddressDiscriminator = StorageAddress;
} else {
- AddressDiscriminator = llvm::Constant::getNullValue(UnqualPtrTy);
+ AddressDiscriminator = llvm::Constant::getNullValue(DefaultPtrTy);
}
llvm::ConstantInt *IntegerDiscriminator;
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index c5eb14e..1085f45 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -83,6 +83,7 @@ static llvm::cl::opt<bool> LimitedCoverage(
llvm::cl::desc("Emit limited coverage mapping information (experimental)"));
static const char AnnotationSection[] = "llvm.metadata";
+static constexpr auto ErrnoTBAAMDName = "llvm.errno.tbaa";
static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
switch (CGM.getContext().getCXXABIKind()) {
@@ -1583,6 +1584,17 @@ void CodeGenModule::Release() {
}
}
}
+
+ // Emit `!llvm.errno.tbaa`, a module-level metadata that specifies the TBAA
+ // for an int access. This allows LLVM to reason about what memory can be
+ // accessed by certain library calls that only touch errno.
+ if (TBAA) {
+ TBAAAccessInfo TBAAInfo = getTBAAAccessInfo(Context.IntTy);
+ if (llvm::MDNode *IntegerNode = getTBAAAccessTagInfo(TBAAInfo)) {
+ auto *ErrnoTBAAMD = TheModule.getOrInsertNamedMetadata(ErrnoTBAAMDName);
+ ErrnoTBAAMD->addOperand(IntegerNode);
+ }
+ }
}
void CodeGenModule::EmitOpenCLMetadata() {
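
What the metadata is meant to enable, as a hedged illustration (the specific optimization is an assumption, not part of the patch):

#include <cmath>

float reuse(float *p, float x) {
  float before = *p;
  float r = std::sqrt(x); // a libm call that may write only errno
  // With !llvm.errno.tbaa describing errno as an int access, LLVM can
  // prove the float *p is not clobbered and reuse `before` here.
  return before + *p + r;
}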
diff --git a/clang/lib/CodeGen/CodeGenTypeCache.h b/clang/lib/CodeGen/CodeGenTypeCache.h
index e273ebe..015306b 100644
--- a/clang/lib/CodeGen/CodeGenTypeCache.h
+++ b/clang/lib/CodeGen/CodeGenTypeCache.h
@@ -53,7 +53,7 @@ struct CodeGenTypeCache {
/// void*, void** in the target's default address space (often 0)
union {
- llvm::PointerType *UnqualPtrTy;
+ llvm::PointerType *DefaultPtrTy;
llvm::PointerType *VoidPtrTy;
llvm::PointerType *Int8PtrTy;
llvm::PointerType *VoidPtrPtrTy;
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 9e195a9..65c4763 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -774,7 +774,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
} else {
llvm::Value *VFPAddr =
CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
- VirtualFn = CGF.Builder.CreateAlignedLoad(CGF.UnqualPtrTy, VFPAddr,
+ VirtualFn = CGF.Builder.CreateAlignedLoad(CGF.DefaultPtrTy, VFPAddr,
CGF.getPointerAlign(),
"memptr.virtualfn");
}
@@ -816,7 +816,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// function pointer.
CGF.EmitBlock(FnNonVirtual);
llvm::Value *NonVirtualFn =
- Builder.CreateIntToPtr(FnAsInt, CGF.UnqualPtrTy, "memptr.nonvirtualfn");
+ Builder.CreateIntToPtr(FnAsInt, CGF.DefaultPtrTy, "memptr.nonvirtualfn");
// Check the function pointer if CFI on member function pointers is enabled.
if (ShouldEmitCFICheck) {
@@ -856,7 +856,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// We're done.
CGF.EmitBlock(FnEnd);
- llvm::PHINode *CalleePtr = Builder.CreatePHI(CGF.UnqualPtrTy, 2);
+ llvm::PHINode *CalleePtr = Builder.CreatePHI(CGF.DefaultPtrTy, 2);
CalleePtr->addIncoming(VirtualFn, FnVirtual);
CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
@@ -1403,7 +1403,7 @@ void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
// Grab the vtable pointer as an intptr_t*.
auto *ClassDecl = ElementType->castAsCXXRecordDecl();
- llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.UnqualPtrTy, ClassDecl);
+ llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.DefaultPtrTy, ClassDecl);
// Track back to entry -2 and pull out the offset there.
llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
@@ -1749,7 +1749,7 @@ llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
auto AuthenticateVTable = [&](Address ThisAddr, const CXXRecordDecl *Decl) {
if (!CGF.getLangOpts().PointerAuthCalls)
return;
- (void)CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, Decl,
+ (void)CGF.GetVTablePtr(ThisAddr, CGF.DefaultPtrTy, Decl,
CodeGenFunction::VTableAuthMode::MustTrap);
};
@@ -1775,7 +1775,7 @@ llvm::Value *ItaniumCXXABI::emitExactDynamicCast(
if (PerformPostCastAuthentication)
VTable = CGF.EmitPointerAuthAuth(StrippingAuthInfo, VTable);
} else
- VTable = CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, SrcDecl);
+ VTable = CGF.GetVTablePtr(ThisAddr, CGF.DefaultPtrTy, SrcDecl);
// Compare the vptr against the expected vptr for the destination type at
// this offset.
@@ -1828,7 +1828,7 @@ llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
// Get the vtable pointer.
llvm::Value *VTable =
- CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
+ CGF.GetVTablePtr(ThisAddr, CGF.DefaultPtrTy, ClassDecl);
// Get the offset-to-top from the vtable.
OffsetToTop =
@@ -1841,7 +1841,7 @@ llvm::Value *ItaniumCXXABI::emitDynamicCastToVoid(CodeGenFunction &CGF,
// Get the vtable pointer.
llvm::Value *VTable =
- CGF.GetVTablePtr(ThisAddr, CGF.UnqualPtrTy, ClassDecl);
+ CGF.GetVTablePtr(ThisAddr, CGF.DefaultPtrTy, ClassDecl);
// Get the offset-to-top from the vtable.
OffsetToTop =
@@ -2578,7 +2578,7 @@ llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
// We can't simply ignore this load using nosanitize metadata because
// the metadata may be lost.
llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGF.SizeTy, CGF.UnqualPtrTy, false);
+ llvm::FunctionType::get(CGF.SizeTy, CGF.DefaultPtrTy, false);
llvm::FunctionCallee F =
CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
return CGF.Builder.CreateCall(F, numElementsPtr.emitRawPointer(CGF));
@@ -2921,7 +2921,7 @@ static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
// We're assuming that the destructor function is something we can
// reasonably call with the default CC.
- llvm::Type *dtorTy = CGF.UnqualPtrTy;
+ llvm::Type *dtorTy = CGF.DefaultPtrTy;
// Preserve address space of addr.
auto AddrAS = addr ? addr->getType()->getPointerAddressSpace() : 0;
@@ -5035,7 +5035,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
auto catchRD = CatchType->getAsCXXRecordDecl();
CharUnits caughtExnAlignment = CGF.CGM.getClassPointerAlignment(catchRD);
- llvm::Type *PtrTy = CGF.UnqualPtrTy; // addrspace 0 ok
+ llvm::Type *PtrTy = CGF.DefaultPtrTy;
// Check for a copy expression. If we don't have a copy expression,
// that means a trivial copy is okay.
@@ -5244,7 +5244,7 @@ void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
llvm::FunctionCallee Dtor,
llvm::Constant *Addr) {
if (D.getTLSKind() != VarDecl::TLS_None) {
- llvm::PointerType *PtrTy = CGF.UnqualPtrTy;
+ llvm::PointerType *PtrTy = CGF.DefaultPtrTy;
// extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
llvm::FunctionType *AtExitTy =
diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index 19d9265..71e2449 100644
--- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -528,7 +528,7 @@ public:
CGM.IntTy,
CGM.IntTy,
CGM.IntTy,
- getImageRelativeType(CGM.UnqualPtrTy),
+ getImageRelativeType(CGM.DefaultPtrTy),
};
BaseClassDescriptorType = llvm::StructType::create(
CGM.getLLVMContext(), FieldTypes, "rtti.BaseClassDescriptor");
@@ -540,7 +540,7 @@ public:
return ClassHierarchyDescriptorType;
// Forward-declare RTTIClassHierarchyDescriptor to break a cycle.
llvm::Type *FieldTypes[] = {CGM.IntTy, CGM.IntTy, CGM.IntTy,
- getImageRelativeType(CGM.UnqualPtrTy)};
+ getImageRelativeType(CGM.DefaultPtrTy)};
ClassHierarchyDescriptorType =
llvm::StructType::create(FieldTypes, "rtti.ClassHierarchyDescriptor");
return ClassHierarchyDescriptorType;
@@ -554,7 +554,7 @@ public:
CGM.IntTy,
CGM.IntTy,
getImageRelativeType(CGM.Int8PtrTy),
- getImageRelativeType(CGM.UnqualPtrTy),
+ getImageRelativeType(CGM.DefaultPtrTy),
getImageRelativeType(CGM.VoidTy),
};
llvm::ArrayRef<llvm::Type *> FieldTypesRef(FieldTypes);
@@ -752,7 +752,7 @@ public:
llvm::SmallString<23> CTATypeName("eh.CatchableTypeArray.");
CTATypeName += llvm::utostr(NumEntries);
- llvm::Type *CTType = getImageRelativeType(CGM.UnqualPtrTy);
+ llvm::Type *CTType = getImageRelativeType(CGM.DefaultPtrTy);
llvm::Type *FieldTypes[] = {
CGM.IntTy, // NumEntries
llvm::ArrayType::get(CTType, NumEntries) // CatchableTypes
@@ -779,7 +779,7 @@ public:
llvm::FunctionCallee getThrowFn() {
// _CxxThrowException is passed an exception object and a ThrowInfo object
// which describes the exception.
- llvm::Type *Args[] = {CGM.Int8PtrTy, CGM.UnqualPtrTy};
+ llvm::Type *Args[] = {CGM.Int8PtrTy, CGM.DefaultPtrTy};
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, Args, /*isVarArg=*/false);
llvm::FunctionCallee Throw =
@@ -920,7 +920,7 @@ void MicrosoftCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
void MicrosoftCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
llvm::Value *Args[] = {llvm::ConstantPointerNull::get(CGM.Int8PtrTy),
- llvm::ConstantPointerNull::get(CGM.UnqualPtrTy)};
+ llvm::ConstantPointerNull::get(CGM.DefaultPtrTy)};
llvm::FunctionCallee Fn = getThrowFn();
if (isNoReturn)
CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, Args);
@@ -1969,13 +1969,13 @@ CGCallee MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
SourceLocation Loc) {
CGBuilderTy &Builder = CGF.Builder;
- Ty = CGF.UnqualPtrTy;
+ Ty = CGF.DefaultPtrTy;
Address VPtr =
adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
llvm::Value *VTable =
- CGF.GetVTablePtr(VPtr, CGF.UnqualPtrTy, MethodDecl->getParent());
+ CGF.GetVTablePtr(VPtr, CGF.DefaultPtrTy, MethodDecl->getParent());
MicrosoftVTableContext &VFTContext = CGM.getMicrosoftVTableContext();
MethodVFTableLocation ML = VFTContext.getMethodVFTableLocation(GD);
@@ -2136,9 +2136,9 @@ MicrosoftCXXABI::EmitVirtualMemPtrThunk(const CXXMethodDecl *MD,
// Load the vfptr and then callee from the vftable. The callee should have
// adjusted 'this' so that the vfptr is at offset zero.
- llvm::Type *ThunkPtrTy = CGF.UnqualPtrTy;
+ llvm::Type *ThunkPtrTy = CGF.DefaultPtrTy;
llvm::Value *VTable =
- CGF.GetVTablePtr(getThisAddress(CGF), CGF.UnqualPtrTy, MD->getParent());
+ CGF.GetVTablePtr(getThisAddress(CGF), CGF.DefaultPtrTy, MD->getParent());
llvm::Value *VFuncPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
ThunkPtrTy, VTable, ML.Index, "vfn");
@@ -2562,7 +2562,7 @@ static ConstantAddress getInitThreadEpochPtr(CodeGenModule &CGM) {
static llvm::FunctionCallee getInitThreadHeaderFn(CodeGenModule &CGM) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- CGM.UnqualPtrTy, /*isVarArg=*/false);
+ CGM.DefaultPtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(
FTy, "_Init_thread_header",
llvm::AttributeList::get(CGM.getLLVMContext(),
@@ -2574,7 +2574,7 @@ static llvm::FunctionCallee getInitThreadHeaderFn(CodeGenModule &CGM) {
static llvm::FunctionCallee getInitThreadFooterFn(CodeGenModule &CGM) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- CGM.UnqualPtrTy, /*isVarArg=*/false);
+ CGM.DefaultPtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(
FTy, "_Init_thread_footer",
llvm::AttributeList::get(CGM.getLLVMContext(),
@@ -2586,7 +2586,7 @@ static llvm::FunctionCallee getInitThreadFooterFn(CodeGenModule &CGM) {
static llvm::FunctionCallee getInitThreadAbortFn(CodeGenModule &CGM) {
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
- CGM.UnqualPtrTy, /*isVarArg=*/false);
+ CGM.DefaultPtrTy, /*isVarArg=*/false);
return CGM.CreateRuntimeFunction(
FTy, "_Init_thread_abort",
llvm::AttributeList::get(CGM.getLLVMContext(),
@@ -3169,7 +3169,7 @@ MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
}
llvm::Value *VBTable =
- Builder.CreateAlignedLoad(CGM.UnqualPtrTy, VBPtr, VBPtrAlign, "vbtable");
+ Builder.CreateAlignedLoad(CGM.DefaultPtrTy, VBPtr, VBPtrAlign, "vbtable");
// Translate from byte offset to table index. It improves analyzability.
llvm::Value *VBTableIndex = Builder.CreateAShr(
@@ -3825,7 +3825,7 @@ MSRTTIBuilder::getBaseClassArray(SmallVectorImpl<MSRTTIClass> &Classes) {
// mode) bytes of padding. We provide a pointer sized amount of padding by
// adding +1 to Classes.size(). The sections have pointer alignment and are
// marked pick-any so it shouldn't matter.
- llvm::Type *PtrType = ABI.getImageRelativeType(CGM.UnqualPtrTy);
+ llvm::Type *PtrType = ABI.getImageRelativeType(CGM.DefaultPtrTy);
auto *ArrType = llvm::ArrayType::get(PtrType, Classes.size() + 1);
auto *BCA =
new llvm::GlobalVariable(Module, ArrType,
@@ -4372,7 +4372,7 @@ llvm::GlobalVariable *MicrosoftCXXABI::getCatchableTypeArray(QualType T) {
CatchableTypes.insert(getCatchableType(getContext().VoidPtrTy));
uint32_t NumEntries = CatchableTypes.size();
- llvm::Type *CTType = getImageRelativeType(CGM.UnqualPtrTy);
+ llvm::Type *CTType = getImageRelativeType(CGM.DefaultPtrTy);
llvm::ArrayType *AT = llvm::ArrayType::get(CTType, NumEntries);
llvm::StructType *CTAType = getCatchableTypeArrayType(NumEntries);
llvm::Constant *Fields[] = {
diff --git a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
index 2429a43..60f9b86 100644
--- a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
@@ -2037,7 +2037,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vld1q_x3_v:
case NEON::BI__builtin_neon_vld1_x4_v:
case NEON::BI__builtin_neon_vld1q_x4_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
@@ -2263,11 +2263,11 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
// in AArch64 it comes last. We may want to stick to one or another.
if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
Arch == llvm::Triple::aarch64_32) {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
}
- llvm::Type *Tys[2] = {UnqualPtrTy, VTy};
+ llvm::Type *Tys[2] = {DefaultPtrTy, VTy};
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
}
case NEON::BI__builtin_neon_vsubhn_v: {
@@ -2858,7 +2858,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(
BuiltinID == clang::ARM::BI__builtin_arm_ldaex ? Intrinsic::arm_ldaex
: Intrinsic::arm_ldrex,
- UnqualPtrTy);
+ DefaultPtrTy);
CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
Val->addParamAttr(
0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
@@ -5225,7 +5225,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
? Intrinsic::aarch64_ldaxr
: Intrinsic::aarch64_ldxr,
- UnqualPtrTy);
+ DefaultPtrTy);
CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
Val->addParamAttr(
0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
@@ -7482,42 +7482,42 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_v:
case NEON::BI__builtin_neon_vld3q_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_v:
case NEON::BI__builtin_neon_vld4q_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld2_dup_v:
case NEON::BI__builtin_neon_vld2q_dup_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_dup_v:
case NEON::BI__builtin_neon_vld3q_dup_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_dup_v:
case NEON::BI__builtin_neon_vld4q_dup_v: {
- llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
+ llvm::Type *Tys[2] = {VTy, DefaultPtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
diff --git a/clang/lib/CodeGen/TargetBuiltins/PPC.cpp b/clang/lib/CodeGen/TargetBuiltins/PPC.cpp
index e71dc9e..44d5938 100644
--- a/clang/lib/CodeGen/TargetBuiltins/PPC.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/PPC.cpp
@@ -59,7 +59,7 @@ static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,
Constraints += MachineClobbers;
}
- llvm::Type *PtrType = CGF.UnqualPtrTy;
+ llvm::Type *PtrType = CGF.DefaultPtrTy;
llvm::FunctionType *FTy = llvm::FunctionType::get(RetType, {PtrType}, false);
llvm::InlineAsm *IA =
diff --git a/clang/lib/CodeGen/Targets/PPC.cpp b/clang/lib/CodeGen/Targets/PPC.cpp
index 380e8c0..35e7655 100644
--- a/clang/lib/CodeGen/Targets/PPC.cpp
+++ b/clang/lib/CodeGen/Targets/PPC.cpp
@@ -490,7 +490,7 @@ RValue PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
llvm::Type *DirectTy = CGF.ConvertType(Ty), *ElementTy = DirectTy;
if (isIndirect)
- DirectTy = CGF.UnqualPtrTy;
+ DirectTy = CGF.DefaultPtrTy;
// Case 1: consume registers.
Address RegAddr = Address::invalid();
diff --git a/clang/lib/Driver/Distro.cpp b/clang/lib/Driver/Distro.cpp
index 838e087..df10458 100644
--- a/clang/lib/Driver/Distro.cpp
+++ b/clang/lib/Driver/Distro.cpp
@@ -61,10 +61,6 @@ static Distro::DistroType DetectLsbRelease(llvm::vfs::FileSystem &VFS) {
if (Version == Distro::UnknownDistro &&
Line.starts_with("DISTRIB_CODENAME="))
Version = llvm::StringSwitch<Distro::DistroType>(Line.substr(17))
- .Case("maverick", Distro::UbuntuMaverick)
- .Case("natty", Distro::UbuntuNatty)
- .Case("oneiric", Distro::UbuntuOneiric)
- .Case("precise", Distro::UbuntuPrecise)
.Case("quantal", Distro::UbuntuQuantal)
.Case("raring", Distro::UbuntuRaring)
.Case("saucy", Distro::UbuntuSaucy)
@@ -120,13 +116,17 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
if (Data.starts_with("Fedora release"))
return Distro::Fedora;
if (Data.starts_with("Red Hat Enterprise Linux") ||
- Data.starts_with("CentOS") || Data.starts_with("Scientific Linux")) {
+ Data.starts_with("CentOS") || Data.starts_with("AlmaLinux") ||
+ Data.starts_with("Rocky Linux") ||
+ Data.starts_with("Scientific Linux")) {
+ if (Data.contains("release 10"))
+ return Distro::RHEL10;
+ if (Data.contains("release 9"))
+ return Distro::RHEL9;
+ if (Data.contains("release 8"))
+ return Distro::RHEL8;
if (Data.contains("release 7"))
return Distro::RHEL7;
- else if (Data.contains("release 6"))
- return Distro::RHEL6;
- else if (Data.contains("release 5"))
- return Distro::RHEL5;
}
return Distro::UnknownDistro;
}
@@ -139,12 +139,6 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
int MajorVersion;
if (!Data.split('.').first.getAsInteger(10, MajorVersion)) {
switch (MajorVersion) {
- case 5:
- return Distro::DebianLenny;
- case 6:
- return Distro::DebianSqueeze;
- case 7:
- return Distro::DebianWheezy;
case 8:
return Distro::DebianJessie;
case 9:
@@ -166,8 +160,6 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
}
}
return llvm::StringSwitch<Distro::DistroType>(Data.split("\n").first)
- .Case("squeeze/sid", Distro::DebianSqueeze)
- .Case("wheezy/sid", Distro::DebianWheezy)
.Case("jessie/sid", Distro::DebianJessie)
.Case("stretch/sid", Distro::DebianStretch)
.Case("buster/sid", Distro::DebianBuster)
diff --git a/clang/lib/Driver/ToolChains/Arch/Mips.cpp b/clang/lib/Driver/ToolChains/Arch/Mips.cpp
index bac8681..227c6a0 100644
--- a/clang/lib/Driver/ToolChains/Arch/Mips.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/Mips.cpp
@@ -482,9 +482,9 @@ bool mips::isFPXXDefault(const llvm::Triple &Triple, StringRef CPUName,
return false;
return llvm::StringSwitch<bool>(CPUName)
- .Cases("mips2", "mips3", "mips4", "mips5", true)
- .Cases("mips32", "mips32r2", "mips32r3", "mips32r5", true)
- .Cases("mips64", "mips64r2", "mips64r3", "mips64r5", true)
+ .Cases({"mips2", "mips3", "mips4", "mips5"}, true)
+ .Cases({"mips32", "mips32r2", "mips32r3", "mips32r5"}, true)
+ .Cases({"mips64", "mips64r2", "mips64r3", "mips64r5"}, true)
.Default(false);
}
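
A self-contained sketch of the initializer-list Cases overload the patch migrates to (assumes the current llvm::StringSwitch API):

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

static bool isPre32r6Cpu(llvm::StringRef cpu) {
  return llvm::StringSwitch<bool>(cpu)
      .Cases({"mips32", "mips32r2", "mips32r3", "mips32r5"}, true)
      .Default(false);
}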
diff --git a/clang/lib/Driver/ToolChains/Darwin.cpp b/clang/lib/Driver/ToolChains/Darwin.cpp
index d2356eb..cc5bcd1 100644
--- a/clang/lib/Driver/ToolChains/Darwin.cpp
+++ b/clang/lib/Driver/ToolChains/Darwin.cpp
@@ -51,15 +51,15 @@ llvm::Triple::ArchType darwin::getArchTypeForMachOArchName(StringRef Str) {
// translation.
return llvm::StringSwitch<llvm::Triple::ArchType>(Str)
- .Cases("i386", "i486", "i486SX", "i586", "i686", llvm::Triple::x86)
- .Cases("pentium", "pentpro", "pentIIm3", "pentIIm5", "pentium4",
+ .Cases({"i386", "i486", "i486SX", "i586", "i686"}, llvm::Triple::x86)
+ .Cases({"pentium", "pentpro", "pentIIm3", "pentIIm5", "pentium4"},
llvm::Triple::x86)
- .Cases("x86_64", "x86_64h", llvm::Triple::x86_64)
+ .Cases({"x86_64", "x86_64h"}, llvm::Triple::x86_64)
// This is derived from the driver.
- .Cases("arm", "armv4t", "armv5", "armv6", "armv6m", llvm::Triple::arm)
- .Cases("armv7", "armv7em", "armv7k", "armv7m", llvm::Triple::arm)
- .Cases("armv7s", "xscale", llvm::Triple::arm)
- .Cases("arm64", "arm64e", llvm::Triple::aarch64)
+ .Cases({"arm", "armv4t", "armv5", "armv6", "armv6m"}, llvm::Triple::arm)
+ .Cases({"armv7", "armv7em", "armv7k", "armv7m"}, llvm::Triple::arm)
+ .Cases({"armv7s", "xscale"}, llvm::Triple::arm)
+ .Cases({"arm64", "arm64e"}, llvm::Triple::aarch64)
.Case("arm64_32", llvm::Triple::aarch64_32)
.Case("r600", llvm::Triple::r600)
.Case("amdgcn", llvm::Triple::amdgcn)
diff --git a/clang/lib/Format/BreakableToken.cpp b/clang/lib/Format/BreakableToken.cpp
index 29db200..994a427 100644
--- a/clang/lib/Format/BreakableToken.cpp
+++ b/clang/lib/Format/BreakableToken.cpp
@@ -306,8 +306,10 @@ BreakableStringLiteralUsingOperators::BreakableStringLiteralUsingOperators(
// In Verilog, all strings are quoted by double quotes, joined by commas,
// and wrapped in braces. The comma is always before the newline.
assert(QuoteStyle == DoubleQuotes);
- LeftBraceQuote = Style.Cpp11BracedListStyle ? "{\"" : "{ \"";
- RightBraceQuote = Style.Cpp11BracedListStyle ? "\"}" : "\" }";
+ LeftBraceQuote =
+ Style.Cpp11BracedListStyle != FormatStyle::BLS_Block ? "{\"" : "{ \"";
+ RightBraceQuote =
+ Style.Cpp11BracedListStyle != FormatStyle::BLS_Block ? "\"}" : "\" }";
Postfix = "\",";
Prefix = "\"";
} else {
diff --git a/clang/lib/Format/ContinuationIndenter.cpp b/clang/lib/Format/ContinuationIndenter.cpp
index cd4c1aa..37c10c6 100644
--- a/clang/lib/Format/ContinuationIndenter.cpp
+++ b/clang/lib/Format/ContinuationIndenter.cpp
@@ -411,7 +411,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
}
if (CurrentState.BreakBeforeClosingBrace &&
(Current.closesBlockOrBlockTypeList(Style) ||
- (Current.is(tok::r_brace) &&
+ (Current.is(tok::r_brace) && Current.MatchingParen &&
Current.isBlockIndentedInitRBrace(Style)))) {
return true;
}
@@ -433,7 +433,7 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
}
if ((startsNextParameter(Current, Style) || Previous.is(tok::semi) ||
(Previous.is(TT_TemplateCloser) && Current.is(TT_StartOfName) &&
- State.Line->First->isNot(TT_AttributeSquare) && Style.isCpp() &&
+ State.Line->First->isNot(TT_AttributeLSquare) && Style.isCpp() &&
// FIXME: This is a temporary workaround for the case where clang-format
// sets BreakBeforeParameter to avoid bin packing and this creates a
// completely unnecessary line break after a template type that isn't
@@ -833,7 +833,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
auto IsOpeningBracket = [&](const FormatToken &Tok) {
auto IsStartOfBracedList = [&]() {
return Tok.is(tok::l_brace) && Tok.isNot(BK_Block) &&
- Style.Cpp11BracedListStyle;
+ Style.Cpp11BracedListStyle != FormatStyle::BLS_Block;
};
if (Tok.isNoneOf(tok::l_paren, TT_TemplateOpener, tok::l_square) &&
!IsStartOfBracedList()) {
@@ -925,7 +925,12 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
TT_TableGenDAGArgOpenerToBreak) &&
!(Current.MacroParent && Previous.MacroParent) &&
(Current.isNot(TT_LineComment) ||
- Previous.isOneOf(BK_BracedInit, TT_VerilogMultiLineListLParen)) &&
+ (Previous.is(BK_BracedInit) &&
+ (Style.Cpp11BracedListStyle != FormatStyle::BLS_FunctionCall ||
+ !Previous.Previous ||
+ Previous.Previous->isNoneOf(tok::identifier, tok::l_paren,
+ BK_BracedInit))) ||
+ Previous.is(TT_VerilogMultiLineListLParen)) &&
!IsInTemplateString(Current)) {
CurrentState.Indent = State.Column + Spaces;
CurrentState.IsAligned = true;
@@ -1369,7 +1374,8 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
}
if (Current.is(TT_LambdaArrow) &&
Previous.isOneOf(tok::kw_noexcept, tok::kw_mutable, tok::kw_constexpr,
- tok::kw_consteval, tok::kw_static, TT_AttributeSquare)) {
+ tok::kw_consteval, tok::kw_static,
+ TT_AttributeRSquare)) {
return ContinuationIndent;
}
if ((Current.isOneOf(tok::r_brace, tok::r_square) ||
@@ -1494,9 +1500,10 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
Current.isNot(tok::l_paren) &&
!Current.endsSequence(TT_StartOfName, TT_AttributeMacro,
TT_PointerOrReference)) ||
- PreviousNonComment->isOneOf(
- TT_AttributeRParen, TT_AttributeSquare, TT_FunctionAnnotationRParen,
- TT_JavaAnnotation, TT_LeadingJavaAnnotation))) ||
+ PreviousNonComment->isOneOf(TT_AttributeRParen, TT_AttributeRSquare,
+ TT_FunctionAnnotationRParen,
+ TT_JavaAnnotation,
+ TT_LeadingJavaAnnotation))) ||
(!Style.IndentWrappedFunctionNames &&
NextNonComment->isOneOf(tok::kw_operator, TT_FunctionDeclarationName))) {
return std::max(CurrentState.LastSpace, CurrentState.Indent);
diff --git a/clang/lib/Format/DefinitionBlockSeparator.cpp b/clang/lib/Format/DefinitionBlockSeparator.cpp
index 3f4ce5f..855f2ef 100644
--- a/clang/lib/Format/DefinitionBlockSeparator.cpp
+++ b/clang/lib/Format/DefinitionBlockSeparator.cpp
@@ -169,7 +169,7 @@ void DefinitionBlockSeparator::separateBlocks(
}
}
- if (Style.isCSharp() && OperateLine->First->is(TT_AttributeSquare))
+ if (Style.isCSharp() && OperateLine->First->is(TT_AttributeLSquare))
return true;
return false;
};
diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp
index 093e88f..edd126c 100644
--- a/clang/lib/Format/Format.cpp
+++ b/clang/lib/Format/Format.cpp
@@ -304,6 +304,18 @@ struct ScalarEnumerationTraits<FormatStyle::BreakTemplateDeclarationsStyle> {
}
};
+template <> struct ScalarEnumerationTraits<FormatStyle::BracedListStyle> {
+ static void enumeration(IO &IO, FormatStyle::BracedListStyle &Value) {
+ IO.enumCase(Value, "Block", FormatStyle::BLS_Block);
+ IO.enumCase(Value, "FunctionCall", FormatStyle::BLS_FunctionCall);
+ IO.enumCase(Value, "AlignFirstComment", FormatStyle::BLS_AlignFirstComment);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "false", FormatStyle::BLS_Block);
+ IO.enumCase(Value, "true", FormatStyle::BLS_AlignFirstComment);
+ }
+};
+
template <> struct ScalarEnumerationTraits<FormatStyle::DAGArgStyle> {
static void enumeration(IO &IO, FormatStyle::DAGArgStyle &Value) {
IO.enumCase(Value, "DontBreak", FormatStyle::DAS_DontBreak);
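Cpp11BracedListStyle thus becomes a three-value enum, and the extra enumCase entries keep old configurations parsing by mapping "false" to BLS_Block and "true" to BLS_AlignFirstComment. A minimal sketch of the brace spacing the enum carries over from the boolean (per the spaceRequiredBetween changes further down):

std::vector<int> v{1, 2, 3};   // FunctionCall / AlignFirstComment (old: true)
std::vector<int> w{ 1, 2, 3 }; // Block (old: false): padding inside the braces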
@@ -1628,7 +1640,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.CompactNamespaces = false;
LLVMStyle.ConstructorInitializerIndentWidth = 4;
LLVMStyle.ContinuationIndentWidth = 4;
- LLVMStyle.Cpp11BracedListStyle = true;
+ LLVMStyle.Cpp11BracedListStyle = FormatStyle::BLS_AlignFirstComment;
LLVMStyle.DerivePointerAlignment = false;
LLVMStyle.DisableFormat = false;
LLVMStyle.EmptyLineAfterAccessModifier = FormatStyle::ELAAMS_Never;
@@ -1904,7 +1916,7 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
// beneficial there. Investigate turning this on once proper string reflow
// has been implemented.
GoogleStyle.BreakStringLiterals = false;
- GoogleStyle.Cpp11BracedListStyle = false;
+ GoogleStyle.Cpp11BracedListStyle = FormatStyle::BLS_Block;
GoogleStyle.SpacesInContainerLiterals = false;
} else if (Language == FormatStyle::LK_ObjC) {
GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
@@ -2000,7 +2012,7 @@ FormatStyle getMozillaStyle() {
MozillaStyle.BreakTemplateDeclarations = FormatStyle::BTDS_Yes;
MozillaStyle.ConstructorInitializerIndentWidth = 2;
MozillaStyle.ContinuationIndentWidth = 2;
- MozillaStyle.Cpp11BracedListStyle = false;
+ MozillaStyle.Cpp11BracedListStyle = FormatStyle::BLS_Block;
MozillaStyle.FixNamespaceComments = false;
MozillaStyle.IndentCaseLabels = true;
MozillaStyle.ObjCSpaceAfterProperty = true;
@@ -2023,7 +2035,7 @@ FormatStyle getWebKitStyle() {
Style.BreakBeforeBraces = FormatStyle::BS_WebKit;
Style.BreakConstructorInitializers = FormatStyle::BCIS_BeforeComma;
Style.ColumnLimit = 0;
- Style.Cpp11BracedListStyle = false;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_Block;
Style.FixNamespaceComments = false;
Style.IndentWidth = 4;
Style.NamespaceIndentation = FormatStyle::NI_Inner;
@@ -2043,7 +2055,7 @@ FormatStyle getGNUStyle() {
Style.BreakBeforeBraces = FormatStyle::BS_GNU;
Style.BreakBeforeTernaryOperators = true;
Style.ColumnLimit = 79;
- Style.Cpp11BracedListStyle = false;
+ Style.Cpp11BracedListStyle = FormatStyle::BLS_Block;
Style.FixNamespaceComments = false;
Style.KeepFormFeed = true;
Style.SpaceBeforeParens = FormatStyle::SBPO_Always;
diff --git a/clang/lib/Format/FormatToken.cpp b/clang/lib/Format/FormatToken.cpp
index cb3fc1c..d1c6264 100644
--- a/clang/lib/Format/FormatToken.cpp
+++ b/clang/lib/Format/FormatToken.cpp
@@ -65,12 +65,13 @@ bool FormatToken::isTypeOrIdentifier(const LangOptions &LangOpts) const {
bool FormatToken::isBlockIndentedInitRBrace(const FormatStyle &Style) const {
assert(is(tok::r_brace));
- if (!Style.Cpp11BracedListStyle ||
+ assert(MatchingParen);
+ assert(MatchingParen->is(tok::l_brace));
+ if (Style.Cpp11BracedListStyle == FormatStyle::BLS_Block ||
Style.AlignAfterOpenBracket != FormatStyle::BAS_BlockIndent) {
return false;
}
const auto *LBrace = MatchingParen;
- assert(LBrace && LBrace->is(tok::l_brace));
if (LBrace->is(BK_BracedInit))
return true;
if (LBrace->Previous && LBrace->Previous->is(tok::equal))
@@ -87,7 +88,8 @@ bool FormatToken::opensBlockOrBlockTypeList(const FormatStyle &Style) const {
return is(TT_ArrayInitializerLSquare) || is(TT_ProtoExtensionLSquare) ||
(is(tok::l_brace) &&
(getBlockKind() == BK_Block || is(TT_DictLiteral) ||
- (!Style.Cpp11BracedListStyle && NestingLevel == 0))) ||
+ (Style.Cpp11BracedListStyle == FormatStyle::BLS_Block &&
+ NestingLevel == 0))) ||
(is(tok::less) && Style.isProto());
}
@@ -183,7 +185,8 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
// In C++11 braced list style, we should not format in columns unless they
// have many items (20 or more) or we allow bin-packing of function call
// arguments.
- if (Style.Cpp11BracedListStyle && !Style.BinPackArguments &&
+ if (Style.Cpp11BracedListStyle != FormatStyle::BLS_Block &&
+ !Style.BinPackArguments &&
(Commas.size() < 19 || !Style.BinPackLongBracedList)) {
return;
}
@@ -227,7 +230,7 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
ItemEnd = Token->MatchingParen;
const FormatToken *NonCommentEnd = ItemEnd->getPreviousNonComment();
ItemLengths.push_back(CodePointsBetween(ItemBegin, NonCommentEnd));
- if (Style.Cpp11BracedListStyle &&
+ if (Style.Cpp11BracedListStyle != FormatStyle::BLS_Block &&
!ItemEnd->Previous->isTrailingComment()) {
// In Cpp11 braced list style, the } and possibly other subsequent
// tokens will need to stay on a line with the last element.
diff --git a/clang/lib/Format/FormatToken.h b/clang/lib/Format/FormatToken.h
index f015d27..6f3d24a 100644
--- a/clang/lib/Format/FormatToken.h
+++ b/clang/lib/Format/FormatToken.h
@@ -30,9 +30,10 @@ namespace format {
TYPE(ArraySubscriptLSquare) \
TYPE(AttributeColon) \
TYPE(AttributeLParen) \
+ TYPE(AttributeLSquare) \
TYPE(AttributeMacro) \
TYPE(AttributeRParen) \
- TYPE(AttributeSquare) \
+ TYPE(AttributeRSquare) \
TYPE(BinaryOperator) \
TYPE(BitFieldColon) \
TYPE(BlockComment) \
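The single TT_AttributeSquare type is split into left and right variants here. A sketch of the resulting annotation (per the TokenAnnotator changes below, only the outer brackets of a C++11 attribute receive the types; the inner pair is no longer marked):

  [[nodiscard]] int f();
  // first '[' -> TT_AttributeLSquare, last ']' -> TT_AttributeRSquare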
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index 5b784ed..c97a9e8 100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -531,10 +531,6 @@ private:
OpeningParen.Previous->is(TT_LeadingJavaAnnotation)) {
CurrentToken->setType(TT_LeadingJavaAnnotation);
}
- if (OpeningParen.Previous &&
- OpeningParen.Previous->is(TT_AttributeSquare)) {
- CurrentToken->setType(TT_AttributeSquare);
- }
if (!HasMultipleLines)
OpeningParen.setPackingKind(PPK_Inconclusive);
@@ -722,9 +718,11 @@ private:
} else if (InsideInlineASM) {
Left->setType(TT_InlineASMSymbolicNameLSquare);
} else if (IsCpp11AttributeSpecifier) {
- Left->setType(TT_AttributeSquare);
- if (!IsInnerSquare && Left->Previous)
- Left->Previous->EndsCppAttributeGroup = false;
+ if (!IsInnerSquare) {
+ Left->setType(TT_AttributeLSquare);
+ if (Left->Previous)
+ Left->Previous->EndsCppAttributeGroup = false;
+ }
} else if (Style.isJavaScript() && Parent &&
Contexts.back().ContextKind == tok::l_brace &&
Parent->isOneOf(tok::l_brace, tok::comma)) {
@@ -733,7 +731,7 @@ private:
Parent && Parent->isOneOf(tok::l_brace, tok::comma)) {
Left->setType(TT_DesignatedInitializerLSquare);
} else if (IsCSharpAttributeSpecifier) {
- Left->setType(TT_AttributeSquare);
+ Left->setType(TT_AttributeLSquare);
} else if (CurrentToken->is(tok::r_square) && Parent &&
Parent->is(TT_TemplateCloser)) {
Left->setType(TT_ArraySubscriptLSquare);
@@ -797,13 +795,12 @@ private:
while (CurrentToken) {
if (CurrentToken->is(tok::r_square)) {
- if (IsCpp11AttributeSpecifier) {
- CurrentToken->setType(TT_AttributeSquare);
- if (!IsInnerSquare)
- CurrentToken->EndsCppAttributeGroup = true;
+ if (IsCpp11AttributeSpecifier && !IsInnerSquare) {
+ CurrentToken->setType(TT_AttributeRSquare);
+ CurrentToken->EndsCppAttributeGroup = true;
}
if (IsCSharpAttributeSpecifier) {
- CurrentToken->setType(TT_AttributeSquare);
+ CurrentToken->setType(TT_AttributeRSquare);
} else if (((CurrentToken->Next &&
CurrentToken->Next->is(tok::l_paren)) ||
(CurrentToken->Previous &&
@@ -1297,7 +1294,7 @@ private:
bool consumeToken() {
if (IsCpp) {
const auto *Prev = CurrentToken->getPreviousNonComment();
- if (Prev && Prev->is(tok::r_square) && Prev->is(TT_AttributeSquare) &&
+ if (Prev && Prev->is(TT_AttributeRSquare) &&
CurrentToken->isOneOf(tok::kw_if, tok::kw_switch, tok::kw_case,
tok::kw_default, tok::kw_for, tok::kw_while) &&
mustBreakAfterAttributes(*CurrentToken, Style)) {
@@ -2850,7 +2847,7 @@ private:
T = Tok->Previous;
continue;
}
- } else if (T->is(TT_AttributeSquare)) {
+ } else if (T->is(TT_AttributeRSquare)) {
// Handle `x = (foo *[[clang::foo]])&v;`:
if (T->MatchingParen && T->MatchingParen->Previous) {
T = T->MatchingParen->Previous;
@@ -3656,7 +3653,7 @@ static FormatToken *getFunctionName(const AnnotatedLine &Line,
for (FormatToken *Tok = Line.getFirstNonComment(), *Name = nullptr; Tok;
Tok = Tok->getNextNonComment()) {
// Skip C++11 attributes both before and after the function name.
- if (Tok->is(tok::l_square) && Tok->is(TT_AttributeSquare)) {
+ if (Tok->is(TT_AttributeLSquare)) {
Tok = Tok->MatchingParen;
if (!Tok)
break;
@@ -3794,18 +3791,12 @@ static bool isFunctionDeclarationName(const LangOptions &LangOpts,
if (Current.is(TT_FunctionDeclarationName))
return true;
- if (!Current.Tok.getIdentifierInfo())
+ if (Current.isNoneOf(tok::identifier, tok::kw_operator))
return false;
const auto *Prev = Current.getPreviousNonComment();
assert(Prev);
- if (Prev->is(tok::coloncolon))
- Prev = Prev->Previous;
-
- if (!Prev)
- return false;
-
const auto &Previous = *Prev;
if (const auto *PrevPrev = Previous.getPreviousNonComment();
@@ -3854,6 +3845,8 @@ static bool isFunctionDeclarationName(const LangOptions &LangOpts,
// Find parentheses of parameter list.
if (Current.is(tok::kw_operator)) {
+ if (Line.startsWith(tok::kw_friend))
+ return true;
if (Previous.Tok.getIdentifierInfo() &&
Previous.isNoneOf(tok::kw_return, tok::kw_co_return)) {
return true;
@@ -4098,7 +4091,8 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) const {
if (Current->is(TT_LineComment)) {
if (Prev->is(BK_BracedInit) && Prev->opensScope()) {
Current->SpacesRequiredBefore =
- (Style.Cpp11BracedListStyle && !Style.SpacesInParensOptions.Other)
+ (Style.Cpp11BracedListStyle == FormatStyle::BLS_AlignFirstComment &&
+ !Style.SpacesInParensOptions.Other)
? 0
: 1;
} else if (Prev->is(TT_VerilogMultiLineListLParen)) {
@@ -4328,7 +4322,7 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
return 35;
if (Right.isNoneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
TT_ArrayInitializerLSquare,
- TT_DesignatedInitializerLSquare, TT_AttributeSquare)) {
+ TT_DesignatedInitializerLSquare, TT_AttributeLSquare)) {
return 500;
}
}
@@ -4449,8 +4443,10 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
(Left.ParameterCount <= 1 || Style.AllowAllArgumentsOnNextLine)) {
return 0;
}
- if (Left.is(tok::l_brace) && !Style.Cpp11BracedListStyle)
+ if (Left.is(tok::l_brace) &&
+ Style.Cpp11BracedListStyle == FormatStyle::BLS_Block) {
return 19;
+ }
return Left.ParameterCount > 1 ? Style.PenaltyBreakBeforeFirstCallParameter
: 19;
}
@@ -4616,7 +4612,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
// Format empty list as `<>`.
if (Left.is(tok::less) && Right.is(tok::greater))
return false;
- return !Style.Cpp11BracedListStyle;
+ return Style.Cpp11BracedListStyle == FormatStyle::BLS_Block;
}
// Don't attempt to format operator<(), as it is handled later.
if (Right.isNot(TT_OverloadedOperatorLParen))
@@ -4784,7 +4780,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
const auto SpaceRequiredForArrayInitializerLSquare =
[](const FormatToken &LSquareTok, const FormatStyle &Style) {
return Style.SpacesInContainerLiterals ||
- (Style.isProto() && !Style.Cpp11BracedListStyle &&
+ (Style.isProto() &&
+ Style.Cpp11BracedListStyle == FormatStyle::BLS_Block &&
LSquareTok.endsSequence(tok::l_square, tok::colon,
TT_SelectorName));
};
@@ -4808,7 +4805,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Right.is(tok::l_square) &&
Right.isNoneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
TT_DesignatedInitializerLSquare,
- TT_StructuredBindingLSquare, TT_AttributeSquare) &&
+ TT_StructuredBindingLSquare, TT_AttributeLSquare) &&
Left.isNoneOf(tok::numeric_constant, TT_DictLiteral) &&
!(Left.isNot(tok::r_square) && Style.SpaceBeforeSquareBrackets &&
Right.is(TT_ArraySubscriptLSquare))) {
@@ -4817,7 +4814,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if ((Left.is(tok::l_brace) && Left.isNot(BK_Block)) ||
(Right.is(tok::r_brace) && Right.MatchingParen &&
Right.MatchingParen->isNot(BK_Block))) {
- return !Style.Cpp11BracedListStyle || Style.SpacesInParensOptions.Other;
+ return Style.Cpp11BracedListStyle == FormatStyle::BLS_Block ||
+ Style.SpacesInParensOptions.Other;
}
if (Left.is(TT_BlockComment)) {
// No whitespace in x(/*foo=*/1), except for JavaScript.
@@ -4826,7 +4824,7 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
// Space between template and attribute.
// e.g. template <typename T> [[nodiscard]] ...
- if (Left.is(TT_TemplateCloser) && Right.is(TT_AttributeSquare))
+ if (Left.is(TT_TemplateCloser) && Right.is(TT_AttributeLSquare))
return true;
// Space before parentheses common for all languages
if (Right.is(tok::l_paren)) {
@@ -4841,10 +4839,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return Style.SpaceBeforeParensOptions.AfterRequiresInExpression ||
spaceRequiredBeforeParens(Right);
}
- if (Left.is(TT_AttributeRParen) ||
- (Left.is(tok::r_square) && Left.is(TT_AttributeSquare))) {
+ if (Left.isOneOf(TT_AttributeRParen, TT_AttributeRSquare))
return true;
- }
if (Left.is(TT_ForEachMacro)) {
return Style.SpaceBeforeParensOptions.AfterForeachMacros ||
spaceRequiredBeforeParens(Right);
@@ -4999,7 +4995,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
Left.Children.empty()) {
if (Left.is(BK_Block))
return Style.SpaceInEmptyBraces != FormatStyle::SIEB_Never;
- if (Style.Cpp11BracedListStyle) {
+ if (Style.Cpp11BracedListStyle != FormatStyle::BLS_Block) {
return Style.SpacesInParens == FormatStyle::SIPO_Custom &&
Style.SpacesInParensOptions.InEmptyParentheses;
}
@@ -5081,7 +5077,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.MatchingParen &&
Left.MatchingParen->is(TT_ProtoExtensionLSquare) &&
Right.isOneOf(tok::l_brace, tok::less)) {
- return !Style.Cpp11BracedListStyle;
+ return Style.Cpp11BracedListStyle == FormatStyle::BLS_Block;
}
// A percent is probably part of a formatting specification, such as %lld.
if (Left.is(tok::percent))
@@ -5521,7 +5517,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.is(tok::greater) && Right.is(tok::greater)) {
if (Style.isTextProto() ||
(Style.Language == FormatStyle::LK_Proto && Left.is(TT_DictLiteral))) {
- return !Style.Cpp11BracedListStyle;
+ return Style.Cpp11BracedListStyle == FormatStyle::BLS_Block;
}
return Right.is(TT_TemplateCloser) && Left.is(TT_TemplateCloser) &&
((Style.Standard < FormatStyle::LS_Cpp11) ||
@@ -5662,16 +5658,14 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
}
// Break after C# [...] and before public/protected/private/internal.
- if (Left.is(TT_AttributeSquare) && Left.is(tok::r_square) &&
+ if (Left.is(TT_AttributeRSquare) &&
(Right.isAccessSpecifier(/*ColonRequired=*/false) ||
Right.is(Keywords.kw_internal))) {
return true;
}
// Break between ] and [ but only when there are really 2 attributes.
- if (Left.is(TT_AttributeSquare) && Right.is(TT_AttributeSquare) &&
- Left.is(tok::r_square) && Right.is(tok::l_square)) {
+ if (Left.is(TT_AttributeRSquare) && Right.is(TT_AttributeLSquare))
return true;
- }
} else if (Style.isJavaScript()) {
// FIXME: This might apply to other languages and token kinds.
if (Right.is(tok::string_literal) && Left.is(tok::plus) && BeforeLeft &&
@@ -6382,7 +6376,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return false;
}
if (Left.is(tok::equal) && Right.is(tok::l_brace) &&
- !Style.Cpp11BracedListStyle) {
+ Style.Cpp11BracedListStyle == FormatStyle::BLS_Block) {
return false;
}
if (Left.is(TT_AttributeLParen) ||
@@ -6411,8 +6405,10 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
if (Right.isAttribute())
return true;
- if (Right.is(tok::l_square) && Right.is(TT_AttributeSquare))
- return Left.isNot(TT_AttributeSquare);
+ if (Right.is(TT_AttributeLSquare)) {
+ assert(Left.isNot(tok::l_square));
+ return true;
+ }
if (Left.is(tok::identifier) && Right.is(tok::string_literal))
return true;
@@ -6453,8 +6449,12 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
Left.getPrecedence() == prec::Assignment)) {
return true;
}
- if ((Left.is(TT_AttributeSquare) && Right.is(tok::l_square)) ||
- (Left.is(tok::r_square) && Right.is(TT_AttributeSquare))) {
+ if (Left.is(TT_AttributeLSquare) && Right.is(tok::l_square)) {
+ assert(Right.isNot(TT_AttributeLSquare));
+ return false;
+ }
+ if (Left.is(tok::r_square) && Right.is(TT_AttributeRSquare)) {
+ assert(Left.isNot(TT_AttributeRSquare));
return false;
}
diff --git a/clang/lib/Format/WhitespaceManager.cpp b/clang/lib/Format/WhitespaceManager.cpp
index 7348a3a..65fc65e 100644
--- a/clang/lib/Format/WhitespaceManager.cpp
+++ b/clang/lib/Format/WhitespaceManager.cpp
@@ -432,7 +432,11 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
// right-justified. It is used to align compound assignments like `+=` and `=`.
// When RightJustify and ACS.PadOperators are true, operators in each block to
// be aligned will be padded on the left to the same length before aligning.
-template <typename F>
+//
+// With SimpleCheck set, the check does not look at the indentation and
+// nesting level to recurse into the line for alignment, and does not count
+// the commas. This is used e.g. for aligning macro definitions.
+template <typename F, bool SimpleCheck = false>
static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
SmallVector<WhitespaceManager::Change, 16> &Changes,
unsigned StartAt,
@@ -465,9 +469,9 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
// Measure the scope level (i.e. depth of (), [], {}) of the first token, and
// abort when we hit any token in a higher scope than the starting one.
- auto IndentAndNestingLevel = StartAt < Changes.size()
- ? Changes[StartAt].indentAndNestingLevel()
- : std::tuple<unsigned, unsigned, unsigned>();
+ const auto IndentAndNestingLevel =
+ StartAt < Changes.size() ? Changes[StartAt].indentAndNestingLevel()
+ : std::tuple<unsigned, unsigned, unsigned>();
// Keep track of the number of commas before the matching tokens, we will only
// align a sequence of matching tokens if they are preceded by the same number
@@ -502,15 +506,15 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
MatchedIndices.clear();
};
- unsigned i = StartAt;
- for (unsigned e = Changes.size(); i != e; ++i) {
- auto &CurrentChange = Changes[i];
+ unsigned I = StartAt;
+ for (unsigned E = Changes.size(); I != E; ++I) {
+ auto &CurrentChange = Changes[I];
if (CurrentChange.indentAndNestingLevel() < IndentAndNestingLevel)
break;
if (CurrentChange.NewlinesBefore != 0) {
CommasBeforeMatch = 0;
- EndOfSequence = i;
+ EndOfSequence = I;
// Whether to break the alignment sequence because of an empty line.
bool EmptyLineBreak =
@@ -526,8 +530,8 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
// A new line starts, re-initialize line status tracking bools.
// Keep the match state if a string literal is continued on this line.
- if (i == 0 || CurrentChange.Tok->isNot(tok::string_literal) ||
- Changes[i - 1].Tok->isNot(tok::string_literal)) {
+ if (I == 0 || CurrentChange.Tok->isNot(tok::string_literal) ||
+ Changes[I - 1].Tok->isNot(tok::string_literal)) {
FoundMatchOnLine = false;
}
LineIsComment = true;
@@ -536,14 +540,17 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
if (CurrentChange.Tok->isNot(tok::comment))
LineIsComment = false;
- if (CurrentChange.Tok->is(tok::comma)) {
- ++CommasBeforeMatch;
- } else if (CurrentChange.indentAndNestingLevel() > IndentAndNestingLevel) {
- // Call AlignTokens recursively, skipping over this scope block.
- unsigned StoppedAt =
- AlignTokens(Style, Matches, Changes, i, ACS, RightJustify);
- i = StoppedAt - 1;
- continue;
+ if (!SimpleCheck) {
+ if (CurrentChange.Tok->is(tok::comma)) {
+ ++CommasBeforeMatch;
+ } else if (CurrentChange.indentAndNestingLevel() >
+ IndentAndNestingLevel) {
+ // Call AlignTokens recursively, skipping over this scope block.
+ const auto StoppedAt =
+ AlignTokens(Style, Matches, Changes, I, ACS, RightJustify);
+ I = StoppedAt - 1;
+ continue;
+ }
}
if (!Matches(CurrentChange))
@@ -552,7 +559,7 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
// If there is more than one matching token per line, or if the number of
// preceding commas, do not match anymore, end the sequence.
if (FoundMatchOnLine || CommasBeforeMatch != CommasBeforeLastMatch) {
- MatchedIndices.push_back(i);
+ MatchedIndices.push_back(I);
AlignCurrentSequence();
}
@@ -560,29 +567,69 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
FoundMatchOnLine = true;
if (StartOfSequence == 0)
- StartOfSequence = i;
+ StartOfSequence = I;
unsigned ChangeWidthLeft = CurrentChange.StartOfTokenColumn;
unsigned ChangeWidthAnchor = 0;
unsigned ChangeWidthRight = 0;
+ unsigned CurrentChangeWidthRight = 0;
if (RightJustify)
if (ACS.PadOperators)
ChangeWidthAnchor = CurrentChange.TokenLength;
else
ChangeWidthLeft += CurrentChange.TokenLength;
else
- ChangeWidthRight = CurrentChange.TokenLength;
- for (unsigned j = i + 1; j != e && Changes[j].NewlinesBefore == 0; ++j) {
- ChangeWidthRight += Changes[j].Spaces;
+ CurrentChangeWidthRight = CurrentChange.TokenLength;
+ const FormatToken *MatchingParenToEncounter = nullptr;
+ for (unsigned J = I + 1;
+ J != E && (Changes[J].NewlinesBefore == 0 || MatchingParenToEncounter);
+ ++J) {
+ const auto &Change = Changes[J];
+ const auto *Tok = Change.Tok;
+
+ if (Tok->MatchingParen) {
+ if (Tok->isOneOf(tok::l_paren, tok::l_brace, tok::l_square,
+ TT_TemplateOpener) &&
+ !MatchingParenToEncounter) {
+ // If the next token is on the next line, we probably don't need to
+ // check the following lengths, because it most likely isn't aligned
+ // with the rest.
+ if (J + 1 != E && Changes[J + 1].NewlinesBefore == 0)
+ MatchingParenToEncounter = Tok->MatchingParen;
+ } else if (MatchingParenToEncounter == Tok->MatchingParen) {
+ MatchingParenToEncounter = nullptr;
+ }
+ }
+
+ if (Change.NewlinesBefore != 0) {
+ ChangeWidthRight = std::max(ChangeWidthRight, CurrentChangeWidthRight);
+ const auto ChangeWidthStart = ChangeWidthLeft + ChangeWidthAnchor;
+        // If the current token's column is before the beginning of the
+        // alignment, we drop out here, because the next line does not have
+        // to be moved with the previous one(s) for the alignment. E.g.:
+ // int i1 = 1; | <- ColumnLimit | int i1 = 1;
+ // int j = 0; | Without the break -> | int j = 0;
+ // int k = bar( | We still want to align the = | int k = bar(
+ // argument1, | here, even if we can't move | argument1,
+ // argument2); | the following lines. | argument2);
+ if (static_cast<unsigned>(Change.Spaces) < ChangeWidthStart)
+ break;
+ CurrentChangeWidthRight = Change.Spaces - ChangeWidthStart;
+ } else {
+ CurrentChangeWidthRight += Change.Spaces;
+ }
+
// Changes are generally 1:1 with the tokens, but a change could also be
// inside of a token, in which case it's counted more than once: once for
// the whitespace surrounding the token (!IsInsideToken) and once for
// each whitespace change within it (IsInsideToken).
// Therefore, changes inside of a token should only count the space.
- if (!Changes[j].IsInsideToken)
- ChangeWidthRight += Changes[j].TokenLength;
+ if (!Change.IsInsideToken)
+ CurrentChangeWidthRight += Change.TokenLength;
}
+ ChangeWidthRight = std::max(ChangeWidthRight, CurrentChangeWidthRight);
+
// If we are restricted by the maximum column width, end the sequence.
unsigned NewLeft = std::max(ChangeWidthLeft, WidthLeft);
unsigned NewAnchor = std::max(ChangeWidthAnchor, WidthAnchor);
@@ -591,7 +638,7 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
if (Style.ColumnLimit != 0 &&
Style.ColumnLimit < NewLeft + NewAnchor + NewRight) {
AlignCurrentSequence();
- StartOfSequence = i;
+ StartOfSequence = I;
WidthLeft = ChangeWidthLeft;
WidthAnchor = ChangeWidthAnchor;
WidthRight = ChangeWidthRight;
@@ -600,12 +647,12 @@ static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
WidthAnchor = NewAnchor;
WidthRight = NewRight;
}
- MatchedIndices.push_back(i);
+ MatchedIndices.push_back(I);
}
- EndOfSequence = i;
+ EndOfSequence = I;
AlignCurrentSequence();
- return i;
+ return I;
}
// Aligns a sequence of matching tokens, on the MinColumn column.
@@ -656,7 +703,7 @@ void WhitespaceManager::alignConsecutiveMacros() {
auto AlignMacrosMatches = [](const Change &C) {
const FormatToken *Current = C.Tok;
- unsigned SpacesRequiredBefore = 1;
+ assert(Current);
if (Current->SpacesRequiredBefore == 0 || !Current->Previous)
return false;
@@ -665,79 +712,26 @@ void WhitespaceManager::alignConsecutiveMacros() {
// If token is a ")", skip over the parameter list, to the
// token that precedes the "("
- if (Current->is(tok::r_paren) && Current->MatchingParen) {
- Current = Current->MatchingParen->Previous;
- SpacesRequiredBefore = 0;
- }
-
- if (!Current || Current->isNot(tok::identifier))
- return false;
-
- if (!Current->Previous || Current->Previous->isNot(tok::pp_define))
- return false;
-
- // For a macro function, 0 spaces are required between the
- // identifier and the lparen that opens the parameter list.
- // For a simple macro, 1 space is required between the
- // identifier and the first token of the defined value.
- return Current->Next->SpacesRequiredBefore == SpacesRequiredBefore;
- };
-
- unsigned MinColumn = 0;
-
- // Start and end of the token sequence we're processing.
- unsigned StartOfSequence = 0;
- unsigned EndOfSequence = 0;
-
- // Whether a matching token has been found on the current line.
- bool FoundMatchOnLine = false;
-
- // Whether the current line consists only of comments
- bool LineIsComment = true;
-
- unsigned I = 0;
- for (unsigned E = Changes.size(); I != E; ++I) {
- if (Changes[I].NewlinesBefore != 0) {
- EndOfSequence = I;
-
- // Whether to break the alignment sequence because of an empty line.
- bool EmptyLineBreak = (Changes[I].NewlinesBefore > 1) &&
- !Style.AlignConsecutiveMacros.AcrossEmptyLines;
-
- // Whether to break the alignment sequence because of a line without a
- // match.
- bool NoMatchBreak =
- !FoundMatchOnLine &&
- !(LineIsComment && Style.AlignConsecutiveMacros.AcrossComments);
-
- if (EmptyLineBreak || NoMatchBreak) {
- AlignMatchingTokenSequence(StartOfSequence, EndOfSequence, MinColumn,
- AlignMacrosMatches, Changes);
+ if (Current->is(tok::r_paren)) {
+ const auto *MatchingParen = Current->MatchingParen;
+ // For a macro function, 0 spaces are required between the
+ // identifier and the lparen that opens the parameter list.
+ if (!MatchingParen || MatchingParen->SpacesRequiredBefore > 0 ||
+ !MatchingParen->Previous) {
+ return false;
}
-
- // A new line starts, re-initialize line status tracking bools.
- FoundMatchOnLine = false;
- LineIsComment = true;
+ Current = MatchingParen->Previous;
+ } else if (Current->Next->SpacesRequiredBefore != 1) {
+ // For a simple macro, 1 space is required between the
+ // identifier and the first token of the defined value.
+ return false;
}
- if (Changes[I].Tok->isNot(tok::comment))
- LineIsComment = false;
-
- if (!AlignMacrosMatches(Changes[I]))
- continue;
-
- FoundMatchOnLine = true;
-
- if (StartOfSequence == 0)
- StartOfSequence = I;
-
- unsigned ChangeMinColumn = Changes[I].StartOfTokenColumn;
- MinColumn = std::max(MinColumn, ChangeMinColumn);
- }
+ return Current->endsSequence(tok::identifier, tok::pp_define);
+ };
- EndOfSequence = I;
- AlignMatchingTokenSequence(StartOfSequence, EndOfSequence, MinColumn,
- AlignMacrosMatches, Changes);
+ AlignTokens<decltype(AlignMacrosMatches) &, /*SimpleCheck=*/true>(
+ Style, AlignMacrosMatches, Changes, 0, Style.AlignConsecutiveMacros);
}
void WhitespaceManager::alignConsecutiveAssignments() {
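With this rewrite, alignConsecutiveMacros goes through the shared AlignTokens path in SimpleCheck mode. The intended output shape is unchanged (illustrative):

#define SHORT_NAME  1
#define LONGER_NAME 0x0F
#define FN(x)       ((x) + 1)  // function-like macros keep 0 spaces before '('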
@@ -1238,7 +1232,8 @@ void WhitespaceManager::alignArrayInitializersRightJustified(
if (!CellDescs.isRectangular())
return;
- const int BracePadding = Style.Cpp11BracedListStyle ? 0 : 1;
+ const int BracePadding =
+ Style.Cpp11BracedListStyle != FormatStyle::BLS_Block ? 0 : 1;
auto &Cells = CellDescs.Cells;
// Now go through and fixup the spaces.
auto *CellIter = Cells.begin();
@@ -1314,7 +1309,8 @@ void WhitespaceManager::alignArrayInitializersLeftJustified(
if (!CellDescs.isRectangular())
return;
- const int BracePadding = Style.Cpp11BracedListStyle ? 0 : 1;
+ const int BracePadding =
+ Style.Cpp11BracedListStyle != FormatStyle::BLS_Block ? 0 : 1;
auto &Cells = CellDescs.Cells;
// Now go through and fixup the spaces.
auto *CellIter = Cells.begin();
diff --git a/clang/lib/Frontend/FrontendAction.cpp b/clang/lib/Frontend/FrontendAction.cpp
index 1b63c40..0daa20a 100644
--- a/clang/lib/Frontend/FrontendAction.cpp
+++ b/clang/lib/Frontend/FrontendAction.cpp
@@ -629,7 +629,7 @@ static std::error_code collectModuleHeaderIncludes(
// Check whether this entry has an extension typically associated with
// headers.
if (!llvm::StringSwitch<bool>(llvm::sys::path::extension(Dir->path()))
- .Cases(".h", ".H", ".hh", ".hpp", true)
+ .Cases({".h", ".H", ".hh", ".hpp"}, true)
.Default(false))
continue;
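The StringSwitch::Cases calls here and in the files below migrate to the overload that takes the case strings as a braced list. Equivalent standalone usage (a sketch; Extension is a placeholder StringRef):

bool IsHeader = llvm::StringSwitch<bool>(Extension)
                    .Cases({".h", ".H", ".hh", ".hpp"}, true)
                    .Default(false);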
diff --git a/clang/lib/Headers/__clang_hip_runtime_wrapper.h b/clang/lib/Headers/__clang_hip_runtime_wrapper.h
index fb0ece9..19ce7a5 100644
--- a/clang/lib/Headers/__clang_hip_runtime_wrapper.h
+++ b/clang/lib/Headers/__clang_hip_runtime_wrapper.h
@@ -26,6 +26,7 @@
#define __managed__ __attribute__((managed))
#define __cluster_dims__(...) __attribute__((cluster_dims(__VA_ARGS__)))
+#define __no_cluster__ __attribute__((no_cluster))
#if !defined(__cplusplus) || __cplusplus < 201103L
#define nullptr NULL;
diff --git a/clang/lib/Headers/avx2intrin.h b/clang/lib/Headers/avx2intrin.h
index fa7f4c2..fdb825f 100644
--- a/clang/lib/Headers/avx2intrin.h
+++ b/clang/lib/Headers/avx2intrin.h
@@ -1298,9 +1298,8 @@ _mm256_min_epu32(__m256i __a, __m256i __b) {
/// \param __a
/// A 256-bit integer vector containing the source bytes.
/// \returns The 32-bit integer mask.
-static __inline__ int __DEFAULT_FN_ATTRS256
-_mm256_movemask_epi8(__m256i __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_movemask_epi8(__m256i __a) {
return __builtin_ia32_pmovmskb256((__v32qi)__a);
}
@@ -1650,9 +1649,8 @@ _mm256_mul_epi32(__m256i __a, __m256i __b) {
/// \param __b
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the rounded products.
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_mulhrs_epi16(__m256i __a, __m256i __b)
-{
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
+_mm256_mulhrs_epi16(__m256i __a, __m256i __b) {
return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)__a, (__v16hi)__b);
}
@@ -1670,8 +1668,7 @@ _mm256_mulhrs_epi16(__m256i __a, __m256i __b)
/// A 256-bit vector of [16 x i16] containing one of the source operands.
/// \returns A 256-bit vector of [16 x i16] containing the products.
static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
-_mm256_mulhi_epu16(__m256i __a, __m256i __b)
-{
+_mm256_mulhi_epu16(__m256i __a, __m256i __b) {
return (__m256i)__builtin_ia32_pmulhuw256((__v16hu)__a, (__v16hu)__b);
}
diff --git a/clang/lib/Headers/avx512bwintrin.h b/clang/lib/Headers/avx512bwintrin.h
index 23b2d29..ac75b6c 100644
--- a/clang/lib/Headers/avx512bwintrin.h
+++ b/clang/lib/Headers/avx512bwintrin.h
@@ -1003,23 +1003,20 @@ _mm512_maskz_permutex2var_epi16(__mmask32 __U, __m512i __A, __m512i __I,
(__v32hi)_mm512_setzero_si512());
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mulhrs_epi16(__m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mulhrs_epi16(__m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_pmulhrsw512((__v32hi)__A, (__v32hi)__B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_mask_mulhrs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_mask_mulhrs_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_mulhrs_epi16(__A, __B),
(__v32hi)__W);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS512
-_mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B)
-{
+static __inline__ __m512i __DEFAULT_FN_ATTRS512_CONSTEXPR
+_mm512_maskz_mulhrs_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
(__v32hi)_mm512_mulhrs_epi16(__A, __B),
(__v32hi)_mm512_setzero_si512());
diff --git a/clang/lib/Headers/avx512dqintrin.h b/clang/lib/Headers/avx512dqintrin.h
index 3681cca..fef1a2d 100644
--- a/clang/lib/Headers/avx512dqintrin.h
+++ b/clang/lib/Headers/avx512dqintrin.h
@@ -1200,10 +1200,10 @@ _mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) {
(__v8di)_mm512_setzero_si512());
}
-#define _mm512_extractf32x8_ps(A, imm) \
- ((__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
- (__v8sf)_mm256_undefined_ps(), \
- (__mmask8)-1))
+#define _mm512_extractf32x8_ps(A, imm) \
+ ((__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
+ (__v8sf)_mm256_setzero_ps(), \
+ (__mmask8) - 1))
#define _mm512_mask_extractf32x8_ps(W, U, A, imm) \
((__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
@@ -1215,11 +1215,10 @@ _mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) {
(__v8sf)_mm256_setzero_ps(), \
(__mmask8)(U)))
-#define _mm512_extractf64x2_pd(A, imm) \
- ((__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
- (int)(imm), \
- (__v2df)_mm_undefined_pd(), \
- (__mmask8)-1))
+#define _mm512_extractf64x2_pd(A, imm) \
+ ((__m128d)__builtin_ia32_extractf64x2_512_mask( \
+ (__v8df)(__m512d)(A), (int)(imm), (__v2df)_mm_setzero_pd(), \
+ (__mmask8) - 1))
#define _mm512_mask_extractf64x2_pd(W, U, A, imm) \
((__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
@@ -1233,10 +1232,10 @@ _mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) {
(__v2df)_mm_setzero_pd(), \
(__mmask8)(U)))
-#define _mm512_extracti32x8_epi32(A, imm) \
- ((__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
- (__v8si)_mm256_undefined_si256(), \
- (__mmask8)-1))
+#define _mm512_extracti32x8_epi32(A, imm) \
+ ((__m256i)__builtin_ia32_extracti32x8_mask( \
+ (__v16si)(__m512i)(A), (int)(imm), (__v8si)_mm256_setzero_si256(), \
+ (__mmask8) - 1))
#define _mm512_mask_extracti32x8_epi32(W, U, A, imm) \
((__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
@@ -1248,11 +1247,10 @@ _mm512_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) {
(__v8si)_mm256_setzero_si256(), \
(__mmask8)(U)))
-#define _mm512_extracti64x2_epi64(A, imm) \
- ((__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
- (int)(imm), \
- (__v2di)_mm_undefined_si128(), \
- (__mmask8)-1))
+#define _mm512_extracti64x2_epi64(A, imm) \
+ ((__m128i)__builtin_ia32_extracti64x2_512_mask( \
+ (__v8di)(__m512i)(A), (int)(imm), (__v2di)_mm_setzero_si128(), \
+ (__mmask8) - 1))
#define _mm512_mask_extracti64x2_epi64(W, U, A, imm) \
((__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
diff --git a/clang/lib/Headers/avx512fintrin.h b/clang/lib/Headers/avx512fintrin.h
index 07de036..18c4a44 100644
--- a/clang/lib/Headers/avx512fintrin.h
+++ b/clang/lib/Headers/avx512fintrin.h
@@ -3156,10 +3156,10 @@ _mm512_maskz_permutex2var_epi64(__mmask8 __U, __m512i __A, __m512i __I,
(__v16si)_mm512_setzero_si512()))
/* Vector Extract */
-#define _mm512_extractf64x4_pd(A, I) \
- ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
- (__v4df)_mm256_undefined_pd(), \
- (__mmask8)-1))
+#define _mm512_extractf64x4_pd(A, I) \
+ ((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
+ (__v4df)_mm256_setzero_pd(), \
+ (__mmask8) - 1))
#define _mm512_mask_extractf64x4_pd(W, U, A, imm) \
((__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
@@ -3171,10 +3171,10 @@ _mm512_maskz_permutex2var_epi64(__mmask8 __U, __m512i __A, __m512i __I,
(__v4df)_mm256_setzero_pd(), \
(__mmask8)(U)))
-#define _mm512_extractf32x4_ps(A, I) \
- ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \
- (__v4sf)_mm_undefined_ps(), \
- (__mmask8)-1))
+#define _mm512_extractf32x4_ps(A, I) \
+ ((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \
+ (__v4sf)_mm_setzero_ps(), \
+ (__mmask8) - 1))
#define _mm512_mask_extractf32x4_ps(W, U, A, imm) \
((__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
@@ -7089,10 +7089,10 @@ _mm512_mask_cvtepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
__builtin_ia32_pmovqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M);
}
-#define _mm512_extracti32x4_epi32(A, imm) \
- ((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
- (__v4si)_mm_undefined_si128(), \
- (__mmask8)-1))
+#define _mm512_extracti32x4_epi32(A, imm) \
+ ((__m128i)__builtin_ia32_extracti32x4_mask( \
+ (__v16si)(__m512i)(A), (int)(imm), (__v4si)_mm_setzero_si128(), \
+ (__mmask8) - 1))
#define _mm512_mask_extracti32x4_epi32(W, U, A, imm) \
((__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
@@ -7104,10 +7104,10 @@ _mm512_mask_cvtepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
(__v4si)_mm_setzero_si128(), \
(__mmask8)(U)))
-#define _mm512_extracti64x4_epi64(A, imm) \
+#define _mm512_extracti64x4_epi64(A, imm) \
((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
- (__v4di)_mm256_undefined_si256(), \
- (__mmask8)-1))
+ (__v4di)_mm256_setzero_si256(), \
+ (__mmask8) - 1))
#define _mm512_mask_extracti64x4_epi64(W, U, A, imm) \
((__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
diff --git a/clang/lib/Headers/avx512vlbwintrin.h b/clang/lib/Headers/avx512vlbwintrin.h
index 639fb60..0fcfe37 100644
--- a/clang/lib/Headers/avx512vlbwintrin.h
+++ b/clang/lib/Headers/avx512vlbwintrin.h
@@ -1510,28 +1510,28 @@ _mm256_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask16 __M, __m256i __A)
__builtin_ia32_pmovuswb256mem_mask ((__v16qi*) __P, (__v16hi) __A, __M);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_mask_mulhrs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_mulhrs_epi16(__X, __Y),
(__v8hi)__W);
}
-static __inline__ __m128i __DEFAULT_FN_ATTRS128
+static __inline__ __m128i __DEFAULT_FN_ATTRS128_CONSTEXPR
_mm_maskz_mulhrs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) {
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
(__v8hi)_mm_mulhrs_epi16(__X, __Y),
(__v8hi)_mm_setzero_si128());
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_mask_mulhrs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_mulhrs_epi16(__X, __Y),
(__v16hi)__W);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS256
+static __inline__ __m256i __DEFAULT_FN_ATTRS256_CONSTEXPR
_mm256_maskz_mulhrs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) {
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
(__v16hi)_mm256_mulhrs_epi16(__X, __Y),
diff --git a/clang/lib/Headers/avx512vldqintrin.h b/clang/lib/Headers/avx512vldqintrin.h
index ee7974e..707d039 100644
--- a/clang/lib/Headers/avx512vldqintrin.h
+++ b/clang/lib/Headers/avx512vldqintrin.h
@@ -1062,11 +1062,10 @@ _mm256_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) {
(__v4di)_mm256_setzero_si256());
}
-#define _mm256_extractf64x2_pd(A, imm) \
- ((__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
- (int)(imm), \
- (__v2df)_mm_undefined_pd(), \
- (__mmask8)-1))
+#define _mm256_extractf64x2_pd(A, imm) \
+ ((__m128d)__builtin_ia32_extractf64x2_256_mask( \
+ (__v4df)(__m256d)(A), (int)(imm), (__v2df)_mm_setzero_pd(), \
+ (__mmask8) - 1))
#define _mm256_mask_extractf64x2_pd(W, U, A, imm) \
((__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
@@ -1080,11 +1079,10 @@ _mm256_maskz_broadcast_i64x2(__mmask8 __M, __m128i __A) {
(__v2df)_mm_setzero_pd(), \
(__mmask8)(U)))
-#define _mm256_extracti64x2_epi64(A, imm) \
- ((__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
- (int)(imm), \
- (__v2di)_mm_undefined_si128(), \
- (__mmask8)-1))
+#define _mm256_extracti64x2_epi64(A, imm) \
+ ((__m128i)__builtin_ia32_extracti64x2_256_mask( \
+ (__v4di)(__m256i)(A), (int)(imm), (__v2di)_mm_setzero_si128(), \
+ (__mmask8) - 1))
#define _mm256_mask_extracti64x2_epi64(W, U, A, imm) \
((__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
diff --git a/clang/lib/Headers/avx512vlintrin.h b/clang/lib/Headers/avx512vlintrin.h
index 676b5a0..92bb444 100644
--- a/clang/lib/Headers/avx512vlintrin.h
+++ b/clang/lib/Headers/avx512vlintrin.h
@@ -7545,11 +7545,10 @@ _mm256_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
__builtin_ia32_pmovqw256mem_mask ((__v8hi *) __P, (__v4di) __A, __M);
}
-#define _mm256_extractf32x4_ps(A, imm) \
- ((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
- (int)(imm), \
- (__v4sf)_mm_undefined_ps(), \
- (__mmask8)-1))
+#define _mm256_extractf32x4_ps(A, imm) \
+ ((__m128)__builtin_ia32_extractf32x4_256_mask( \
+ (__v8sf)(__m256)(A), (int)(imm), (__v4sf)_mm_setzero_ps(), \
+ (__mmask8) - 1))
#define _mm256_mask_extractf32x4_ps(W, U, A, imm) \
((__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
@@ -7563,11 +7562,10 @@ _mm256_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
(__v4sf)_mm_setzero_ps(), \
(__mmask8)(U)))
-#define _mm256_extracti32x4_epi32(A, imm) \
- ((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
- (int)(imm), \
- (__v4si)_mm_undefined_si128(), \
- (__mmask8)-1))
+#define _mm256_extracti32x4_epi32(A, imm) \
+ ((__m128i)__builtin_ia32_extracti32x4_256_mask( \
+ (__v8si)(__m256i)(A), (int)(imm), (__v4si)_mm_setzero_si128(), \
+ (__mmask8) - 1))
#define _mm256_mask_extracti32x4_epi32(W, U, A, imm) \
((__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
diff --git a/clang/lib/Headers/avxintrin.h b/clang/lib/Headers/avxintrin.h
index 696ec31..4aef924 100644
--- a/clang/lib/Headers/avxintrin.h
+++ b/clang/lib/Headers/avxintrin.h
@@ -2941,9 +2941,8 @@ _mm256_testnzc_si256(__m256i __a, __m256i __b) {
/// A 256-bit vector of [4 x double] containing the double-precision
/// floating point values with sign bits to be extracted.
/// \returns The sign bits from the operand, written to bits [3:0].
-static __inline int __DEFAULT_FN_ATTRS
-_mm256_movemask_pd(__m256d __a)
-{
+static __inline int __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_movemask_pd(__m256d __a) {
return __builtin_ia32_movmskpd256((__v4df)__a);
}
@@ -2959,9 +2958,8 @@ _mm256_movemask_pd(__m256d __a)
/// A 256-bit vector of [8 x float] containing the single-precision floating
/// point values with sign bits to be extracted.
/// \returns The sign bits from the operand, written to bits [7:0].
-static __inline int __DEFAULT_FN_ATTRS
-_mm256_movemask_ps(__m256 __a)
-{
+static __inline int __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm256_movemask_ps(__m256 __a) {
return __builtin_ia32_movmskps256((__v8sf)__a);
}
diff --git a/clang/lib/Headers/emmintrin.h b/clang/lib/Headers/emmintrin.h
index 454e9a2..dbe5ca0 100644
--- a/clang/lib/Headers/emmintrin.h
+++ b/clang/lib/Headers/emmintrin.h
@@ -4280,7 +4280,8 @@ _mm_packus_epi16(__m128i __a, __m128i __b) {
/// A 128-bit integer vector containing the values with bits to be extracted.
/// \returns The most significant bits from each 8-bit element in \a __a,
/// written to bits [15:0]. The other bits are assigned zeros.
-static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_epi8(__m128i __a) {
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_movemask_epi8(__m128i __a) {
return __builtin_ia32_pmovmskb128((__v16qi)__a);
}
@@ -4699,7 +4700,8 @@ _mm_unpacklo_pd(__m128d __a, __m128d __b) {
/// be extracted.
/// \returns The sign bits from each of the double-precision elements in \a __a,
/// written to bits [1:0]. The remaining bits are assigned values of zero.
-static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_pd(__m128d __a) {
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_movemask_pd(__m128d __a) {
return __builtin_ia32_movmskpd((__v2df)__a);
}
diff --git a/clang/lib/Headers/smmintrin.h b/clang/lib/Headers/smmintrin.h
index 4f197d5..511a135 100644
--- a/clang/lib/Headers/smmintrin.h
+++ b/clang/lib/Headers/smmintrin.h
@@ -1524,7 +1524,8 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2) {
/// \returns A 128-bit value where bits [15:0] contain the minimum value found
/// in parameter \a __V, bits [18:16] contain the index of the minimum value
/// and the remaining bits are set to 0.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_minpos_epu16(__m128i __V) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_minpos_epu16(__m128i __V) {
return (__m128i)__builtin_ia32_phminposuw128((__v8hi)__V);
}
diff --git a/clang/lib/Headers/tmmintrin.h b/clang/lib/Headers/tmmintrin.h
index ee96caa..5d0f20f 100644
--- a/clang/lib/Headers/tmmintrin.h
+++ b/clang/lib/Headers/tmmintrin.h
@@ -544,8 +544,8 @@ _mm_maddubs_pi16(__m64 __a, __m64 __b) {
/// A 128-bit vector of [8 x i16] containing one of the source operands.
/// \returns A 128-bit vector of [8 x i16] containing the rounded and scaled
/// products of both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhrs_epi16(__m128i __a,
- __m128i __b) {
+static __inline__ __m128i __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_mulhrs_epi16(__m128i __a, __m128i __b) {
return (__m128i)__builtin_ia32_pmulhrsw128((__v8hi)__a, (__v8hi)__b);
}
@@ -563,11 +563,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhrs_epi16(__m128i __a,
/// A 64-bit vector of [4 x i16] containing one of the source operands.
/// \returns A 64-bit vector of [4 x i16] containing the rounded and scaled
/// products of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_mulhrs_pi16(__m64 __a, __m64 __b)
-{
- return __trunc64(__builtin_ia32_pmulhrsw128((__v8hi)__anyext128(__a),
- (__v8hi)__anyext128(__b)));
+static __inline__ __m64 __DEFAULT_FN_ATTRS_CONSTEXPR
+_mm_mulhrs_pi16(__m64 __a, __m64 __b) {
+ return __trunc64(__builtin_ia32_pmulhrsw128((__v8hi)__zext128(__a),
+ (__v8hi)__zext128(__b)));
}
/// Copies the 8-bit integers from a 128-bit integer vector to the
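Besides becoming constexpr, _mm_mulhrs_pi16 now widens through __zext128 instead of __anyext128, presumably so the upper lanes have a defined value under constant evaluation. For comparison, the per-lane reference semantics of the rounded multiply-high (a sketch; PMULHRSW computes (a * b + 2^14) >> 15 on signed 16-bit lanes):

static inline short mulhrs_ref(short a, short b) {
  return (short)(((int)a * (int)b + (1 << 14)) >> 15);
}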
diff --git a/clang/lib/Headers/xmmintrin.h b/clang/lib/Headers/xmmintrin.h
index 605409c..fe6afdc 100644
--- a/clang/lib/Headers/xmmintrin.h
+++ b/clang/lib/Headers/xmmintrin.h
@@ -3014,9 +3014,7 @@ _mm_cvtps_pi8(__m128 __a)
/// \returns A 32-bit integer value. Bits [3:0] contain the sign bits from each
/// single-precision floating-point element of the parameter. Bits [31:4] are
/// set to zero.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_movemask_ps(__m128 __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR _mm_movemask_ps(__m128 __a) {
return __builtin_ia32_movmskps((__v4sf)__a);
}
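The movemask family gains _CONSTEXPR attribute variants, so sign-mask queries can fold at compile time. A minimal sketch, assuming C++ and constexpr-capable builtins:

constexpr __m128 AllNegative = {-1.0f, -2.0f, -3.0f, -4.0f};
static_assert(_mm_movemask_ps(AllNegative) == 0xF, "all sign bits set");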
diff --git a/clang/lib/Lex/PPLexerChange.cpp b/clang/lib/Lex/PPLexerChange.cpp
index d8f61c0..b014124 100644
--- a/clang/lib/Lex/PPLexerChange.cpp
+++ b/clang/lib/Lex/PPLexerChange.cpp
@@ -302,7 +302,7 @@ void Preprocessor::diagnoseMissingHeaderInUmbrellaDir(const Module &Mod) {
// Check whether this entry has an extension typically associated with
// headers.
if (!StringSwitch<bool>(llvm::sys::path::extension(Entry->path()))
- .Cases(".h", ".H", ".hh", ".hpp", true)
+ .Cases({".h", ".H", ".hh", ".hpp"}, true)
.Default(false))
continue;
diff --git a/clang/lib/Parse/ParsePragma.cpp b/clang/lib/Parse/ParsePragma.cpp
index 9893381..7c2b928 100644
--- a/clang/lib/Parse/ParsePragma.cpp
+++ b/clang/lib/Parse/ParsePragma.cpp
@@ -1419,10 +1419,11 @@ bool Parser::HandlePragmaLoopHint(LoopHint &Hint) {
// Return a valid hint if pragma unroll or nounroll were specified
// without an argument.
- auto IsLoopHint = llvm::StringSwitch<bool>(PragmaNameInfo->getName())
- .Cases("unroll", "nounroll", "unroll_and_jam",
- "nounroll_and_jam", true)
- .Default(false);
+ auto IsLoopHint =
+ llvm::StringSwitch<bool>(PragmaNameInfo->getName())
+ .Cases({"unroll", "nounroll", "unroll_and_jam", "nounroll_and_jam"},
+ true)
+ .Default(false);
if (Toks.empty() && IsLoopHint) {
ConsumeAnnotationToken();
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 652527a..2990fd6 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -6926,13 +6926,13 @@ StringRef Sema::GetFormatStringTypeName(FormatStringType FST) {
FormatStringType Sema::GetFormatStringType(StringRef Flavor) {
return llvm::StringSwitch<FormatStringType>(Flavor)
- .Cases("gnu_scanf", "scanf", FormatStringType::Scanf)
- .Cases("gnu_printf", "printf", "printf0", "syslog",
+ .Cases({"gnu_scanf", "scanf"}, FormatStringType::Scanf)
+ .Cases({"gnu_printf", "printf", "printf0", "syslog"},
FormatStringType::Printf)
- .Cases("NSString", "CFString", FormatStringType::NSString)
- .Cases("gnu_strftime", "strftime", FormatStringType::Strftime)
- .Cases("gnu_strfmon", "strfmon", FormatStringType::Strfmon)
- .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err",
+ .Cases({"NSString", "CFString"}, FormatStringType::NSString)
+ .Cases({"gnu_strftime", "strftime"}, FormatStringType::Strftime)
+ .Cases({"gnu_strfmon", "strfmon"}, FormatStringType::Strfmon)
+ .Cases({"kprintf", "cmn_err", "vcmn_err", "zcmn_err"},
FormatStringType::Kprintf)
.Case("freebsd_kprintf", FormatStringType::FreeBSDKPrintf)
.Case("os_trace", FormatStringType::OSLog)
@@ -12309,13 +12309,20 @@ static void DiagnoseMixedUnicodeImplicitConversion(Sema &S, const Type *Source,
SourceLocation CC) {
assert(Source->isUnicodeCharacterType() && Target->isUnicodeCharacterType() &&
Source != Target);
+
+  // Lone surrogates have a distinct representation in UTF-32.
+  // Converting between UTF-16 and UTF-32 code points seems to be very
+  // widespread, so don't warn on such conversions.
+ if (Source->isChar16Type() && Target->isChar32Type())
+ return;
+
Expr::EvalResult Result;
if (E->EvaluateAsInt(Result, S.getASTContext(), Expr::SE_AllowSideEffects,
S.isConstantEvaluatedContext())) {
llvm::APSInt Value(32);
Value = Result.Val.getInt();
bool IsASCII = Value <= 0x7F;
- bool IsBMP = Value <= 0xD7FF || (Value >= 0xE000 && Value <= 0xFFFF);
+ bool IsBMP = Value <= 0xDFFF || (Value >= 0xE000 && Value <= 0xFFFF);
bool ConversionPreservesSemantics =
IsASCII || (!Source->isChar8Type() && !Target->isChar8Type() && IsBMP);
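Effect of the early return added above, in isolation (illustrative):

char16_t C16 = u'x';
char32_t C32 = C16; // char16_t -> char32_t: returns early, never diagnosed
char16_t R16 = C32; // char32_t -> char16_t: still goes through the BMP check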
diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp
index 04a73181..54cbfe4 100644
--- a/clang/lib/Sema/SemaConcept.cpp
+++ b/clang/lib/Sema/SemaConcept.cpp
@@ -432,7 +432,7 @@ private:
// XXX: It is SLOW! Use it very carefully.
std::optional<MultiLevelTemplateArgumentList> SubstitutionInTemplateArguments(
const NormalizedConstraintWithParamMapping &Constraint,
- MultiLevelTemplateArgumentList MLTAL,
+ const MultiLevelTemplateArgumentList &MLTAL,
llvm::SmallVector<TemplateArgument> &SubstitutedOuterMost);
ExprResult EvaluateSlow(const AtomicConstraint &Constraint,
@@ -564,12 +564,17 @@ ExprResult ConstraintSatisfactionChecker::EvaluateAtomicConstraint(
std::optional<MultiLevelTemplateArgumentList>
ConstraintSatisfactionChecker::SubstitutionInTemplateArguments(
const NormalizedConstraintWithParamMapping &Constraint,
- MultiLevelTemplateArgumentList MLTAL,
- llvm::SmallVector<TemplateArgument> &SubstitutedOuterMost) {
+ const MultiLevelTemplateArgumentList &MLTAL,
+ llvm::SmallVector<TemplateArgument> &SubstitutedOutermost) {
if (!Constraint.hasParameterMapping())
return std::move(MLTAL);
+ // The mapping is empty, meaning no template arguments are needed for
+ // evaluation.
+ if (Constraint.getParameterMapping().empty())
+ return MultiLevelTemplateArgumentList();
+
TemplateDeductionInfo Info(Constraint.getBeginLoc());
Sema::InstantiatingTemplate Inst(
S, Constraint.getBeginLoc(),
@@ -607,7 +612,7 @@ ConstraintSatisfactionChecker::SubstitutionInTemplateArguments(
// The empty MLTAL situation should only occur when evaluating non-dependent
// constraints.
if (MLTAL.getNumSubstitutedLevels())
- SubstitutedOuterMost =
+ SubstitutedOutermost =
llvm::to_vector_of<TemplateArgument>(MLTAL.getOutermost());
unsigned Offset = 0;
for (unsigned I = 0, MappedIndex = 0; I < Used.size(); I++) {
@@ -615,19 +620,19 @@ ConstraintSatisfactionChecker::SubstitutionInTemplateArguments(
if (Used[I])
Arg = S.Context.getCanonicalTemplateArgument(
CTAI.SugaredConverted[MappedIndex++]);
- if (I < SubstitutedOuterMost.size()) {
- SubstitutedOuterMost[I] = Arg;
+ if (I < SubstitutedOutermost.size()) {
+ SubstitutedOutermost[I] = Arg;
Offset = I + 1;
} else {
- SubstitutedOuterMost.push_back(Arg);
- Offset = SubstitutedOuterMost.size();
+ SubstitutedOutermost.push_back(Arg);
+ Offset = SubstitutedOutermost.size();
}
}
- if (Offset < SubstitutedOuterMost.size())
- SubstitutedOuterMost.erase(SubstitutedOuterMost.begin() + Offset);
+ if (Offset < SubstitutedOutermost.size())
+ SubstitutedOutermost.erase(SubstitutedOutermost.begin() + Offset);
MultiLevelTemplateArgumentList SubstitutedTemplateArgs;
- SubstitutedTemplateArgs.addOuterTemplateArguments(TD, SubstitutedOuterMost,
+ SubstitutedTemplateArgs.addOuterTemplateArguments(TD, SubstitutedOutermost,
/*Final=*/false);
return std::move(SubstitutedTemplateArgs);
}
@@ -636,9 +641,9 @@ ExprResult ConstraintSatisfactionChecker::EvaluateSlow(
const AtomicConstraint &Constraint,
const MultiLevelTemplateArgumentList &MLTAL) {
- llvm::SmallVector<TemplateArgument> SubstitutedOuterMost;
+ llvm::SmallVector<TemplateArgument> SubstitutedOutermost;
std::optional<MultiLevelTemplateArgumentList> SubstitutedArgs =
- SubstitutionInTemplateArguments(Constraint, MLTAL, SubstitutedOuterMost);
+ SubstitutionInTemplateArguments(Constraint, MLTAL, SubstitutedOutermost);
if (!SubstitutedArgs) {
Satisfaction.IsSatisfied = false;
return ExprEmpty();
@@ -736,8 +741,9 @@ ExprResult ConstraintSatisfactionChecker::Evaluate(
UnsubstitutedConstraintSatisfactionCacheResult Cache;
Cache.Satisfaction.ContainsErrors = Satisfaction.ContainsErrors;
Cache.Satisfaction.IsSatisfied = Satisfaction.IsSatisfied;
- std::copy(Satisfaction.Details.begin() + Size, Satisfaction.Details.end(),
- std::back_inserter(Cache.Satisfaction.Details));
+ Cache.Satisfaction.Details.insert(Cache.Satisfaction.Details.end(),
+ Satisfaction.Details.begin() + Size,
+ Satisfaction.Details.end());
Cache.SubstExpr = E;
S.UnsubstitutedConstraintSatisfactionCache.insert({ID, std::move(Cache)});
@@ -786,13 +792,13 @@ ExprResult ConstraintSatisfactionChecker::EvaluateSlow(
FoldExpandedConstraint::FoldOperatorKind::And;
unsigned EffectiveDetailEndIndex = Satisfaction.Details.size();
- llvm::SmallVector<TemplateArgument> SubstitutedOuterMost;
+ llvm::SmallVector<TemplateArgument> SubstitutedOutermost;
// FIXME: Is PackSubstitutionIndex correct?
llvm::SaveAndRestore _(PackSubstitutionIndex, S.ArgPackSubstIndex);
std::optional<MultiLevelTemplateArgumentList> SubstitutedArgs =
SubstitutionInTemplateArguments(
static_cast<const NormalizedConstraintWithParamMapping &>(Constraint),
- MLTAL, SubstitutedOuterMost);
+ MLTAL, SubstitutedOutermost);
if (!SubstitutedArgs) {
Satisfaction.IsSatisfied = false;
return ExprError();
@@ -868,8 +874,9 @@ ExprResult ConstraintSatisfactionChecker::Evaluate(
UnsubstitutedConstraintSatisfactionCacheResult Cache;
Cache.Satisfaction.ContainsErrors = Satisfaction.ContainsErrors;
Cache.Satisfaction.IsSatisfied = Satisfaction.IsSatisfied;
- std::copy(Satisfaction.Details.begin() + Size, Satisfaction.Details.end(),
- std::back_inserter(Cache.Satisfaction.Details));
+ Cache.Satisfaction.Details.insert(Cache.Satisfaction.Details.end(),
+ Satisfaction.Details.begin() + Size,
+ Satisfaction.Details.end());
Cache.SubstExpr = E;
S.UnsubstitutedConstraintSatisfactionCache.insert({ID, std::move(Cache)});
return E;
@@ -880,9 +887,9 @@ ExprResult ConstraintSatisfactionChecker::EvaluateSlow(
const MultiLevelTemplateArgumentList &MLTAL, unsigned Size) {
const ConceptReference *ConceptId = Constraint.getConceptId();
- llvm::SmallVector<TemplateArgument> SubstitutedOuterMost;
+ llvm::SmallVector<TemplateArgument> SubstitutedOutermost;
std::optional<MultiLevelTemplateArgumentList> SubstitutedArgs =
- SubstitutionInTemplateArguments(Constraint, MLTAL, SubstitutedOuterMost);
+ SubstitutionInTemplateArguments(Constraint, MLTAL, SubstitutedOutermost);
if (!SubstitutedArgs) {
Satisfaction.IsSatisfied = false;
@@ -1012,8 +1019,9 @@ ExprResult ConstraintSatisfactionChecker::Evaluate(
UnsubstitutedConstraintSatisfactionCacheResult Cache;
Cache.Satisfaction.ContainsErrors = Satisfaction.ContainsErrors;
Cache.Satisfaction.IsSatisfied = Satisfaction.IsSatisfied;
- std::copy(Satisfaction.Details.begin() + Size, Satisfaction.Details.end(),
- std::back_inserter(Cache.Satisfaction.Details));
+ Cache.Satisfaction.Details.insert(Cache.Satisfaction.Details.end(),
+ Satisfaction.Details.begin() + Size,
+ Satisfaction.Details.end());
Cache.SubstExpr = CE;
S.UnsubstitutedConstraintSatisfactionCache.insert({ID, std::move(Cache)});
return CE;
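This is the third occurrence of the same mechanical change: std::copy through a back_inserter becomes a single range insert, which lets the destination size itself once instead of growing per element. A minimal sketch of the two equivalent forms, with hypothetical Src/Dst vectors:

    #include <algorithm>
    #include <iterator>
    #include <vector>

    void appendTail(std::vector<int> &Dst, const std::vector<int> &Src,
                    unsigned Size) {
      // Before: grows Dst one push_back at a time through the inserter.
      // std::copy(Src.begin() + Size, Src.end(), std::back_inserter(Dst));
      // After: one range insert reserves and copies in a single step.
      Dst.insert(Dst.end(), Src.begin() + Size, Src.end());
    }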
@@ -1217,10 +1225,10 @@ bool Sema::CheckConstraintSatisfaction(
return false;
}
-static const ExprResult
-SubstituteConceptsInConstrainExpression(Sema &S, const NamedDecl *D,
- const ConceptSpecializationExpr *CSE,
- UnsignedOrNone SubstIndex) {
+static ExprResult
+SubstituteConceptsInConstraintExpression(Sema &S, const NamedDecl *D,
+ const ConceptSpecializationExpr *CSE,
+ UnsignedOrNone SubstIndex) {
// [C++2c] [temp.constr.normal]
// Otherwise, to form CE, any non-dependent concept template argument Ai
@@ -1255,7 +1263,7 @@ bool Sema::CheckConstraintSatisfaction(
const ConceptSpecializationExpr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction) {
- ExprResult Res = SubstituteConceptsInConstrainExpression(
+ ExprResult Res = SubstituteConceptsInConstraintExpression(
*this, nullptr, ConstraintExpr, ArgPackSubstIndex);
if (!Res.isUsable())
return true;
@@ -2017,8 +2025,13 @@ void SubstituteParameterMappings::buildParameterMapping(
SemaRef.MarkUsedTemplateParameters(Args->arguments(),
/*Depth=*/0, OccurringIndices);
}
+ unsigned Size = OccurringIndices.count();
+ // When the constraint is independent of any template parameters,
+ // we build an empty mapping so that we can distinguish these cases
+ // from cases where no mapping exists at all, e.g. when there are only atomic
+ // constraints.
TemplateArgumentLoc *TempArgs =
- new (SemaRef.Context) TemplateArgumentLoc[OccurringIndices.count()];
+ new (SemaRef.Context) TemplateArgumentLoc[Size];
llvm::SmallVector<NamedDecl *> UsedParams;
for (unsigned I = 0, J = 0, C = TemplateParams->size(); I != C; ++I) {
SourceLocation Loc = ArgsAsWritten->NumTemplateArgs > I
@@ -2039,7 +2052,6 @@ void SubstituteParameterMappings::buildParameterMapping(
TemplateParams->getLAngleLoc(), UsedParams,
/*RAngleLoc=*/SourceLocation(),
/*RequiresClause=*/nullptr);
- unsigned Size = OccurringIndices.count();
N.updateParameterMapping(
std::move(OccurringIndices), std::move(OccurringIndicesForSubsumption),
MutableArrayRef<TemplateArgumentLoc>{TempArgs, Size}, UsedList);
@@ -2050,6 +2062,10 @@ bool SubstituteParameterMappings::substitute(
if (!N.hasParameterMapping())
buildParameterMapping(N);
+ // If the parameter mapping is empty, there is nothing to substitute.
+ if (N.getParameterMapping().empty())
+ return false;
+
SourceLocation InstLocBegin, InstLocEnd;
llvm::ArrayRef Arguments = ArgsAsWritten->arguments();
if (Arguments.empty()) {
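The early return added here depends on the distinction buildParameterMapping now draws: a constraint that mentions no template parameters gets an empty mapping, which is not the same as having no mapping at all. A tri-state sketch of that idea, with stand-in types:

    #include <optional>
    #include <vector>

    using Mapping = std::vector<int>; // stand-in for the parameter mapping

    static bool needsSubstitution(const std::optional<Mapping> &M) {
      if (!M)
        return true;      // no mapping built yet: build it first
      return !M->empty(); // built but empty: nothing to substitute
    }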
@@ -2289,7 +2305,7 @@ NormalizedConstraint *NormalizedConstraint::fromConstraintExpr(
ConceptDecl *CD = CSE->getNamedConcept()->getCanonicalDecl();
ExprResult Res =
- SubstituteConceptsInConstrainExpression(S, D, CSE, SubstIndex);
+ SubstituteConceptsInConstraintExpression(S, D, CSE, SubstIndex);
if (!Res.isUsable())
return nullptr;
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index dca9d6e..a50c276 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -12811,7 +12811,7 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
if (auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(DC)) {
if (CTSD->isInStdNamespace() &&
llvm::StringSwitch<bool>(CTSD->getName())
- .Cases("less", "less_equal", "greater", "greater_equal", true)
+ .Cases({"less", "less_equal", "greater", "greater_equal"}, true)
.Default(false)) {
if (RHSType->isNullPtrType())
RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer);
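This hunk (and the SemaInit and SemaStmtAttr hunks below) migrates from the variadic Cases overload to the initializer-list form shown in the diff. A minimal usage sketch of that llvm::StringSwitch pattern:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSwitch.h"

    static bool isOrderingFunctor(llvm::StringRef Name) {
      return llvm::StringSwitch<bool>(Name)
          .Cases({"less", "less_equal", "greater", "greater_equal"}, true)
          .Default(false);
    }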
diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp
index f7974eb..7debe33 100644
--- a/clang/lib/Sema/SemaInit.cpp
+++ b/clang/lib/Sema/SemaInit.cpp
@@ -672,11 +672,12 @@ ExprResult InitListChecker::PerformEmptyInit(SourceLocation Loc,
IsInStd = true;
}
- if (IsInStd && llvm::StringSwitch<bool>(R->getName())
- .Cases("basic_string", "deque", "forward_list", true)
- .Cases("list", "map", "multimap", "multiset", true)
- .Cases("priority_queue", "queue", "set", "stack", true)
- .Cases("unordered_map", "unordered_set", "vector", true)
+ if (IsInStd &&
+ llvm::StringSwitch<bool>(R->getName())
+ .Cases({"basic_string", "deque", "forward_list"}, true)
+ .Cases({"list", "map", "multimap", "multiset"}, true)
+ .Cases({"priority_queue", "queue", "set", "stack"}, true)
+ .Cases({"unordered_map", "unordered_set", "vector"}, true)
.Default(false)) {
InitSeq.InitializeFrom(
SemaRef, Entity,
diff --git a/clang/lib/Sema/SemaOpenACC.cpp b/clang/lib/Sema/SemaOpenACC.cpp
index ca99834..3bb8080 100644
--- a/clang/lib/Sema/SemaOpenACC.cpp
+++ b/clang/lib/Sema/SemaOpenACC.cpp
@@ -2996,6 +2996,8 @@ bool SemaOpenACC::CreateReductionCombinerRecipe(
case OpenACCReductionOperator::Max:
case OpenACCReductionOperator::Min:
+ BinOp = BinaryOperatorKind::BO_LT;
+ break;
case OpenACCReductionOperator::And:
case OpenACCReductionOperator::Or:
// We just want a 'NYI' error in the backend, so leave an empty combiner
@@ -3011,26 +3013,80 @@ bool SemaOpenACC::CreateReductionCombinerRecipe(
assert(!VarTy->isArrayType() && "Only 1 level of array allowed");
+ enum class CombinerFailureKind {
+ None = 0,
+ BinOp = 1,
+ Conditional = 2,
+ Assignment = 3,
+ };
+
+ auto genCombiner = [&, this](DeclRefExpr *LHSDRE, DeclRefExpr *RHSDRE)
+ -> std::pair<ExprResult, CombinerFailureKind> {
+ ExprResult BinOpRes =
+ SemaRef.BuildBinOp(SemaRef.getCurScope(), Loc, BinOp, LHSDRE, RHSDRE,
+ /*ForFoldExpr=*/false);
+ switch (ReductionOperator) {
+ case OpenACCReductionOperator::Addition:
+ case OpenACCReductionOperator::Multiplication:
+ case OpenACCReductionOperator::BitwiseAnd:
+ case OpenACCReductionOperator::BitwiseOr:
+ case OpenACCReductionOperator::BitwiseXOr:
+ // These five are handled as simple compound operators, so we can
+ // return immediately here.
+ return {BinOpRes, BinOpRes.isUsable() ? CombinerFailureKind::None
+ : CombinerFailureKind::BinOp};
+ case OpenACCReductionOperator::Max:
+ case OpenACCReductionOperator::Min: {
+ // These are done as:
+ // LHS = (LHS < RHS) ? LHS : RHS; and LHS = (LHS < RHS) ? RHS : LHS;
+ //
+ // The BinOpRes should have been created with the less-than, so we just
+ // have to build the conditional and assignment.
+ if (!BinOpRes.isUsable())
+ return {BinOpRes, CombinerFailureKind::BinOp};
+
+ // Create the correct conditional operator, swapping the results
+ // (true/false value) depending on min/max.
+ ExprResult CondRes;
+ if (ReductionOperator == OpenACCReductionOperator::Min)
+ CondRes = SemaRef.ActOnConditionalOp(Loc, Loc, BinOpRes.get(), LHSDRE,
+ RHSDRE);
+ else
+ CondRes = SemaRef.ActOnConditionalOp(Loc, Loc, BinOpRes.get(), RHSDRE,
+ LHSDRE);
+
+ if (!CondRes.isUsable())
+ return {CondRes, CombinerFailureKind::Conditional};
+
+ // Build assignment.
+ ExprResult Assignment = SemaRef.BuildBinOp(SemaRef.getCurScope(), Loc,
+ BinaryOperatorKind::BO_Assign,
+ LHSDRE, CondRes.get(),
+ /*ForFoldExpr=*/false);
+ return {Assignment, Assignment.isUsable()
+ ? CombinerFailureKind::None
+ : CombinerFailureKind::Assignment};
+ }
+ case OpenACCReductionOperator::And:
+ case OpenACCReductionOperator::Or:
+ llvm_unreachable("And/Or not implemented, but should fail earlier");
+ case OpenACCReductionOperator::Invalid:
+ llvm_unreachable("Invalid should have been caught above");
+ }
+ };
+
auto tryCombiner = [&, this](DeclRefExpr *LHSDRE, DeclRefExpr *RHSDRE,
bool IncludeTrap) {
- // TODO: OpenACC: we have to figure out based on the bin-op how to do the
- // ones that we can't just use compound operators for. So &&, ||, max, and
- // min aren't really clear what we could do here.
if (IncludeTrap) {
// Trap all of the errors here, we'll emit our own at the end.
Sema::TentativeAnalysisScope Trap{SemaRef};
-
- return SemaRef.BuildBinOp(SemaRef.getCurScope(), Loc, BinOp, LHSDRE,
- RHSDRE,
- /*ForFoldExpr=*/false);
- } else {
- return SemaRef.BuildBinOp(SemaRef.getCurScope(), Loc, BinOp, LHSDRE,
- RHSDRE,
- /*ForFoldExpr=*/false);
+ return genCombiner(LHSDRE, RHSDRE);
}
+ return genCombiner(LHSDRE, RHSDRE);
};
struct CombinerAttemptTy {
+ CombinerFailureKind FailKind;
VarDecl *LHS;
DeclRefExpr *LHSDRE;
VarDecl *RHS;
@@ -3058,9 +3114,11 @@ bool SemaOpenACC::CreateReductionCombinerRecipe(
RHSDecl->getBeginLoc()},
Ty, clang::VK_LValue, RHSDecl, nullptr, NOUR_None);
- ExprResult BinOpResult = tryCombiner(LHSDRE, RHSDRE, /*IncludeTrap=*/true);
+ std::pair<ExprResult, CombinerFailureKind> BinOpResult =
+ tryCombiner(LHSDRE, RHSDRE, /*IncludeTrap=*/true);
- return {LHSDecl, LHSDRE, RHSDecl, RHSDRE, BinOpResult.get()};
+ return {BinOpResult.second, LHSDecl, LHSDRE, RHSDecl, RHSDRE,
+ BinOpResult.first.get()};
};
CombinerAttemptTy TopLevelCombinerInfo = formCombiner(VarTy);
@@ -3081,12 +3139,20 @@ bool SemaOpenACC::CreateReductionCombinerRecipe(
}
}
+ auto EmitFailureNote = [&](CombinerFailureKind CFK) {
+ if (CFK == CombinerFailureKind::BinOp)
+ return Diag(Loc, diag::note_acc_reduction_combiner_forming)
+ << CFK << BinaryOperator::getOpcodeStr(BinOp);
+ return Diag(Loc, diag::note_acc_reduction_combiner_forming) << CFK;
+ };
+
// Since the 'root' level didn't fail, the only thing that could be successful
// is a struct that we decompose on its individual fields.
RecordDecl *RD = VarTy->getAsRecordDecl();
if (!RD) {
Diag(Loc, diag::err_acc_reduction_recipe_no_op) << VarTy;
+ EmitFailureNote(TopLevelCombinerInfo.FailKind);
tryCombiner(TopLevelCombinerInfo.LHSDRE, TopLevelCombinerInfo.RHSDRE,
/*IncludeTrap=*/false);
return true;
@@ -3098,6 +3164,7 @@ bool SemaOpenACC::CreateReductionCombinerRecipe(
if (!FieldCombinerInfo.Op || FieldCombinerInfo.Op->containsErrors()) {
Diag(Loc, diag::err_acc_reduction_recipe_no_op) << FD->getType();
Diag(FD->getBeginLoc(), diag::note_acc_reduction_recipe_noop_field) << RD;
+ EmitFailureNote(FieldCombinerInfo.FailKind);
tryCombiner(FieldCombinerInfo.LHSDRE, FieldCombinerInfo.RHSDRE,
/*IncludeTrap=*/false);
return true;
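For orientation, the min/max combiner genCombiner assembles above boils down, at the statement level, to the conditional-plus-assignment named in its comment. A scalar sketch (plain C++, not the Sema-built AST):

    // reduction(min: x) keeps the smaller operand; max swaps the arms.
    static void combineMin(int &LHS, int RHS) { LHS = (LHS < RHS) ? LHS : RHS; }
    static void combineMax(int &LHS, int RHS) { LHS = (LHS < RHS) ? RHS : LHS; }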
diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp
index 3ba93ff9..c5ef0d5 100644
--- a/clang/lib/Sema/SemaRISCV.cpp
+++ b/clang/lib/Sema/SemaRISCV.cpp
@@ -1464,7 +1464,8 @@ void SemaRISCV::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
}
else if (Info.ElementType->isBFloat16Type() &&
!FeatureMap.lookup("zvfbfmin") &&
- !FeatureMap.lookup("xandesvbfhcvt"))
+ !FeatureMap.lookup("xandesvbfhcvt") &&
+ !FeatureMap.lookup("experimental-zvfbfa"))
if (DeclareAndesVectorBuiltins) {
Diag(Loc, diag::err_riscv_type_requires_extension, D)
<< Ty << "zvfbfmin or xandesvbfhcvt";
diff --git a/clang/lib/Sema/SemaStmtAttr.cpp b/clang/lib/Sema/SemaStmtAttr.cpp
index 50acc83..27fd556 100644
--- a/clang/lib/Sema/SemaStmtAttr.cpp
+++ b/clang/lib/Sema/SemaStmtAttr.cpp
@@ -81,7 +81,7 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const ParsedAttr &A,
StringRef PragmaName =
llvm::StringSwitch<StringRef>(
PragmaNameLoc->getIdentifierInfo()->getName())
- .Cases("unroll", "nounroll", "unroll_and_jam", "nounroll_and_jam",
+ .Cases({"unroll", "nounroll", "unroll_and_jam", "nounroll_and_jam"},
PragmaNameLoc->getIdentifierInfo()->getName())
.Default("clang loop");
diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp b/clang/lib/Sema/SemaTemplateInstantiate.cpp
index ca7e3b2..7f85805 100644
--- a/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -639,15 +639,8 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(
}
Invalid = SemaRef.pushCodeSynthesisContext(Inst);
- if (!Invalid) {
- AlreadyInstantiating =
- !Inst.Entity
- ? false
- : !SemaRef.InstantiatingSpecializations
- .insert({Inst.Entity->getCanonicalDecl(), Inst.Kind})
- .second;
+ if (!Invalid)
atTemplateBegin(SemaRef.TemplateInstCallbacks, SemaRef, Inst);
- }
}
Sema::InstantiatingTemplate::InstantiatingTemplate(
@@ -902,13 +895,6 @@ void Sema::popCodeSynthesisContext() {
void Sema::InstantiatingTemplate::Clear() {
if (!Invalid) {
- if (!AlreadyInstantiating) {
- auto &Active = SemaRef.CodeSynthesisContexts.back();
- if (Active.Entity)
- SemaRef.InstantiatingSpecializations.erase(
- {Active.Entity->getCanonicalDecl(), Active.Kind});
- }
-
atTemplateEnd(SemaRef.TemplateInstCallbacks, SemaRef,
SemaRef.CodeSynthesisContexts.back());
@@ -2864,9 +2850,9 @@ TemplateInstantiator::TransformNestedRequirement(
TemplateArgs, Constraint->getSourceRange(), Satisfaction,
/*TopLevelConceptId=*/nullptr, &NewConstraint);
- assert(!Success || !Trap.hasErrorOccurred() &&
- "Substitution failures must be handled "
- "by CheckConstraintSatisfaction.");
+ assert((!Success || !Trap.hasErrorOccurred()) &&
+ "Substitution failures must be handled "
+ "by CheckConstraintSatisfaction.");
}
if (!Success || Satisfaction.HasSubstitutionFailure())
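The assert repair above is the standard precedence pitfall: && binds tighter than ||, so the message literal grouped with the right operand only. Since a string literal is never null, the old form even evaluated identically; the parentheses matter for -Wparentheses and for making the intended grouping explicit:

    #include <cassert>

    static void check(bool Success, bool ErrorOccurred) {
      // Old form parsed as: !Success || (!ErrorOccurred && "message").
      // Same truth value here, but the grouping is accidental:
      // assert(!Success || !ErrorOccurred && "message");
      assert((!Success || !ErrorOccurred) && "message"); // intended grouping
    }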
@@ -3312,17 +3298,20 @@ bool Sema::SubstDefaultArgument(
FunctionDecl *FD = cast<FunctionDecl>(Param->getDeclContext());
Expr *PatternExpr = Param->getUninstantiatedDefaultArg();
+ RecursiveInstGuard AlreadyInstantiating(
+ *this, Param, RecursiveInstGuard::Kind::DefaultArgument);
+ if (AlreadyInstantiating) {
+ Param->setInvalidDecl();
+ return Diag(Param->getBeginLoc(), diag::err_recursive_default_argument)
+ << FD << PatternExpr->getSourceRange();
+ }
+
EnterExpressionEvaluationContext EvalContext(
*this, ExpressionEvaluationContext::PotentiallyEvaluated, Param);
InstantiatingTemplate Inst(*this, Loc, Param, TemplateArgs.getInnermost());
if (Inst.isInvalid())
return true;
- if (Inst.isAlreadyInstantiating()) {
- Diag(Param->getBeginLoc(), diag::err_recursive_default_argument) << FD;
- Param->setInvalidDecl();
- return true;
- }
ExprResult Result;
// C++ [dcl.fct.default]p5:
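The RecursiveInstGuard used here (and in the Instantiate* functions below) replaces the isAlreadyInstantiating bookkeeping that previously lived inside InstantiatingTemplate. A minimal sketch of the RAII shape with hypothetical names; the real guard keys on a (declaration, kind) pair in Sema:

    #include <set>

    class RecursionGuard {
      std::set<const void *> &Active;
      const void *Key;
      bool WasActive;

    public:
      RecursionGuard(std::set<const void *> &S, const void *K)
          : Active(S), Key(K), WasActive(!S.insert(K).second) {}
      ~RecursionGuard() {
        if (!WasActive) // only the frame that inserted the key removes it
          Active.erase(Key);
      }
      // True when the key was already live: we are in a recursion cycle.
      explicit operator bool() const { return WasActive; }
    };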
@@ -3554,12 +3543,26 @@ namespace clang {
}
}
-bool
-Sema::InstantiateClass(SourceLocation PointOfInstantiation,
- CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
- const MultiLevelTemplateArgumentList &TemplateArgs,
- TemplateSpecializationKind TSK,
- bool Complain) {
+bool Sema::InstantiateClass(SourceLocation PointOfInstantiation,
+ CXXRecordDecl *Instantiation,
+ CXXRecordDecl *Pattern,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateSpecializationKind TSK, bool Complain) {
+#ifndef NDEBUG
+ RecursiveInstGuard AlreadyInstantiating(*this, Instantiation,
+ RecursiveInstGuard::Kind::Template);
+ assert(!AlreadyInstantiating && "should have been caught by caller");
+#endif
+
+ return InstantiateClassImpl(PointOfInstantiation, Instantiation, Pattern,
+ TemplateArgs, TSK, Complain);
+}
+
+bool Sema::InstantiateClassImpl(
+ SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation,
+ CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateSpecializationKind TSK, bool Complain) {
+
CXXRecordDecl *PatternDef
= cast_or_null<CXXRecordDecl>(Pattern->getDefinition());
if (DiagnoseUninstantiableTemplate(PointOfInstantiation, Instantiation,
@@ -3596,7 +3599,6 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
InstantiatingTemplate Inst(*this, PointOfInstantiation, Instantiation);
if (Inst.isInvalid())
return true;
- assert(!Inst.isAlreadyInstantiating() && "should have been caught by caller");
PrettyDeclStackTraceEntry CrashInfo(Context, Instantiation, SourceLocation(),
"instantiating class definition");
@@ -3808,6 +3810,12 @@ bool Sema::InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK) {
+#ifndef NDEBUG
+ RecursiveInstGuard AlreadyInstantiating(*this, Instantiation,
+ RecursiveInstGuard::Kind::Template);
+ assert(!AlreadyInstantiating && "should have been caught by caller");
+#endif
+
EnumDecl *PatternDef = Pattern->getDefinition();
if (DiagnoseUninstantiableTemplate(PointOfInstantiation, Instantiation,
Instantiation->getInstantiatedFromMemberEnum(),
@@ -3825,8 +3833,6 @@ bool Sema::InstantiateEnum(SourceLocation PointOfInstantiation,
InstantiatingTemplate Inst(*this, PointOfInstantiation, Instantiation);
if (Inst.isInvalid())
return true;
- if (Inst.isAlreadyInstantiating())
- return false;
PrettyDeclStackTraceEntry CrashInfo(Context, Instantiation, SourceLocation(),
"instantiating enum definition");
@@ -3865,6 +3871,14 @@ bool Sema::InstantiateInClassInitializer(
Pattern->getInClassInitStyle() &&
"pattern and instantiation disagree about init style");
+ RecursiveInstGuard AlreadyInstantiating(*this, Instantiation,
+ RecursiveInstGuard::Kind::Template);
+ if (AlreadyInstantiating)
+ // Error out if we hit an instantiation cycle for this initializer.
+ return Diag(PointOfInstantiation,
+ diag::err_default_member_initializer_cycle)
+ << Instantiation;
+
// Error out if we haven't parsed the initializer of the pattern yet because
// we are waiting for the closing brace of the outer class.
Expr *OldInit = Pattern->getInClassInitializer();
@@ -3883,12 +3897,6 @@ bool Sema::InstantiateInClassInitializer(
InstantiatingTemplate Inst(*this, PointOfInstantiation, Instantiation);
if (Inst.isInvalid())
return true;
- if (Inst.isAlreadyInstantiating()) {
- // Error out if we hit an instantiation cycle for this initializer.
- Diag(PointOfInstantiation, diag::err_default_member_initializer_cycle)
- << Instantiation;
- return true;
- }
PrettyDeclStackTraceEntry CrashInfo(Context, Instantiation, SourceLocation(),
"instantiating default member init");
@@ -3972,8 +3980,6 @@ static ActionResult<CXXRecordDecl *> getPatternForClassTemplateSpecialization(
Sema::InstantiatingTemplate Inst(S, PointOfInstantiation, ClassTemplateSpec);
if (Inst.isInvalid())
return {/*Invalid=*/true};
- if (Inst.isAlreadyInstantiating())
- return {/*Invalid=*/false};
llvm::PointerUnion<ClassTemplateDecl *,
ClassTemplatePartialSpecializationDecl *>
@@ -4136,6 +4142,11 @@ bool Sema::InstantiateClassTemplateSpecialization(
if (ClassTemplateSpec->isInvalidDecl())
return true;
+ Sema::RecursiveInstGuard AlreadyInstantiating(
+ *this, ClassTemplateSpec, Sema::RecursiveInstGuard::Kind::Template);
+ if (AlreadyInstantiating)
+ return false;
+
bool HadAvaibilityWarning =
ShouldDiagnoseAvailabilityOfDecl(ClassTemplateSpec, nullptr, nullptr)
.first != AR_Available;
@@ -4148,7 +4159,7 @@ bool Sema::InstantiateClassTemplateSpecialization(
if (!Pattern.isUsable())
return Pattern.isInvalid();
- bool Err = InstantiateClass(
+ bool Err = InstantiateClassImpl(
PointOfInstantiation, ClassTemplateSpec, Pattern.get(),
getTemplateInstantiationArgs(ClassTemplateSpec), TSK, Complain);
diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 4863b45..28925cc 100644
--- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -5312,6 +5312,16 @@ void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
if (Proto->getExceptionSpecType() != EST_Uninstantiated)
return;
+ RecursiveInstGuard AlreadyInstantiating(
+ *this, Decl, RecursiveInstGuard::Kind::ExceptionSpec);
+ if (AlreadyInstantiating) {
+ // This exception specification indirectly depends on itself. Reject.
+ // FIXME: Corresponding rule in the standard?
+ Diag(PointOfInstantiation, diag::err_exception_spec_cycle) << Decl;
+ UpdateExceptionSpec(Decl, EST_None);
+ return;
+ }
+
InstantiatingTemplate Inst(*this, PointOfInstantiation, Decl,
InstantiatingTemplate::ExceptionSpecification());
if (Inst.isInvalid()) {
@@ -5320,13 +5330,6 @@ void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
UpdateExceptionSpec(Decl, EST_None);
return;
}
- if (Inst.isAlreadyInstantiating()) {
- // This exception specification indirectly depends on itself. Reject.
- // FIXME: Corresponding rule in the standard?
- Diag(PointOfInstantiation, diag::err_exception_spec_cycle) << Decl;
- UpdateExceptionSpec(Decl, EST_None);
- return;
- }
// Enter the scope of this instantiation. We don't use
// PushDeclContext because we don't have a scope.
@@ -5386,8 +5389,6 @@ TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
if (ActiveInst.Kind == ActiveInstType::ExplicitTemplateArgumentSubstitution ||
ActiveInst.Kind == ActiveInstType::DeducedTemplateArgumentSubstitution) {
if (isa<FunctionTemplateDecl>(ActiveInst.Entity)) {
- SemaRef.InstantiatingSpecializations.erase(
- {ActiveInst.Entity->getCanonicalDecl(), ActiveInst.Kind});
atTemplateEnd(SemaRef.TemplateInstCallbacks, SemaRef, ActiveInst);
ActiveInst.Kind = ActiveInstType::TemplateInstantiation;
ActiveInst.Entity = New;
@@ -5545,6 +5546,12 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
Function = const_cast<FunctionDecl*>(ExistingDefn);
}
+#ifndef NDEBUG
+ RecursiveInstGuard AlreadyInstantiating(*this, Function,
+ RecursiveInstGuard::Kind::Template);
+ assert(!AlreadyInstantiating && "should have been caught by caller");
+#endif
+
// Find the function body that we'll be substituting.
const FunctionDecl *PatternDecl = Function->getTemplateInstantiationPattern();
assert(PatternDecl && "instantiating a non-template");
@@ -5684,7 +5691,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
}
InstantiatingTemplate Inst(*this, PointOfInstantiation, Function);
- if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
+ if (Inst.isInvalid())
return;
PrettyDeclStackTraceEntry CrashInfo(Context, Function, SourceLocation(),
"instantiating function definition");
@@ -6253,6 +6260,11 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
if (TSK == TSK_ExplicitSpecialization)
return;
+ RecursiveInstGuard AlreadyInstantiating(*this, Var,
+ RecursiveInstGuard::Kind::Template);
+ if (AlreadyInstantiating)
+ return;
+
// Find the pattern and the arguments to substitute into it.
VarDecl *PatternDecl = Var->getTemplateInstantiationPattern();
assert(PatternDecl && "no pattern for templated variable");
@@ -6276,7 +6288,7 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
// FIXME: Factor out the duplicated instantiation context setup/tear down
// code here.
InstantiatingTemplate Inst(*this, PointOfInstantiation, Var);
- if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
+ if (Inst.isInvalid())
return;
PrettyDeclStackTraceEntry CrashInfo(Context, Var, SourceLocation(),
"instantiating variable initializer");
@@ -6380,7 +6392,7 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
}
InstantiatingTemplate Inst(*this, PointOfInstantiation, Var);
- if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
+ if (Inst.isInvalid())
return;
PrettyDeclStackTraceEntry CrashInfo(Context, Var, SourceLocation(),
"instantiating variable definition");
diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h
index 29f0c30..0c8c1d1 100644
--- a/clang/lib/Sema/TreeTransform.h
+++ b/clang/lib/Sema/TreeTransform.h
@@ -16430,12 +16430,16 @@ ExprResult TreeTransform<Derived>::TransformSubstNonTypeTemplateParmExpr(
AssociatedDecl == E->getAssociatedDecl())
return E;
- auto getParamAndType = [Index = E->getIndex()](Decl *AssociatedDecl)
+ auto getParamAndType = [E](Decl *AssociatedDecl)
-> std::tuple<NonTypeTemplateParmDecl *, QualType> {
- auto [PDecl, Arg] = getReplacedTemplateParameter(AssociatedDecl, Index);
+ auto [PDecl, Arg] =
+ getReplacedTemplateParameter(AssociatedDecl, E->getIndex());
auto *Param = cast<NonTypeTemplateParmDecl>(PDecl);
- return {Param, Arg.isNull() ? Param->getType()
- : Arg.getNonTypeTemplateArgumentType()};
+ if (Arg.isNull())
+ return {Param, Param->getType()};
+ if (UnsignedOrNone PackIndex = E->getPackIndex())
+ Arg = Arg.getPackAsArray()[*PackIndex];
+ return {Param, Arg.getNonTypeTemplateArgumentType()};
};
// If the replacement expression did not change, and the parameter type
diff --git a/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
index bf35bee..3ddd659 100644
--- a/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
@@ -104,7 +104,7 @@ class RAIIMutexDescriptor {
// this function is called instead of early returning it. To avoid this, a
// bool variable (IdentifierInfoInitialized) is used and the function will
// be run only once.
- const auto &ASTCtx = Call.getState()->getStateManager().getContext();
+ const auto &ASTCtx = Call.getASTContext();
Guard = &ASTCtx.Idents.get(GuardName);
}
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index 9d3aeff..2420848 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -929,7 +929,7 @@ ObjCDeallocChecker::getValueReleasedByNillingOut(const ObjCMethodCall &M,
SVal Arg = M.getArgSVal(0);
ProgramStateRef notNilState, nilState;
std::tie(notNilState, nilState) =
- M.getState()->assume(Arg.castAs<DefinedOrUnknownSVal>());
+ C.getState()->assume(Arg.castAs<DefinedOrUnknownSVal>());
if (!(nilState && !notNilState))
return nullptr;
diff --git a/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
index f984caf..227cbfa 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
@@ -34,7 +34,7 @@ class ObjCSuperDeallocChecker
this, "[super dealloc] should not be called more than once",
categories::CoreFoundationObjectiveC};
- void initIdentifierInfoAndSelectors(ASTContext &Ctx) const;
+ void initIdentifierInfoAndSelectors(const ASTContext &Ctx) const;
bool isSuperDeallocMessage(const ObjCMethodCall &M) const;
@@ -214,8 +214,8 @@ void ObjCSuperDeallocChecker::diagnoseCallArguments(const CallEvent &CE,
}
}
-void
-ObjCSuperDeallocChecker::initIdentifierInfoAndSelectors(ASTContext &Ctx) const {
+void ObjCSuperDeallocChecker::initIdentifierInfoAndSelectors(
+ const ASTContext &Ctx) const {
if (IIdealloc)
return;
@@ -230,7 +230,7 @@ ObjCSuperDeallocChecker::isSuperDeallocMessage(const ObjCMethodCall &M) const {
if (M.getOriginExpr()->getReceiverKind() != ObjCMessageExpr::SuperInstance)
return false;
- ASTContext &Ctx = M.getState()->getStateManager().getContext();
+ const ASTContext &Ctx = M.getASTContext();
initIdentifierInfoAndSelectors(Ctx);
return M.getSelector() == SELdealloc;
diff --git a/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp
index 4fc1c57..db8bbee 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StdVariantChecker.cpp
@@ -211,13 +211,13 @@ private:
if (!DefaultType)
return;
- ProgramStateRef State = ConstructorCall->getState();
+ ProgramStateRef State = C.getState();
State = State->set<VariantHeldTypeMap>(ThisMemRegion, *DefaultType);
C.addTransition(State);
}
bool handleStdGetCall(const CallEvent &Call, CheckerContext &C) const {
- ProgramStateRef State = Call.getState();
+ ProgramStateRef State = C.getState();
const auto &ArgType = Call.getArgSVal(0)
.getType(C.getASTContext())
diff --git a/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h b/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h
index dec4612..b8fb572 100644
--- a/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h
+++ b/clang/lib/StaticAnalyzer/Checkers/TaggedUnionModeling.h
@@ -52,7 +52,7 @@ removeInformationStoredForDeadInstances(const CallEvent &Call,
template <class TypeMap>
void handleConstructorAndAssignment(const CallEvent &Call, CheckerContext &C,
SVal ThisSVal) {
- ProgramStateRef State = Call.getState();
+ ProgramStateRef State = C.getState();
if (!State)
return;
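The recurring one-line fix in these checker hunks replaces Call.getState() with C.getState(): as the WARNING comments added to ExprEngine below spell out, the state attached to a CallEvent may predate transitions already recorded against the current node. A sketch of the idiom, using the real clang analyzer headers:

    #include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
    #include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"

    using namespace clang;
    using namespace ento;

    // Read the up-to-date state from the CheckerContext; the CallEvent's
    // attached state may be obsolete inside a checker callback.
    static ProgramStateRef currentState(const CallEvent &Call,
                                        CheckerContext &C) {
      (void)Call; // Call.getState() is the one to avoid here
      return C.getState();
    }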
diff --git a/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp b/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
index 44c6f9f..8ee4832 100644
--- a/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -731,19 +731,22 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
ExplodedNodeSet checkDst;
NodeBuilder B(Pred, checkDst, Eng.getBuilderContext());
+ ProgramStateRef State = Pred->getState();
+ CallEventRef<> UpdatedCall = Call.cloneWithState(State);
+
// Check if any of the EvalCall callbacks can evaluate the call.
for (const auto &EvalCallChecker : EvalCallCheckers) {
// TODO: Support the situation when the call doesn't correspond
// to any Expr.
ProgramPoint L = ProgramPoint::getProgramPoint(
- Call.getOriginExpr(), ProgramPoint::PostStmtKind,
+ UpdatedCall->getOriginExpr(), ProgramPoint::PostStmtKind,
Pred->getLocationContext(), EvalCallChecker.Checker);
bool evaluated = false;
- { // CheckerContext generates transitions(populates checkDest) on
+ { // CheckerContext generates transitions (populates checkDest) on
// destruction, so introduce the scope to make sure it gets properly
// populated.
CheckerContext C(B, Eng, Pred, L);
- evaluated = EvalCallChecker(Call, C);
+ evaluated = EvalCallChecker(*UpdatedCall, C);
}
#ifndef NDEBUG
if (evaluated && evaluatorChecker) {
@@ -774,7 +777,7 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
// If none of the checkers evaluated the call, ask ExprEngine to handle it.
if (!evaluatorChecker) {
NodeBuilder B(Pred, Dst, Eng.getBuilderContext());
- Eng.defaultEvalCall(B, Pred, Call, CallOpts);
+ Eng.defaultEvalCall(B, Pred, *UpdatedCall, CallOpts);
}
}
}
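The refresh step above, cloning the CallEvent against the predecessor's state before any checker sees it, is the same idiom evalCall adopts in the next file. A sketch of that one step in isolation:

    #include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
    #include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"

    using namespace clang;
    using namespace ento;

    // Rebind the event to the node's current state once, then use only the
    // clone; its state-dependent methods are then safe to call.
    static CallEventRef<> refresh(const CallEvent &Call, ExplodedNode *Pred) {
      return Call.cloneWithState(Pred->getState());
    }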
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index 0c491b8..ac6c1d7 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -628,6 +628,8 @@ void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State,
const CallEvent &Call) {
+ // WARNING: The state attached to 'Call' may be obsolete, do not call any
+ // methods that rely on it!
const Expr *E = Call.getOriginExpr();
// FIXME: Constructors to placement arguments of operator new
// are not supported yet.
@@ -653,6 +655,8 @@ ProgramStateRef ExprEngine::finishArgumentConstruction(ProgramStateRef State,
void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst,
ExplodedNode *Pred,
const CallEvent &Call) {
+ // WARNING: The state attached to 'Call' may be obsolete, do not call any
+ // methods that rely on it!
ProgramStateRef State = Pred->getState();
ProgramStateRef CleanedState = finishArgumentConstruction(State, Call);
if (CleanedState == State) {
@@ -670,35 +674,33 @@ void ExprEngine::finishArgumentConstruction(ExplodedNodeSet &Dst,
}
void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
- const CallEvent &Call) {
- // WARNING: At this time, the state attached to 'Call' may be older than the
- // state in 'Pred'. This is a minor optimization since CheckerManager will
- // use an updated CallEvent instance when calling checkers, but if 'Call' is
- // ever used directly in this function all callers should be updated to pass
- // the most recent state. (It is probably not worth doing the work here since
- // for some callers this will not be necessary.)
+ const CallEvent &CallTemplate) {
+ // NOTE: CallTemplate is called a "template" because its attached state may
+ // be obsolete (compared to the state of Pred). The state-dependent methods
+ // of CallEvent should be used only after a `cloneWithState` call that
+ // attaches the up-to-date state to this template object.
// Run any pre-call checks using the generic call interface.
ExplodedNodeSet dstPreVisit;
- getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred,
- Call, *this);
+ getCheckerManager().runCheckersForPreCall(dstPreVisit, Pred, CallTemplate,
+ *this);
// Actually evaluate the function call. We try each of the checkers
// to see if they can evaluate the function call, and get a callback at
// defaultEvalCall if all of them fail.
ExplodedNodeSet dstCallEvaluated;
- getCheckerManager().runCheckersForEvalCall(dstCallEvaluated, dstPreVisit,
- Call, *this, EvalCallOptions());
+ getCheckerManager().runCheckersForEvalCall(
+ dstCallEvaluated, dstPreVisit, CallTemplate, *this, EvalCallOptions());
// If there were other constructors called for object-type arguments
// of this call, clean them up.
ExplodedNodeSet dstArgumentCleanup;
for (ExplodedNode *I : dstCallEvaluated)
- finishArgumentConstruction(dstArgumentCleanup, I, Call);
+ finishArgumentConstruction(dstArgumentCleanup, I, CallTemplate);
ExplodedNodeSet dstPostCall;
getCheckerManager().runCheckersForPostCall(dstPostCall, dstArgumentCleanup,
- Call, *this);
+ CallTemplate, *this);
// Escaping symbols conjured during invalidating the regions above.
// Note that, for inlined calls the nodes were put back into the worklist,
@@ -708,12 +710,13 @@ void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
// Run pointerEscape callback with the newly conjured symbols.
SmallVector<std::pair<SVal, SVal>, 8> Escaped;
for (ExplodedNode *I : dstPostCall) {
- NodeBuilder B(I, Dst, *currBldrCtx);
ProgramStateRef State = I->getState();
+ CallEventRef<> Call = CallTemplate.cloneWithState(State);
+ NodeBuilder B(I, Dst, *currBldrCtx);
Escaped.clear();
{
unsigned Arg = -1;
- for (const ParmVarDecl *PVD : Call.parameters()) {
+ for (const ParmVarDecl *PVD : Call->parameters()) {
++Arg;
QualType ParamTy = PVD->getType();
if (ParamTy.isNull() ||
@@ -722,13 +725,13 @@ void ExprEngine::evalCall(ExplodedNodeSet &Dst, ExplodedNode *Pred,
QualType Pointee = ParamTy->getPointeeType();
if (Pointee.isConstQualified() || Pointee->isVoidType())
continue;
- if (const MemRegion *MR = Call.getArgSVal(Arg).getAsRegion())
+ if (const MemRegion *MR = Call->getArgSVal(Arg).getAsRegion())
Escaped.emplace_back(loc::MemRegionVal(MR), State->getSVal(MR, Pointee));
}
}
State = processPointerEscapedOnBind(State, Escaped, I->getLocationContext(),
- PSK_EscapeOutParameters, &Call);
+ PSK_EscapeOutParameters, &*Call);
if (State == I->getState())
Dst.insert(I);
@@ -1212,48 +1215,47 @@ static bool isTrivialObjectAssignment(const CallEvent &Call) {
}
void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
- const CallEvent &CallTemplate,
+ const CallEvent &Call,
const EvalCallOptions &CallOpts) {
// Make sure we have the most recent state attached to the call.
ProgramStateRef State = Pred->getState();
- CallEventRef<> Call = CallTemplate.cloneWithState(State);
// Special-case trivial assignment operators.
- if (isTrivialObjectAssignment(*Call)) {
- performTrivialCopy(Bldr, Pred, *Call);
+ if (isTrivialObjectAssignment(Call)) {
+ performTrivialCopy(Bldr, Pred, Call);
return;
}
// Try to inline the call.
// The origin expression here is just used as a kind of checksum;
// this should still be safe even for CallEvents that don't come from exprs.
- const Expr *E = Call->getOriginExpr();
+ const Expr *E = Call.getOriginExpr();
ProgramStateRef InlinedFailedState = getInlineFailedState(State, E);
if (InlinedFailedState) {
// If we already tried once and failed, make sure we don't retry later.
State = InlinedFailedState;
} else {
- RuntimeDefinition RD = Call->getRuntimeDefinition();
- Call->setForeign(RD.isForeign());
+ RuntimeDefinition RD = Call.getRuntimeDefinition();
+ Call.setForeign(RD.isForeign());
const Decl *D = RD.getDecl();
- if (shouldInlineCall(*Call, D, Pred, CallOpts)) {
+ if (shouldInlineCall(Call, D, Pred, CallOpts)) {
if (RD.mayHaveOtherDefinitions()) {
AnalyzerOptions &Options = getAnalysisManager().options;
// Explore with and without inlining the call.
if (Options.getIPAMode() == IPAK_DynamicDispatchBifurcate) {
- BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred);
+ BifurcateCall(RD.getDispatchRegion(), Call, D, Bldr, Pred);
return;
}
// Don't inline if we're not in any dynamic dispatch mode.
if (Options.getIPAMode() != IPAK_DynamicDispatch) {
- conservativeEvalCall(*Call, Bldr, Pred, State);
+ conservativeEvalCall(Call, Bldr, Pred, State);
return;
}
}
- ctuBifurcate(*Call, D, Bldr, Pred, State);
+ ctuBifurcate(Call, D, Bldr, Pred, State);
return;
}
}
@@ -1261,10 +1263,10 @@ void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
// If we can't inline it, clean up the state traits used only if the function
// is inlined.
State = removeStateTraitsUsedForArrayEvaluation(
- State, dyn_cast_or_null<CXXConstructExpr>(E), Call->getLocationContext());
+ State, dyn_cast_or_null<CXXConstructExpr>(E), Call.getLocationContext());
// Also handle the return value and invalidate the regions.
- conservativeEvalCall(*Call, Bldr, Pred, State);
+ conservativeEvalCall(Call, Bldr, Pred, State);
}
void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
diff --git a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index f6a3e79..871400e 100644
--- a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -723,6 +723,7 @@ AnalysisConsumer::getModeForDecl(Decl *D, AnalysisMode Mode) {
}
static UnsignedEPStat PathRunningTime("PathRunningTime");
+static UnsignedEPStat SyntaxRunningTime("SyntaxRunningTime");
void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
ExprEngine::InliningModes IMode,
@@ -761,6 +762,8 @@ void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
SyntaxCheckTimer->stopTimer();
llvm::TimeRecord CheckerEndTime = SyntaxCheckTimer->getTotalTime();
CheckerEndTime -= CheckerStartTime;
+ FunctionSummaries.findOrInsertSummary(D)->second.SyntaxRunningTime =
+ std::lround(CheckerEndTime.getWallTime() * 1000);
DisplayTime(CheckerEndTime);
if (AnalyzerTimers && ShouldClearTimersToPreventDisplayingThem) {
AnalyzerTimers->clear();
@@ -792,11 +795,23 @@ void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
if (!CFG)
return;
+ CFGSize.set(CFG->size());
+
+ auto *DeclContext = Mgr->getAnalysisDeclContext(D);
// See if the LiveVariables analysis scales.
- if (!Mgr->getAnalysisDeclContext(D)->getAnalysis<RelaxedLiveVariables>())
+ if (!DeclContext->getAnalysis<RelaxedLiveVariables>())
return;
- CFGSize.set(CFG->size());
+ // The declaration in DeclContext is the redeclaration of D that has a body.
+ const Decl *DefDecl = DeclContext->getDecl();
+
+ // Get the SyntaxRunningTime from the function summary: it is computed
+ // during the AM_Syntax analysis, which runs at a different time and in a
+ // different order, but always before AM_Path.
+ if (const auto *Summary = FunctionSummaries.findSummary(DefDecl);
+ Summary && Summary->SyntaxRunningTime.has_value()) {
+ SyntaxRunningTime.set(*Summary->SyntaxRunningTime);
+ }
ExprEngine Eng(CTU, *Mgr, VisitedCallees, &FunctionSummaries, IMode);
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index 5a4e805..dad3d0da 100644
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -654,6 +654,9 @@ PrototypeDescriptor::parsePrototypeDescriptor(
case 'F':
TM |= TypeModifier::Float;
break;
+ case 'Y':
+ TM |= TypeModifier::BFloat;
+ break;
case 'S':
TM |= TypeModifier::LMUL1;
break;
@@ -704,6 +707,8 @@ void RVVType::applyModifier(const PrototypeDescriptor &Transformer) {
ElementBitwidth *= 2;
LMUL.MulLog2LMUL(1);
Scale = LMUL.getScale(ElementBitwidth);
+ if (ScalarType == ScalarTypeKind::BFloat)
+ ScalarType = ScalarTypeKind::Float;
break;
case VectorTypeModifier::Widening4XVector:
ElementBitwidth *= 4;
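One detail in the Widening2XVector change worth noting: there is no 32-bit bfloat element type, so doubling the width of a bf16 element has to land on f32. My reading of the transition, as a comment sketch rather than verbatim source:

    // Widening2XVector on a BFloat element (assumed semantics):
    //   ElementBitwidth: 16 -> 32
    //   ScalarType:      BFloat -> Float  // no 32-bit bfloat exists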