Diffstat (limited to 'clang/lib')
-rw-r--r-- clang/lib/AST/ASTContext.cpp | 20
-rw-r--r-- clang/lib/AST/ASTStructuralEquivalence.cpp | 70
-rw-r--r-- clang/lib/AST/ByteCode/Compiler.cpp | 77
-rw-r--r-- clang/lib/AST/ByteCode/Compiler.h | 1
-rw-r--r-- clang/lib/AST/ByteCode/Disasm.cpp | 2
-rw-r--r-- clang/lib/AST/ByteCode/DynamicAllocator.cpp | 2
-rw-r--r-- clang/lib/AST/ByteCode/Interp.cpp | 2
-rw-r--r-- clang/lib/AST/ByteCode/Interp.h | 40
-rw-r--r-- clang/lib/AST/ByteCode/InterpBlock.cpp | 43
-rw-r--r-- clang/lib/AST/ByteCode/InterpBuiltin.cpp | 28
-rw-r--r-- clang/lib/AST/ByteCode/InterpStack.h | 2
-rw-r--r-- clang/lib/AST/ByteCode/InterpState.cpp | 2
-rw-r--r-- clang/lib/AST/ByteCode/Opcodes.td | 4
-rw-r--r-- clang/lib/AST/ByteCode/Pointer.cpp | 30
-rw-r--r-- clang/lib/AST/ByteCode/Pointer.h | 21
-rw-r--r-- clang/lib/AST/ByteCode/Program.cpp | 6
-rw-r--r-- clang/lib/AST/ByteCode/Program.h | 3
-rw-r--r-- clang/lib/AST/Decl.cpp | 9
-rw-r--r-- clang/lib/AST/Expr.cpp | 37
-rw-r--r-- clang/lib/AST/ExprConstant.cpp | 23
-rw-r--r-- clang/lib/AST/ExprObjC.cpp | 1
-rw-r--r-- clang/lib/AST/OSLog.cpp | 18
-rw-r--r-- clang/lib/AST/RecordLayoutBuilder.cpp | 2
-rw-r--r-- clang/lib/Analysis/UnsafeBufferUsage.cpp | 89
-rw-r--r-- clang/lib/Basic/FileManager.cpp | 5
-rw-r--r-- clang/lib/Basic/SourceManager.cpp | 10
-rw-r--r-- clang/lib/Basic/Targets/AMDGPU.cpp | 7
-rw-r--r-- clang/lib/Basic/Targets/ARM.cpp | 10
-rw-r--r-- clang/lib/Basic/Targets/ARM.h | 9
-rw-r--r-- clang/lib/Basic/Targets/WebAssembly.cpp | 17
-rw-r--r-- clang/lib/Basic/Targets/WebAssembly.h | 1
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp | 5
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenCall.cpp | 8
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenCall.h | 5
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenClass.cpp | 124
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenCleanup.cpp | 69
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenDecl.cpp | 169
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenExpr.cpp | 124
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp | 91
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp | 254
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp | 40
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenFunction.cpp | 292
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenFunction.h | 121
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp | 2
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenModule.cpp | 118
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenModule.h | 8
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp | 106
-rw-r--r-- clang/lib/CIR/CodeGen/CIRGenStmt.cpp | 9
-rw-r--r-- clang/lib/CIR/CodeGen/CMakeLists.txt | 1
-rw-r--r-- clang/lib/CIR/CodeGen/EHScopeStack.h | 99
-rw-r--r-- clang/lib/CIR/Dialect/IR/CIRDialect.cpp | 256
-rw-r--r-- clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp | 3
-rw-r--r-- clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp | 358
-rw-r--r-- clang/lib/CIR/Lowering/CIRPasses.cpp | 2
-rw-r--r-- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 106
-rw-r--r-- clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h | 15
-rw-r--r-- clang/lib/CodeGen/BackendUtil.cpp | 6
-rw-r--r-- clang/lib/CodeGen/CGBuiltin.cpp | 3
-rw-r--r-- clang/lib/CodeGen/CGCall.cpp | 36
-rw-r--r-- clang/lib/CodeGen/CGCoroutine.cpp | 2
-rw-r--r-- clang/lib/CodeGen/CGDebugInfo.cpp | 4
-rw-r--r-- clang/lib/CodeGen/CGExpr.cpp | 30
-rw-r--r-- clang/lib/CodeGen/CGExprCXX.cpp | 27
-rw-r--r-- clang/lib/CodeGen/CGOpenMPRuntime.cpp | 157
-rw-r--r-- clang/lib/CodeGen/CodeGenAction.cpp | 8
-rw-r--r-- clang/lib/CodeGen/ObjectFilePCHContainerWriter.cpp | 2
-rw-r--r-- clang/lib/CodeGen/SanitizerHandler.h | 88
-rw-r--r-- clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp | 37
-rw-r--r-- clang/lib/CodeGen/TargetBuiltins/ARM.cpp | 13
-rw-r--r-- clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp | 61
-rw-r--r-- clang/lib/Driver/Driver.cpp | 41
-rw-r--r-- clang/lib/Driver/SanitizerArgs.cpp | 6
-rw-r--r-- clang/lib/Driver/ToolChain.cpp | 11
-rw-r--r-- clang/lib/Driver/ToolChains/AMDGPU.h | 1
-rw-r--r-- clang/lib/Driver/ToolChains/Arch/AArch64.cpp | 15
-rw-r--r-- clang/lib/Driver/ToolChains/Arch/AArch64.h | 1
-rw-r--r-- clang/lib/Driver/ToolChains/Arch/Sparc.cpp | 13
-rw-r--r-- clang/lib/Driver/ToolChains/BareMetal.cpp | 24
-rw-r--r-- clang/lib/Driver/ToolChains/Clang.cpp | 52
-rw-r--r-- clang/lib/Driver/ToolChains/CommonArgs.cpp | 43
-rw-r--r-- clang/lib/Driver/ToolChains/Cuda.h | 56
-rw-r--r-- clang/lib/Driver/ToolChains/Darwin.h | 8
-rw-r--r-- clang/lib/Driver/ToolChains/Gnu.h | 8
-rw-r--r-- clang/lib/Driver/ToolChains/HIPAMD.h | 1
-rw-r--r-- clang/lib/Driver/ToolChains/HIPSPV.cpp | 9
-rw-r--r-- clang/lib/Driver/ToolChains/LazyDetector.h | 45
-rw-r--r-- clang/lib/Driver/ToolChains/MSVC.cpp | 4
-rw-r--r-- clang/lib/Driver/ToolChains/MSVC.h | 8
-rw-r--r-- clang/lib/Driver/ToolChains/MinGW.cpp | 13
-rw-r--r-- clang/lib/Driver/ToolChains/MinGW.h | 5
-rw-r--r-- clang/lib/Driver/ToolChains/ROCm.h | 314
-rw-r--r-- clang/lib/Driver/ToolChains/SYCL.h | 11
-rw-r--r-- clang/lib/Driver/ToolChains/Solaris.cpp | 6
-rw-r--r-- clang/lib/Driver/ToolChains/UEFI.cpp | 4
-rw-r--r-- clang/lib/Format/ContinuationIndenter.cpp | 3
-rw-r--r-- clang/lib/Format/Format.cpp | 48
-rw-r--r-- clang/lib/Format/IntegerLiteralSeparatorFixer.cpp | 11
-rw-r--r-- clang/lib/Format/TokenAnnotator.cpp | 3
-rw-r--r-- clang/lib/Frontend/ASTUnit.cpp | 9
-rw-r--r-- clang/lib/Frontend/CompilerInstance.cpp | 9
-rw-r--r-- clang/lib/Frontend/FrontendActions.cpp | 2
-rw-r--r-- clang/lib/Frontend/InitPreprocessor.cpp | 4
-rw-r--r-- clang/lib/Frontend/PrecompiledPreamble.cpp | 6
-rw-r--r-- clang/lib/Frontend/Rewrite/FrontendActions.cpp | 5
-rw-r--r-- clang/lib/Headers/avx10_2_512niintrin.h | 12
-rw-r--r-- clang/lib/Headers/avx10_2niintrin.h | 24
-rw-r--r-- clang/lib/Headers/hlsl/hlsl_intrinsic_helpers.h | 2
-rw-r--r-- clang/lib/Headers/hlsl/hlsl_intrinsics.h | 2
-rw-r--r-- clang/lib/Headers/opencl-c-base.h | 11
-rw-r--r-- clang/lib/Headers/opencl-c.h | 16
-rw-r--r-- clang/lib/Lex/PPMacroExpansion.cpp | 8
-rw-r--r-- clang/lib/Parse/ParseDecl.cpp | 3
-rw-r--r-- clang/lib/Parse/ParseDeclCXX.cpp | 7
-rw-r--r-- clang/lib/Parse/ParseHLSLRootSignature.cpp | 7
-rw-r--r-- clang/lib/Sema/AnalysisBasedWarnings.cpp | 4
-rw-r--r-- clang/lib/Sema/Sema.cpp | 7
-rw-r--r-- clang/lib/Sema/SemaARM.cpp | 66
-rw-r--r-- clang/lib/Sema/SemaAvailability.cpp | 6
-rw-r--r-- clang/lib/Sema/SemaChecking.cpp | 4
-rw-r--r-- clang/lib/Sema/SemaCodeComplete.cpp | 8
-rw-r--r-- clang/lib/Sema/SemaConcept.cpp | 4
-rw-r--r-- clang/lib/Sema/SemaDecl.cpp | 15
-rw-r--r-- clang/lib/Sema/SemaDeclAttr.cpp | 16
-rw-r--r-- clang/lib/Sema/SemaModule.cpp | 468
-rw-r--r-- clang/lib/Sema/SemaOpenACC.cpp | 18
-rw-r--r-- clang/lib/Sema/SemaOpenACCAtomic.cpp | 16
-rw-r--r-- clang/lib/Sema/SemaOpenACCClause.cpp | 16
-rw-r--r-- clang/lib/Sema/SemaOpenMP.cpp | 37
-rw-r--r-- clang/lib/Sema/SemaOverload.cpp | 4
-rw-r--r-- clang/lib/Sema/SemaStmt.cpp | 15
-rw-r--r-- clang/lib/Sema/SemaStmtAttr.cpp | 4
-rw-r--r-- clang/lib/Sema/SemaTemplate.cpp | 2
-rw-r--r-- clang/lib/Sema/SemaTemplateDeduction.cpp | 21
-rw-r--r-- clang/lib/Sema/SemaTemplateInstantiate.cpp | 5
-rw-r--r-- clang/lib/Sema/SemaTemplateInstantiateDecl.cpp | 2
-rw-r--r-- clang/lib/Sema/SemaTypeTraits.cpp | 72
-rw-r--r-- clang/lib/Sema/SemaWasm.cpp | 49
-rw-r--r-- clang/lib/Serialization/ASTReader.cpp | 1
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp | 38
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp | 146
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/CheckPlacementNew.cpp | 30
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp | 188
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp | 20
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp | 98
-rw-r--r-- clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp | 79
-rw-r--r-- clang/lib/Tooling/Core/Replacement.cpp | 4
-rw-r--r-- clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp | 4
-rw-r--r-- clang/lib/Tooling/Tooling.cpp | 26
148 files changed, 4246 insertions(+), 1588 deletions(-)
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index 6b6275f..16cf114 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -940,7 +940,6 @@ ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
DependentTypeOfExprTypes(this_()), DependentDecltypeTypes(this_()),
DependentPackIndexingTypes(this_()), TemplateSpecializationTypes(this_()),
- DependentTemplateSpecializationTypes(this_()),
DependentBitIntTypes(this_()), SubstTemplateTemplateParmPacks(this_()),
DeducedTemplates(this_()), ArrayParameterTypes(this_()),
CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
@@ -5979,10 +5978,9 @@ QualType ASTContext::getDependentTemplateSpecializationType(
llvm::FoldingSetNodeID ID;
DependentTemplateSpecializationType::Profile(ID, *this, Keyword, Name, Args);
- void *InsertPos = nullptr;
- if (auto *T = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(
- ID, InsertPos))
- return QualType(T, 0);
+ if (auto const T_iter = DependentTemplateSpecializationTypes.find(ID);
+ T_iter != DependentTemplateSpecializationTypes.end())
+ return QualType(T_iter->getSecond(), 0);
NestedNameSpecifier *NNS = Name.getQualifier();
@@ -6001,11 +5999,6 @@ QualType ASTContext::getDependentTemplateSpecializationType(
CanonKeyword, {CanonNNS, Name.getName(), /*HasTemplateKeyword=*/true},
CanonArgs,
/*IsCanonical=*/true);
- // Find the insert position again.
- [[maybe_unused]] auto *Nothing =
- DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID,
- InsertPos);
- assert(!Nothing && "canonical type broken");
}
} else {
assert(Keyword == getCanonicalElaboratedTypeKeyword(Keyword));
@@ -6021,8 +6014,13 @@ QualType ASTContext::getDependentTemplateSpecializationType(
alignof(DependentTemplateSpecializationType));
auto *T =
new (Mem) DependentTemplateSpecializationType(Keyword, Name, Args, Canon);
+#ifndef NDEBUG
+ llvm::FoldingSetNodeID InsertedID;
+ T->Profile(InsertedID, *this);
+ assert(InsertedID == ID && "ID does not match");
+#endif
Types.push_back(T);
- DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
+ DependentTemplateSpecializationTypes.try_emplace(ID, T);
return QualType(T, 0);
}
diff --git a/clang/lib/AST/ASTStructuralEquivalence.cpp b/clang/lib/AST/ASTStructuralEquivalence.cpp
index 0f2762d..22bb4cb 100644
--- a/clang/lib/AST/ASTStructuralEquivalence.cpp
+++ b/clang/lib/AST/ASTStructuralEquivalence.cpp
@@ -456,7 +456,9 @@ CheckStructurallyEquivalentAttributes(StructuralEquivalenceContext &Context,
const Decl *D1, const Decl *D2,
const Decl *PrimaryDecl = nullptr) {
// If either declaration has an attribute on it, we treat the declarations
- // as not being structurally equivalent.
+ // as not being structurally equivalent unless both declarations are implicit
+ // (ones generated by the compiler like __NSConstantString_tag).
+ //
// FIXME: this should be handled on a case-by-case basis via tablegen in
// Attr.td. There are multiple cases to consider: one declaration with the
// attribute, another without it; different attribute syntax|spellings for
@@ -468,7 +470,7 @@ CheckStructurallyEquivalentAttributes(StructuralEquivalenceContext &Context,
D1Attr = *D1->getAttrs().begin();
if (D2->hasAttrs())
D2Attr = *D2->getAttrs().begin();
- if (D1Attr || D2Attr) {
+ if ((D1Attr || D2Attr) && !D1->isImplicit() && !D2->isImplicit()) {
const auto *DiagnoseDecl = cast<TypeDecl>(PrimaryDecl ? PrimaryDecl : D2);
Context.Diag2(DiagnoseDecl->getLocation(),
diag::warn_odr_tag_type_with_attributes)
@@ -870,7 +872,27 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
else if (T1->getTypeClass() == Type::FunctionNoProto &&
T2->getTypeClass() == Type::FunctionProto)
TC = Type::FunctionNoProto;
- else
+ else if (Context.LangOpts.C23 && !Context.StrictTypeSpelling &&
+ (T1->getTypeClass() == Type::Enum ||
+ T2->getTypeClass() == Type::Enum)) {
+ // In C23, if not being strict about token equivalence, we need to handle
+ // the case where one type is an enumeration and the other type is an
+ // integral type.
+ //
+ // C23 6.7.3.3p16: The enumerated type is compatible with the underlying
+ // type of the enumeration.
+ //
+ // Treat the enumeration as its underlying type and use the builtin type
+ // class comparison.
+ if (T1->getTypeClass() == Type::Enum) {
+ T1 = T1->getAs<EnumType>()->getDecl()->getIntegerType();
+ assert(T2->isBuiltinType() && !T1.isNull()); // Sanity check
+ } else if (T2->getTypeClass() == Type::Enum) {
+ T2 = T2->getAs<EnumType>()->getDecl()->getIntegerType();
+ assert(T1->isBuiltinType() && !T2.isNull()); // Sanity check
+ }
+ TC = Type::Builtin;
+ } else
return false;
}
@@ -2071,6 +2093,48 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
!CheckStructurallyEquivalentAttributes(Context, D1, D2))
return false;
+ // In C23, if one enumeration has a fixed underlying type, the other shall
+ // have a compatible fixed underlying type (6.2.7).
+ if (Context.LangOpts.C23) {
+ if (D1->isFixed() != D2->isFixed()) {
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(),
+ Context.getApplicableDiagnostic(
+ diag::err_odr_tag_type_inconsistent))
+ << Context.ToCtx.getTypeDeclType(D2)
+ << (&Context.FromCtx != &Context.ToCtx);
+ Context.Diag1(D1->getLocation(),
+ D1->isFixed()
+ ? diag::note_odr_fixed_underlying_type
+ : diag::note_odr_missing_fixed_underlying_type)
+ << D1;
+ Context.Diag2(D2->getLocation(),
+ D2->isFixed()
+ ? diag::note_odr_fixed_underlying_type
+ : diag::note_odr_missing_fixed_underlying_type)
+ << D2;
+ }
+ return false;
+ }
+ if (D1->isFixed()) {
+ assert(D2->isFixed() && "enums expected to have fixed underlying types");
+ if (!IsStructurallyEquivalent(Context, D1->getIntegerType(),
+ D2->getIntegerType())) {
+ if (Context.Complain) {
+ Context.Diag2(D2->getLocation(),
+ Context.getApplicableDiagnostic(
+ diag::err_odr_tag_type_inconsistent))
+ << Context.ToCtx.getTypeDeclType(D2)
+ << (&Context.FromCtx != &Context.ToCtx);
+ Context.Diag2(D2->getLocation(),
+ diag::note_odr_incompatible_fixed_underlying_type)
+ << D2 << D2->getIntegerType() << D1->getIntegerType();
+ }
+ return false;
+ }
+ }
+ }
+
llvm::SmallVector<const EnumConstantDecl *, 8> D1Enums, D2Enums;
auto CopyEnumerators =
[](auto &&Range, llvm::SmallVectorImpl<const EnumConstantDecl *> &Cont) {
diff --git a/clang/lib/AST/ByteCode/Compiler.cpp b/clang/lib/AST/ByteCode/Compiler.cpp
index 63ac536..8b9e5e0 100644
--- a/clang/lib/AST/ByteCode/Compiler.cpp
+++ b/clang/lib/AST/ByteCode/Compiler.cpp
@@ -106,25 +106,14 @@ bool InitLink::emit(Compiler<Emitter> *Ctx, const Expr *E) const {
return true;
}
-/// Scope managing label targets.
-template <class Emitter> class LabelScope {
-public:
- virtual ~LabelScope() {}
-
-protected:
- LabelScope(Compiler<Emitter> *Ctx) : Ctx(Ctx) {}
- /// Compiler instance.
- Compiler<Emitter> *Ctx;
-};
-
/// Sets the context for break/continue statements.
-template <class Emitter> class LoopScope final : public LabelScope<Emitter> {
+template <class Emitter> class LoopScope final {
public:
using LabelTy = typename Compiler<Emitter>::LabelTy;
using OptLabelTy = typename Compiler<Emitter>::OptLabelTy;
LoopScope(Compiler<Emitter> *Ctx, LabelTy BreakLabel, LabelTy ContinueLabel)
- : LabelScope<Emitter>(Ctx), OldBreakLabel(Ctx->BreakLabel),
+ : Ctx(Ctx), OldBreakLabel(Ctx->BreakLabel),
OldContinueLabel(Ctx->ContinueLabel),
OldBreakVarScope(Ctx->BreakVarScope),
OldContinueVarScope(Ctx->ContinueVarScope) {
@@ -142,6 +131,7 @@ public:
}
private:
+ Compiler<Emitter> *Ctx;
OptLabelTy OldBreakLabel;
OptLabelTy OldContinueLabel;
VariableScope<Emitter> *OldBreakVarScope;
@@ -149,7 +139,7 @@ private:
};
// Sets the context for a switch scope, mapping labels.
-template <class Emitter> class SwitchScope final : public LabelScope<Emitter> {
+template <class Emitter> class SwitchScope final {
public:
using LabelTy = typename Compiler<Emitter>::LabelTy;
using OptLabelTy = typename Compiler<Emitter>::OptLabelTy;
@@ -157,7 +147,7 @@ public:
SwitchScope(Compiler<Emitter> *Ctx, CaseMap &&CaseLabels, LabelTy BreakLabel,
OptLabelTy DefaultLabel)
- : LabelScope<Emitter>(Ctx), OldBreakLabel(Ctx->BreakLabel),
+ : Ctx(Ctx), OldBreakLabel(Ctx->BreakLabel),
OldDefaultLabel(this->Ctx->DefaultLabel),
OldCaseLabels(std::move(this->Ctx->CaseLabels)),
OldLabelVarScope(Ctx->BreakVarScope) {
@@ -175,6 +165,7 @@ public:
}
private:
+ Compiler<Emitter> *Ctx;
OptLabelTy OldBreakLabel;
OptLabelTy OldDefaultLabel;
CaseMap OldCaseLabels;
@@ -457,13 +448,17 @@ bool Compiler<Emitter>::VisitCastExpr(const CastExpr *CE) {
assert(isPtrType(*FromT));
assert(isPtrType(*ToT));
if (FromT == ToT) {
- if (CE->getType()->isVoidPointerType())
+ if (CE->getType()->isVoidPointerType() &&
+ !SubExprTy->isFunctionPointerType()) {
return this->delegate(SubExpr);
+ }
if (!this->visit(SubExpr))
return false;
- if (CE->getType()->isFunctionPointerType())
- return true;
+ if (CE->getType()->isFunctionPointerType() ||
+ SubExprTy->isFunctionPointerType()) {
+ return this->emitFnPtrCast(CE);
+ }
if (FromT == PT_Ptr)
return this->emitPtrPtrCast(SubExprTy->isVoidPointerType(), CE);
return true;
@@ -1022,7 +1017,8 @@ bool Compiler<Emitter>::VisitPointerArithBinOp(const BinaryOperator *E) {
if (classifyPrim(E) != PT_Ptr)
return this->emitDecayPtr(PT_Ptr, classifyPrim(E), E);
return true;
- } else if (Op == BO_Sub) {
+ }
+ if (Op == BO_Sub) {
if (!this->emitSubOffset(OffsetType, E))
return false;
@@ -1762,6 +1758,9 @@ bool Compiler<Emitter>::visitInitList(ArrayRef<const Expr *> Inits,
if (Inits.size() == 1 && E->getType() == Inits[0]->getType())
return this->delegate(Inits[0]);
+ if (!R)
+ return false;
+
auto initPrimitiveField = [=](const Record::Field *FieldToInit,
const Expr *Init, PrimType T,
bool Activate = false) -> bool {
@@ -3703,7 +3702,7 @@ bool Compiler<Emitter>::VisitBlockExpr(const BlockExpr *E) {
return true;
const Function *Func = nullptr;
- if (auto F = Ctx.getOrCreateObjCBlock(E))
+ if (const Function *F = Ctx.getOrCreateObjCBlock(E))
Func = F;
if (!Func)
@@ -4288,7 +4287,8 @@ bool Compiler<Emitter>::visitZeroArrayInitializer(QualType T, const Expr *E) {
return false;
}
return true;
- } else if (ElemType->isRecordType()) {
+ }
+ if (ElemType->isRecordType()) {
const Record *R = getRecord(ElemType);
for (size_t I = 0; I != NumElems; ++I) {
@@ -4302,7 +4302,8 @@ bool Compiler<Emitter>::visitZeroArrayInitializer(QualType T, const Expr *E) {
return false;
}
return true;
- } else if (ElemType->isArrayType()) {
+ }
+ if (ElemType->isArrayType()) {
for (size_t I = 0; I != NumElems; ++I) {
if (!this->emitConstUint32(I, E))
return false;
@@ -4774,11 +4775,10 @@ VarCreationState Compiler<Emitter>::visitVarDecl(const VarDecl *VD,
if (!this->visit(Init))
return false;
return this->emitSetLocal(*VarT, Offset, VD) && Scope.destroyLocals();
- } else {
+ }
if (!this->visit(Init))
return false;
return this->emitSetLocal(*VarT, Offset, VD);
- }
}
} else {
if (std::optional<unsigned> Offset = this->allocateLocal(
@@ -4805,7 +4805,7 @@ bool Compiler<Emitter>::visitAPValue(const APValue &Val, PrimType ValType,
assert(!DiscardResult);
if (Val.isInt())
return this->emitConst(Val.getInt(), ValType, E);
- else if (Val.isFloat()) {
+ if (Val.isFloat()) {
APFloat F = Val.getFloat();
return this->emitFloat(F, E);
}
@@ -4816,9 +4816,8 @@ bool Compiler<Emitter>::visitAPValue(const APValue &Val, PrimType ValType,
APValue::LValueBase Base = Val.getLValueBase();
if (const Expr *BaseExpr = Base.dyn_cast<const Expr *>())
return this->visit(BaseExpr);
- else if (const auto *VD = Base.dyn_cast<const ValueDecl *>()) {
+ if (const auto *VD = Base.dyn_cast<const ValueDecl *>())
return this->visitDeclRef(VD, E);
- }
} else if (Val.isMemberPointer()) {
if (const ValueDecl *MemberDecl = Val.getMemberPointerDecl())
return this->emitGetMemberPtr(MemberDecl, E);
@@ -4854,7 +4853,8 @@ bool Compiler<Emitter>::visitAPValueInitializer(const APValue &Val,
}
}
return true;
- } else if (Val.isUnion()) {
+ }
+ if (Val.isUnion()) {
const FieldDecl *UnionField = Val.getUnionField();
const Record *R = this->getRecord(UnionField->getParent());
assert(R);
@@ -4864,7 +4864,8 @@ bool Compiler<Emitter>::visitAPValueInitializer(const APValue &Val,
if (!this->visitAPValue(F, T, E))
return false;
return this->emitInitField(T, RF->Offset, E);
- } else if (Val.isArray()) {
+ }
+ if (Val.isArray()) {
const auto *ArrType = T->getAsArrayTypeUnsafe();
QualType ElemType = ArrType->getElementType();
for (unsigned A = 0, AN = Val.getArraySize(); A != AN; ++A) {
@@ -4981,12 +4982,10 @@ bool Compiler<Emitter>::VisitCallExpr(const CallExpr *E) {
// Calls to replaceable operator new/operator delete.
if (FuncDecl->isUsableAsGlobalAllocationFunctionInConstantEvaluation()) {
- if (FuncDecl->getDeclName().isAnyOperatorNew()) {
+ if (FuncDecl->getDeclName().isAnyOperatorNew())
return VisitBuiltinCallExpr(E, Builtin::BI__builtin_operator_new);
- } else {
- assert(FuncDecl->getDeclName().getCXXOverloadedOperator() == OO_Delete);
- return VisitBuiltinCallExpr(E, Builtin::BI__builtin_operator_delete);
- }
+ assert(FuncDecl->getDeclName().getCXXOverloadedOperator() == OO_Delete);
+ return VisitBuiltinCallExpr(E, Builtin::BI__builtin_operator_delete);
}
// Explicit calls to trivial destructors
@@ -5455,7 +5454,9 @@ bool Compiler<Emitter>::visitReturnStmt(const ReturnStmt *RS) {
return false;
this->emitCleanup();
return this->emitRet(*ReturnType, RS);
- } else if (RE->getType()->isVoidType()) {
+ }
+
+ if (RE->getType()->isVoidType()) {
if (!this->visit(RE))
return false;
} else {
@@ -5500,7 +5501,7 @@ template <class Emitter> bool Compiler<Emitter>::visitIfStmt(const IfStmt *IS) {
if (std::optional<bool> BoolValue = getBoolValue(IS->getCond())) {
if (*BoolValue)
return visitChildStmt(IS->getThen());
- else if (const Stmt *Else = IS->getElse())
+ if (const Stmt *Else = IS->getElse())
return visitChildStmt(Else);
return true;
}
@@ -5992,7 +5993,7 @@ bool Compiler<Emitter>::compileConstructor(const CXXConstructorDecl *Ctor) {
if (!this->emitThis(Ctor))
return false;
- auto PVD = Ctor->getParamDecl(0);
+ const ParmVarDecl *PVD = Ctor->getParamDecl(0);
ParamOffset PO = this->Params[PVD]; // Must exist.
if (!this->emitGetParam(PT_Ptr, PO.Offset, Ctor))
@@ -6153,7 +6154,7 @@ bool Compiler<Emitter>::compileUnionAssignmentOperator(
if (!this->emitThis(MD))
return false;
- auto PVD = MD->getParamDecl(0);
+ const ParmVarDecl *PVD = MD->getParamDecl(0);
ParamOffset PO = this->Params[PVD]; // Must exist.
if (!this->emitGetParam(PT_Ptr, PO.Offset, MD))
diff --git a/clang/lib/AST/ByteCode/Compiler.h b/clang/lib/AST/ByteCode/Compiler.h
index 3a26342..16f108f 100644
--- a/clang/lib/AST/ByteCode/Compiler.h
+++ b/clang/lib/AST/ByteCode/Compiler.h
@@ -21,7 +21,6 @@
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/StmtVisitor.h"
-#include "clang/Basic/TargetInfo.h"
namespace clang {
class QualType;
diff --git a/clang/lib/AST/ByteCode/Disasm.cpp b/clang/lib/AST/ByteCode/Disasm.cpp
index 74399d1..5049a65 100644
--- a/clang/lib/AST/ByteCode/Disasm.cpp
+++ b/clang/lib/AST/ByteCode/Disasm.cpp
@@ -531,7 +531,7 @@ LLVM_DUMP_METHOD void Block::dump(llvm::raw_ostream &OS) const {
Desc->dump(OS);
OS << ")\n";
unsigned NPointers = 0;
- for (const Pointer *P = Pointers; P; P = P->Next) {
+ for (const Pointer *P = Pointers; P; P = P->asBlockPointer().Next) {
++NPointers;
}
OS << " EvalID: " << EvalID << '\n';
diff --git a/clang/lib/AST/ByteCode/DynamicAllocator.cpp b/clang/lib/AST/ByteCode/DynamicAllocator.cpp
index 4f0f511..169250c 100644
--- a/clang/lib/AST/ByteCode/DynamicAllocator.cpp
+++ b/clang/lib/AST/ByteCode/DynamicAllocator.cpp
@@ -27,7 +27,7 @@ void DynamicAllocator::cleanup() {
B->invokeDtor();
if (B->hasPointers()) {
while (B->Pointers) {
- Pointer *Next = B->Pointers->Next;
+ Pointer *Next = B->Pointers->asBlockPointer().Next;
B->Pointers->PointeeStorage.BS.Pointee = nullptr;
B->Pointers = Next;
}
diff --git a/clang/lib/AST/ByteCode/Interp.cpp b/clang/lib/AST/ByteCode/Interp.cpp
index 5463aec..224d65c 100644
--- a/clang/lib/AST/ByteCode/Interp.cpp
+++ b/clang/lib/AST/ByteCode/Interp.cpp
@@ -845,7 +845,7 @@ bool CheckInit(InterpState &S, CodePtr OpPC, const Pointer &Ptr) {
return true;
}
-bool CheckCallable(InterpState &S, CodePtr OpPC, const Function *F) {
+static bool CheckCallable(InterpState &S, CodePtr OpPC, const Function *F) {
if (F->isVirtual() && !S.getLangOpts().CPlusPlus20) {
const SourceLocation &Loc = S.Current->getLocation(OpPC);
diff --git a/clang/lib/AST/ByteCode/Interp.h b/clang/lib/AST/ByteCode/Interp.h
index 9012442..9a325ab 100644
--- a/clang/lib/AST/ByteCode/Interp.h
+++ b/clang/lib/AST/ByteCode/Interp.h
@@ -25,7 +25,6 @@
#include "InterpStack.h"
#include "InterpState.h"
#include "MemberPointer.h"
-#include "Opcode.h"
#include "PrimType.h"
#include "Program.h"
#include "State.h"
@@ -481,13 +480,11 @@ inline bool Mulc(InterpState &S, CodePtr OpPC) {
Floating RA = S.allocFloat(A.getSemantics());
RA.copy(ResR);
Result.elem<Floating>(0) = RA; // Floating(ResR);
- Result.atIndex(0).initialize();
Floating RI = S.allocFloat(A.getSemantics());
RI.copy(ResI);
Result.elem<Floating>(1) = RI; // Floating(ResI);
- Result.atIndex(1).initialize();
- Result.initialize();
+ Result.initializeAllElements();
} else {
// Integer element type.
const T &LHSR = LHS.elem<T>(0);
@@ -505,7 +502,6 @@ inline bool Mulc(InterpState &S, CodePtr OpPC) {
return false;
if (T::sub(A, B, Bits, &Result.elem<T>(0)))
return false;
- Result.atIndex(0).initialize();
// imag(Result) = (real(LHS) * imag(RHS)) + (imag(LHS) * real(RHS))
if (T::mul(LHSR, RHSI, Bits, &A))
@@ -514,8 +510,8 @@ inline bool Mulc(InterpState &S, CodePtr OpPC) {
return false;
if (T::add(A, B, Bits, &Result.elem<T>(1)))
return false;
- Result.atIndex(1).initialize();
Result.initialize();
+ Result.initializeAllElements();
}
return true;
@@ -541,14 +537,12 @@ inline bool Divc(InterpState &S, CodePtr OpPC) {
Floating RA = S.allocFloat(A.getSemantics());
RA.copy(ResR);
Result.elem<Floating>(0) = RA; // Floating(ResR);
- Result.atIndex(0).initialize();
Floating RI = S.allocFloat(A.getSemantics());
RI.copy(ResI);
Result.elem<Floating>(1) = RI; // Floating(ResI);
- Result.atIndex(1).initialize();
- Result.initialize();
+ Result.initializeAllElements();
} else {
// Integer element type.
const T &LHSR = LHS.elem<T>(0);
@@ -590,7 +584,6 @@ inline bool Divc(InterpState &S, CodePtr OpPC) {
return false;
if (T::div(ResultR, Den, Bits, &ResultR))
return false;
- Result.atIndex(0).initialize();
// imag(Result) = ((imag(LHS) * real(RHS)) - (real(LHS) * imag(RHS))) / Den
if (T::mul(LHSI, RHSR, Bits, &A) || T::mul(LHSR, RHSI, Bits, &B))
@@ -599,8 +592,7 @@ inline bool Divc(InterpState &S, CodePtr OpPC) {
return false;
if (T::div(ResultI, Den, Bits, &ResultI))
return false;
- Result.atIndex(1).initialize();
- Result.initialize();
+ Result.initializeAllElements();
}
return true;
@@ -1138,8 +1130,9 @@ inline bool CmpHelperEQ<Pointer>(InterpState &S, CodePtr OpPC, CompareFn Fn) {
S.FFDiag(Loc, diag::note_constexpr_pointer_comparison_past_end)
<< LHS.toDiagnosticString(S.getASTContext());
return false;
- } else if (RHS.isOnePastEnd() && !LHS.isOnePastEnd() && !LHS.isZero() &&
- LHS.getOffset() == 0) {
+ }
+ if (RHS.isOnePastEnd() && !LHS.isOnePastEnd() && !LHS.isZero() &&
+ LHS.getOffset() == 0) {
const SourceInfo &Loc = S.Current->getSource(OpPC);
S.FFDiag(Loc, diag::note_constexpr_pointer_comparison_past_end)
<< RHS.toDiagnosticString(S.getASTContext());
@@ -1157,8 +1150,9 @@ inline bool CmpHelperEQ<Pointer>(InterpState &S, CodePtr OpPC, CompareFn Fn) {
const SourceInfo &Loc = S.Current->getSource(OpPC);
S.FFDiag(Loc, diag::note_constexpr_literal_comparison);
return false;
- } else if (const auto *CE = dyn_cast<CallExpr>(E);
- CE && IsOpaqueConstantCall(CE)) {
+ }
+ if (const auto *CE = dyn_cast<CallExpr>(E);
+ CE && IsOpaqueConstantCall(CE)) {
const SourceInfo &Loc = S.Current->getSource(OpPC);
S.FFDiag(Loc, diag::note_constexpr_opaque_call_comparison)
<< P.toDiagnosticString(S.getASTContext());
@@ -2688,6 +2682,14 @@ static inline bool CastFixedPointIntegral(InterpState &S, CodePtr OpPC) {
return true;
}
+static inline bool FnPtrCast(InterpState &S, CodePtr OpPC) {
+ const SourceInfo &E = S.Current->getSource(OpPC);
+ S.CCEDiag(E, diag::note_constexpr_invalid_cast)
+ << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret
+ << S.getLangOpts().CPlusPlus << S.Current->getRange(OpPC);
+ return true;
+}
+
static inline bool PtrPtrCast(InterpState &S, CodePtr OpPC, bool SrcIsVoidPtr) {
const auto &Ptr = S.Stk.peek<Pointer>();
@@ -3273,7 +3275,8 @@ inline bool InvalidCast(InterpState &S, CodePtr OpPC, CastKind Kind,
S.CCEDiag(Loc, diag::note_constexpr_invalid_cast)
<< static_cast<unsigned>(Kind) << S.Current->getRange(OpPC);
return !Fatal;
- } else if (Kind == CastKind::Volatile) {
+ }
+ if (Kind == CastKind::Volatile) {
if (!S.checkingPotentialConstantExpression()) {
const auto *E = cast<CastExpr>(S.Current->getExpr(OpPC));
if (S.getLangOpts().CPlusPlus)
@@ -3284,7 +3287,8 @@ inline bool InvalidCast(InterpState &S, CodePtr OpPC, CastKind Kind,
}
return false;
- } else if (Kind == CastKind::Dynamic) {
+ }
+ if (Kind == CastKind::Dynamic) {
assert(!S.getLangOpts().CPlusPlus20);
S.CCEDiag(S.Current->getSource(OpPC), diag::note_constexpr_invalid_cast)
<< diag::ConstexprInvalidCastKind::Dynamic;
diff --git a/clang/lib/AST/ByteCode/InterpBlock.cpp b/clang/lib/AST/ByteCode/InterpBlock.cpp
index f603078..963b54e 100644
--- a/clang/lib/AST/ByteCode/InterpBlock.cpp
+++ b/clang/lib/AST/ByteCode/InterpBlock.cpp
@@ -27,9 +27,9 @@ void Block::addPointer(Pointer *P) {
assert(!hasPointer(P));
#endif
if (Pointers)
- Pointers->Prev = P;
- P->Next = Pointers;
- P->Prev = nullptr;
+ Pointers->PointeeStorage.BS.Prev = P;
+ P->PointeeStorage.BS.Next = Pointers;
+ P->PointeeStorage.BS.Prev = nullptr;
Pointers = P;
#ifndef NDEBUG
assert(hasPointer(P));
@@ -48,13 +48,15 @@ void Block::removePointer(Pointer *P) {
assert(hasPointer(P));
#endif
+ BlockPointer &BP = P->PointeeStorage.BS;
+
if (Pointers == P)
- Pointers = P->Next;
+ Pointers = BP.Next;
- if (P->Prev)
- P->Prev->Next = P->Next;
- if (P->Next)
- P->Next->Prev = P->Prev;
+ if (BP.Prev)
+ BP.Prev->PointeeStorage.BS.Next = BP.Next;
+ if (BP.Next)
+ BP.Next->PointeeStorage.BS.Prev = BP.Prev;
P->PointeeStorage.BS.Pointee = nullptr;
#ifndef NDEBUG
assert(!hasPointer(P));
@@ -68,7 +70,9 @@ void Block::cleanup() {
void Block::replacePointer(Pointer *Old, Pointer *New) {
assert(Old);
+ assert(Old->isBlockPointer());
assert(New);
+ assert(New->isBlockPointer());
assert(Old != New);
if (IsStatic) {
assert(!Pointers);
@@ -78,17 +82,20 @@ void Block::replacePointer(Pointer *Old, Pointer *New) {
assert(hasPointer(Old));
#endif
- if (Old->Prev)
- Old->Prev->Next = New;
- if (Old->Next)
- Old->Next->Prev = New;
- New->Prev = Old->Prev;
- New->Next = Old->Next;
+ BlockPointer &OldBP = Old->PointeeStorage.BS;
+ BlockPointer &NewBP = New->PointeeStorage.BS;
+
+ if (OldBP.Prev)
+ OldBP.Prev->PointeeStorage.BS.Next = New;
+ if (OldBP.Next)
+ OldBP.Next->PointeeStorage.BS.Prev = New;
+ NewBP.Prev = OldBP.Prev;
+ NewBP.Next = OldBP.Next;
if (Pointers == Old)
Pointers = New;
- Old->PointeeStorage.BS.Pointee = nullptr;
- New->PointeeStorage.BS.Pointee = this;
+ OldBP.Pointee = nullptr;
+ NewBP.Pointee = this;
#ifndef NDEBUG
assert(!hasPointer(Old));
assert(hasPointer(New));
@@ -97,7 +104,7 @@ void Block::replacePointer(Pointer *Old, Pointer *New) {
#ifndef NDEBUG
bool Block::hasPointer(const Pointer *P) const {
- for (const Pointer *C = Pointers; C; C = C->Next) {
+ for (const Pointer *C = Pointers; C; C = C->asBlockPointer().Next) {
if (C == P)
return true;
}
@@ -120,7 +127,7 @@ DeadBlock::DeadBlock(DeadBlock *&Root, Block *Blk)
// Transfer pointers.
B.Pointers = Blk->Pointers;
- for (Pointer *P = Blk->Pointers; P; P = P->Next)
+ for (Pointer *P = Blk->Pointers; P; P = P->asBlockPointer().Next)
P->PointeeStorage.BS.Pointee = &B;
Blk->Pointers = nullptr;
}
diff --git a/clang/lib/AST/ByteCode/InterpBuiltin.cpp b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
index 19d4c0c..f908d02 100644
--- a/clang/lib/AST/ByteCode/InterpBuiltin.cpp
+++ b/clang/lib/AST/ByteCode/InterpBuiltin.cpp
@@ -240,9 +240,9 @@ static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
T CB = PB.deref<T>();
if (CA > CB)
return returnResult(1);
- else if (CA < CB)
+ if (CA < CB)
return returnResult(-1);
- else if (CA.isZero() || CB.isZero())
+ if (CA.isZero() || CB.isZero())
return returnResult(0);
});
continue;
@@ -253,7 +253,7 @@ static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
if (CA > CB)
return returnResult(1);
- else if (CA < CB)
+ if (CA < CB)
return returnResult(-1);
if (CA == 0 || CB == 0)
return returnResult(0);
@@ -1048,7 +1048,7 @@ static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC,
PtrArg = ICE->getSubExpr();
}
- if (auto PtrTy = PtrArg->getType()->getAs<PointerType>()) {
+ if (const auto *PtrTy = PtrArg->getType()->getAs<PointerType>()) {
QualType PointeeType = PtrTy->getPointeeType();
if (!PointeeType->isIncompleteType() &&
S.getASTContext().getTypeAlignInChars(PointeeType) >= Size) {
@@ -1099,10 +1099,8 @@ static bool interp__builtin_complex(InterpState &S, CodePtr OpPC,
Pointer &Result = S.Stk.peek<Pointer>();
Result.elem<Floating>(0) = Arg1;
- Result.atIndex(0).initialize();
Result.elem<Floating>(1) = Arg2;
- Result.atIndex(1).initialize();
- Result.initialize();
+ Result.initializeAllElements();
return true;
}
@@ -1728,9 +1726,9 @@ static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC,
Dst.elem<T>(I) =
T::from(Arg.elem<T>(I).toAPSInt().reverseBits().getZExtValue());
}
- Dst.atIndex(I).initialize();
});
}
+ Dst.initializeAllElements();
return true;
}
@@ -1967,7 +1965,8 @@ static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC,
if (A < B) {
pushInteger(S, -1, Call->getType());
return true;
- } else if (A > B) {
+ }
+ if (A > B) {
pushInteger(S, 1, Call->getType());
return true;
}
@@ -1979,7 +1978,8 @@ static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC,
if (A < B) {
pushInteger(S, -1, Call->getType());
return true;
- } else if (A > B) {
+ }
+ if (A > B) {
pushInteger(S, 1, Call->getType());
return true;
}
@@ -2312,12 +2312,10 @@ static bool interp__builtin_elementwise_sat(InterpState &S, CodePtr OpPC,
llvm_unreachable("Wrong builtin ID");
}
- INT_TYPE_SWITCH_NO_BOOL(ElemT, {
- const Pointer &E = Dst.atIndex(I);
- E.deref<T>() = static_cast<T>(Result);
- E.initialize();
- });
+ INT_TYPE_SWITCH_NO_BOOL(ElemT,
+ { Dst.elem<T>(I) = static_cast<T>(Result); });
}
+ Dst.initializeAllElements();
return true;
}
diff --git a/clang/lib/AST/ByteCode/InterpStack.h b/clang/lib/AST/ByteCode/InterpStack.h
index 0b76f1d..580494e 100644
--- a/clang/lib/AST/ByteCode/InterpStack.h
+++ b/clang/lib/AST/ByteCode/InterpStack.h
@@ -14,11 +14,9 @@
#define LLVM_CLANG_AST_INTERP_INTERPSTACK_H
#include "FixedPoint.h"
-#include "FunctionPointer.h"
#include "IntegralAP.h"
#include "MemberPointer.h"
#include "PrimType.h"
-#include <memory>
#include <vector>
namespace clang {
diff --git a/clang/lib/AST/ByteCode/InterpState.cpp b/clang/lib/AST/ByteCode/InterpState.cpp
index 7848f29..3010847 100644
--- a/clang/lib/AST/ByteCode/InterpState.cpp
+++ b/clang/lib/AST/ByteCode/InterpState.cpp
@@ -52,7 +52,7 @@ void InterpState::cleanup() {
// As a last resort, make sure all pointers still pointing to a dead block
// don't point to it anymore.
for (DeadBlock *DB = DeadBlocks; DB; DB = DB->Next) {
- for (Pointer *P = DB->B.Pointers; P; P = P->Next) {
+ for (Pointer *P = DB->B.Pointers; P; P = P->asBlockPointer().Next) {
P->PointeeStorage.BS.Pointee = nullptr;
}
}
diff --git a/clang/lib/AST/ByteCode/Opcodes.td b/clang/lib/AST/ByteCode/Opcodes.td
index abfed77..95a4433 100644
--- a/clang/lib/AST/ByteCode/Opcodes.td
+++ b/clang/lib/AST/ByteCode/Opcodes.td
@@ -412,7 +412,7 @@ def CheckDecl : Opcode {
def CheckEnumValue : Opcode {
let Args = [ArgEnumDecl];
- let Types = [FixedSizeIntegralTypeClass];
+ let Types = [IntegralTypeClass];
let HasGroup = 1;
}
@@ -735,6 +735,8 @@ def PtrPtrCast : Opcode {
}
+def FnPtrCast : Opcode;
+
def DecayPtr : Opcode {
let Types = [PtrTypeClass, PtrTypeClass];
let HasGroup = 1;
diff --git a/clang/lib/AST/ByteCode/Pointer.cpp b/clang/lib/AST/ByteCode/Pointer.cpp
index 4019b74..dec2088 100644
--- a/clang/lib/AST/ByteCode/Pointer.cpp
+++ b/clang/lib/AST/ByteCode/Pointer.cpp
@@ -16,6 +16,7 @@
#include "MemberPointer.h"
#include "PrimType.h"
#include "Record.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/RecordLayout.h"
@@ -41,7 +42,7 @@ Pointer::Pointer(Block *Pointee, unsigned Base, uint64_t Offset)
: Offset(Offset), StorageKind(Storage::Block) {
assert((Base == RootPtrMark || Base % alignof(void *) == 0) && "wrong base");
- PointeeStorage.BS = {Pointee, Base};
+ PointeeStorage.BS = {Pointee, Base, nullptr, nullptr};
if (Pointee)
Pointee->addPointer(this);
@@ -66,14 +67,14 @@ Pointer::~Pointer() {
}
}
-void Pointer::operator=(const Pointer &P) {
+Pointer &Pointer::operator=(const Pointer &P) {
// If the current storage type is Block, we need to remove
// this pointer from the block.
if (isBlockPointer()) {
if (P.isBlockPointer() && this->block() == P.block()) {
Offset = P.Offset;
PointeeStorage.BS.Base = P.PointeeStorage.BS.Base;
- return;
+ return *this;
}
if (Block *Pointee = PointeeStorage.BS.Pointee) {
@@ -88,7 +89,6 @@ void Pointer::operator=(const Pointer &P) {
if (P.isBlockPointer()) {
PointeeStorage.BS = P.PointeeStorage.BS;
- PointeeStorage.BS.Pointee = P.PointeeStorage.BS.Pointee;
if (PointeeStorage.BS.Pointee)
PointeeStorage.BS.Pointee->addPointer(this);
@@ -101,16 +101,17 @@ void Pointer::operator=(const Pointer &P) {
} else {
assert(false && "Unhandled storage kind");
}
+ return *this;
}
-void Pointer::operator=(Pointer &&P) {
+Pointer &Pointer::operator=(Pointer &&P) {
// If the current storage type is Block, we need to remove
// this pointer from the block.
if (isBlockPointer()) {
if (P.isBlockPointer() && this->block() == P.block()) {
Offset = P.Offset;
PointeeStorage.BS.Base = P.PointeeStorage.BS.Base;
- return;
+ return *this;
}
if (Block *Pointee = PointeeStorage.BS.Pointee) {
@@ -125,7 +126,6 @@ void Pointer::operator=(Pointer &&P) {
if (P.isBlockPointer()) {
PointeeStorage.BS = P.PointeeStorage.BS;
- PointeeStorage.BS.Pointee = P.PointeeStorage.BS.Pointee;
if (PointeeStorage.BS.Pointee)
PointeeStorage.BS.Pointee->addPointer(this);
@@ -138,6 +138,7 @@ void Pointer::operator=(Pointer &&P) {
} else {
assert(false && "Unhandled storage kind");
}
+ return *this;
}
APValue Pointer::toAPValue(const ASTContext &ASTCtx) const {
@@ -492,6 +493,19 @@ void Pointer::initialize() const {
getInlineDesc()->IsInitialized = true;
}
+void Pointer::initializeAllElements() const {
+ assert(getFieldDesc()->isPrimitiveArray());
+ assert(isArrayRoot());
+
+ InitMapPtr &IM = getInitMap();
+ if (!IM) {
+ IM = std::make_pair(true, nullptr);
+ } else {
+ IM->first = true;
+ IM->second.reset();
+ }
+}
+
void Pointer::activate() const {
// Field has its bit in an inline descriptor.
assert(PointeeStorage.BS.Base != 0 &&
@@ -603,7 +617,7 @@ bool Pointer::pointsToStringLiteral() const {
return false;
const Expr *E = block()->getDescriptor()->asExpr();
- return E && isa<StringLiteral>(E);
+ return isa_and_nonnull<StringLiteral>(E);
}
std::optional<std::pair<Pointer, Pointer>>
diff --git a/clang/lib/AST/ByteCode/Pointer.h b/clang/lib/AST/ByteCode/Pointer.h
index d17eba5..5bafc5b 100644
--- a/clang/lib/AST/ByteCode/Pointer.h
+++ b/clang/lib/AST/ByteCode/Pointer.h
@@ -39,6 +39,10 @@ struct BlockPointer {
Block *Pointee;
/// Start of the current subfield.
unsigned Base;
+ /// Previous link in the pointer chain.
+ Pointer *Prev;
+ /// Next link in the pointer chain.
+ Pointer *Next;
};
struct IntPointer {
@@ -120,8 +124,8 @@ public:
Pointer(Block *Pointee, unsigned Base, uint64_t Offset);
~Pointer();
- void operator=(const Pointer &P);
- void operator=(Pointer &&P);
+ Pointer &operator=(const Pointer &P);
+ Pointer &operator=(Pointer &&P);
/// Equality operators are just for tests.
bool operator==(const Pointer &P) const {
@@ -725,6 +729,10 @@ public:
/// Initializes a field.
void initialize() const;
+ /// Initialize all elements of a primitive array at once. This can be
+ /// used in situations where we *know* we have initialized *all* elements
+ /// of a primtive array.
+ void initializeAllElements() const;
/// Activats a field.
void activate() const;
/// Deactivates an entire strurcutre.
@@ -761,7 +769,7 @@ public:
if (Offset < Other.Offset)
return ComparisonCategoryResult::Less;
- else if (Offset > Other.Offset)
+ if (Offset > Other.Offset)
return ComparisonCategoryResult::Greater;
return ComparisonCategoryResult::Equal;
@@ -828,15 +836,10 @@ private:
/// Offset into the storage.
uint64_t Offset = 0;
- /// Previous link in the pointer chain.
- Pointer *Prev = nullptr;
- /// Next link in the pointer chain.
- Pointer *Next = nullptr;
-
Storage StorageKind = Storage::Int;
union {
- BlockPointer BS;
IntPointer Int;
+ BlockPointer BS;
FunctionPointer Fn;
TypeidPointer Typeid;
} PointeeStorage;
diff --git a/clang/lib/AST/ByteCode/Program.cpp b/clang/lib/AST/ByteCode/Program.cpp
index 7002724..2421ec4 100644
--- a/clang/lib/AST/ByteCode/Program.cpp
+++ b/clang/lib/AST/ByteCode/Program.cpp
@@ -418,7 +418,7 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty,
}
return allocateDescriptor(D, *T, MDSize, NumElems, IsConst, IsTemporary,
IsMutable);
- } else {
+ }
// Arrays of composites. In this case, the array is a list of pointers,
// followed by the actual elements.
const Descriptor *ElemDesc = createDescriptor(
@@ -430,7 +430,6 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty,
return {};
return allocateDescriptor(D, Ty, ElemDesc, MDSize, NumElems, IsConst,
IsTemporary, IsMutable);
- }
}
// Array of unknown bounds - cannot be accessed and pointer arithmetic
@@ -440,14 +439,13 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty,
if (OptPrimType T = Ctx.classify(ElemTy)) {
return allocateDescriptor(D, *T, MDSize, IsConst, IsTemporary,
Descriptor::UnknownSize{});
- } else {
+ }
const Descriptor *Desc = createDescriptor(
D, ElemTy.getTypePtr(), std::nullopt, IsConst, IsTemporary);
if (!Desc)
return nullptr;
return allocateDescriptor(D, Desc, MDSize, IsTemporary,
Descriptor::UnknownSize{});
- }
}
}
diff --git a/clang/lib/AST/ByteCode/Program.h b/clang/lib/AST/ByteCode/Program.h
index 5d9c422..207ceef 100644
--- a/clang/lib/AST/ByteCode/Program.h
+++ b/clang/lib/AST/ByteCode/Program.h
@@ -19,10 +19,7 @@
#include "Record.h"
#include "Source.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/PointerUnion.h"
-#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
-#include <map>
#include <vector>
namespace clang {
diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp
index 83fcd87..1588be4 100644
--- a/clang/lib/AST/Decl.cpp
+++ b/clang/lib/AST/Decl.cpp
@@ -5988,11 +5988,10 @@ bool clang::IsArmStreamingFunction(const FunctionDecl *FD,
if (FD->hasAttr<ArmLocallyStreamingAttr>())
return true;
- if (const Type *Ty = FD->getType().getTypePtrOrNull())
- if (const auto *FPT = Ty->getAs<FunctionProtoType>())
- if (FPT->getAArch64SMEAttributes() &
- FunctionType::SME_PStateSMEnabledMask)
- return true;
+ assert(!FD->getType().isNull() && "Expected a valid FunctionDecl");
+ if (const auto *FPT = FD->getType()->getAs<FunctionProtoType>())
+ if (FPT->getAArch64SMEAttributes() & FunctionType::SME_PStateSMEnabledMask)
+ return true;
return false;
}
diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp
index 2e1a9a3..cd9672d 100644
--- a/clang/lib/AST/Expr.cpp
+++ b/clang/lib/AST/Expr.cpp
@@ -1629,20 +1629,20 @@ QualType CallExpr::getCallReturnType(const ASTContext &Ctx) const {
return FnType->getReturnType();
}
-std::pair<const NamedDecl *, const Attr *>
-CallExpr::getUnusedResultAttr(const ASTContext &Ctx) const {
+std::pair<const NamedDecl *, const WarnUnusedResultAttr *>
+Expr::getUnusedResultAttrImpl(const Decl *Callee, QualType ReturnType) {
// If the callee is marked nodiscard, return that attribute
- if (const Decl *D = getCalleeDecl())
- if (const auto *A = D->getAttr<WarnUnusedResultAttr>())
+ if (Callee != nullptr)
+ if (const auto *A = Callee->getAttr<WarnUnusedResultAttr>())
return {nullptr, A};
// If the return type is a struct, union, or enum that is marked nodiscard,
// then return the return type attribute.
- if (const TagDecl *TD = getCallReturnType(Ctx)->getAsTagDecl())
+ if (const TagDecl *TD = ReturnType->getAsTagDecl())
if (const auto *A = TD->getAttr<WarnUnusedResultAttr>())
return {TD, A};
- for (const auto *TD = getCallReturnType(Ctx)->getAs<TypedefType>(); TD;
+ for (const auto *TD = ReturnType->getAs<TypedefType>(); TD;
TD = TD->desugar()->getAs<TypedefType>())
if (const auto *A = TD->getDecl()->getAttr<WarnUnusedResultAttr>())
return {TD->getDecl(), A};
@@ -2844,12 +2844,11 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
return true;
}
- if (const ObjCMethodDecl *MD = ME->getMethodDecl())
- if (MD->hasAttr<WarnUnusedResultAttr>()) {
- WarnE = this;
- Loc = getExprLoc();
- return true;
- }
+ if (ME->hasUnusedResultAttr(Ctx)) {
+ WarnE = this;
+ Loc = getExprLoc();
+ return true;
+ }
return false;
}
@@ -4234,8 +4233,15 @@ bool Expr::isSameComparisonOperand(const Expr* E1, const Expr* E2) {
// template parameters.
const auto *DRE1 = cast<DeclRefExpr>(E1);
const auto *DRE2 = cast<DeclRefExpr>(E2);
- return DRE1->isPRValue() && DRE2->isPRValue() &&
- DRE1->getDecl() == DRE2->getDecl();
+
+ if (DRE1->getDecl() != DRE2->getDecl())
+ return false;
+
+ if ((DRE1->isPRValue() && DRE2->isPRValue()) ||
+ (DRE1->isLValue() && DRE2->isLValue()))
+ return true;
+
+ return false;
}
case ImplicitCastExprClass: {
// Peel off implicit casts.
@@ -4245,7 +4251,8 @@ bool Expr::isSameComparisonOperand(const Expr* E1, const Expr* E2) {
if (!ICE1 || !ICE2)
return false;
if (ICE1->getCastKind() != ICE2->getCastKind())
- return false;
+ return isSameComparisonOperand(ICE1->IgnoreParenImpCasts(),
+ ICE2->IgnoreParenImpCasts());
E1 = ICE1->getSubExpr()->IgnoreParens();
E2 = ICE2->getSubExpr()->IgnoreParens();
// The final cast must be one of these types.
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 0d12161..993b64b 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -9741,10 +9741,19 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_AddressSpaceConversion:
if (!Visit(SubExpr))
return false;
- // Bitcasts to cv void* are static_casts, not reinterpret_casts, so are
- // permitted in constant expressions in C++11. Bitcasts from cv void* are
- // also static_casts, but we disallow them as a resolution to DR1312.
- if (!E->getType()->isVoidPointerType()) {
+ if (E->getType()->isFunctionPointerType() ||
+ SubExpr->getType()->isFunctionPointerType()) {
+ // Casting between two function pointer types, or between a function
+ // pointer and an object pointer, is always a reinterpret_cast.
+ CCEDiag(E, diag::note_constexpr_invalid_cast)
+ << diag::ConstexprInvalidCastKind::ThisConversionOrReinterpret
+ << Info.Ctx.getLangOpts().CPlusPlus;
+ Result.Designator.setInvalid();
+ } else if (!E->getType()->isVoidPointerType()) {
+ // Bitcasts to cv void* are static_casts, not reinterpret_casts, so are
+ // permitted in constant expressions in C++11. Bitcasts from cv void* are
+ // also static_casts, but we disallow them as a resolution to DR1312.
+ //
// In some circumstances, we permit casting from void* to cv1 T*, when the
// actual pointee object is actually a cv2 T.
bool HasValidResult = !Result.InvalidBase && !Result.Designator.Invalid &&
@@ -14636,7 +14645,9 @@ EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E,
if (!LHSDesignator.Invalid && !RHSDesignator.Invalid && IsRelational) {
bool WasArrayIndex;
unsigned Mismatch = FindDesignatorMismatch(
- getType(LHSValue.Base), LHSDesignator, RHSDesignator, WasArrayIndex);
+ LHSValue.Base.isNull() ? QualType()
+ : getType(LHSValue.Base).getNonReferenceType(),
+ LHSDesignator, RHSDesignator, WasArrayIndex);
// At the point where the designators diverge, the comparison has a
// specified value if:
// - we are comparing array indices
@@ -14680,7 +14691,7 @@ EvaluateComparisonBinaryOperator(EvalInfo &Info, const BinaryOperator *E,
// compare pointers within the object in question; otherwise, the result
// depends on where the object is located in memory.
if (!LHSValue.Base.isNull() && IsRelational) {
- QualType BaseTy = getType(LHSValue.Base);
+ QualType BaseTy = getType(LHSValue.Base).getNonReferenceType();
if (BaseTy->isIncompleteType())
return Error(E);
CharUnits Size = Info.Ctx.getTypeSizeInChars(BaseTy);
diff --git a/clang/lib/AST/ExprObjC.cpp b/clang/lib/AST/ExprObjC.cpp
index 50d3a447..83419a1 100644
--- a/clang/lib/AST/ExprObjC.cpp
+++ b/clang/lib/AST/ExprObjC.cpp
@@ -12,6 +12,7 @@
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
#include "clang/AST/ComputeDependence.h"
#include "clang/AST/SelectorLocationsKind.h"
#include "clang/AST/Type.h"
diff --git a/clang/lib/AST/OSLog.cpp b/clang/lib/AST/OSLog.cpp
index b777d4d..91f8410 100644
--- a/clang/lib/AST/OSLog.cpp
+++ b/clang/lib/AST/OSLog.cpp
@@ -1,4 +1,16 @@
-// TODO: header template
+//===--- OSLog.cpp - OS log format string analysis ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements analysis functions for OS log format strings and
+/// buffer layout computation for __builtin_os_log_format and related builtins.
+///
+//===----------------------------------------------------------------------===//
#include "clang/AST/OSLog.h"
#include "clang/AST/Attr.h"
@@ -137,8 +149,8 @@ public:
for (auto &Data : ArgsData) {
if (!Data.MaskType.empty()) {
CharUnits Size = CharUnits::fromQuantity(8);
- Layout.Items.emplace_back(OSLogBufferItem::MaskKind, nullptr,
- Size, 0, Data.MaskType);
+ Layout.Items.emplace_back(OSLogBufferItem::MaskKind, nullptr, Size, 0,
+ Data.MaskType);
}
if (Data.FieldWidth) {
diff --git a/clang/lib/AST/RecordLayoutBuilder.cpp b/clang/lib/AST/RecordLayoutBuilder.cpp
index 6a74e98..760b2fc 100644
--- a/clang/lib/AST/RecordLayoutBuilder.cpp
+++ b/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -1953,7 +1953,7 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
// silently there. For other targets that have ms_struct enabled
// (most probably via a pragma or attribute), trigger a diagnostic
// that defaults to an error.
- if (!Context.getTargetInfo().getTriple().isWindowsGNUEnvironment())
+ if (!Context.getTargetInfo().getTriple().isOSCygMing())
Diag(D->getLocation(), diag::warn_npot_ms_struct);
}
if (TypeSize > FieldAlign &&
diff --git a/clang/lib/Analysis/UnsafeBufferUsage.cpp b/clang/lib/Analysis/UnsafeBufferUsage.cpp
index ac47b12..40dff7e 100644
--- a/clang/lib/Analysis/UnsafeBufferUsage.cpp
+++ b/clang/lib/Analysis/UnsafeBufferUsage.cpp
@@ -25,6 +25,7 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallSet.h"
@@ -809,28 +810,86 @@ static bool hasUnsafeFormatOrSArg(const CallExpr *Call, const Expr *&UnsafeArg,
const CallExpr *Call;
unsigned FmtArgIdx;
const Expr *&UnsafeArg;
+ ASTContext &Ctx;
+
+ // Returns an `Expr` representing the precision if specified, null
+ // otherwise.
+ // The parameter `Call` is a printf call and the parameter `Precision` is
+ // the precision of a format specifier of the `Call`.
+ //
+ // For example, for the `printf("%d, %.10s", 10, p)` call
+ // `Precision` can be the precision of either "%d" or "%.10s". The former
+ // one will have `NotSpecified` kind.
+ const Expr *
+ getPrecisionAsExpr(const analyze_printf::OptionalAmount &Precision,
+ const CallExpr *Call) {
+ unsigned PArgIdx = -1;
+
+ if (Precision.hasDataArgument())
+ PArgIdx = Precision.getPositionalArgIndex() + FmtArgIdx;
+ if (0 < PArgIdx && PArgIdx < Call->getNumArgs()) {
+ const Expr *PArg = Call->getArg(PArgIdx);
+
+ // Strip the cast if `PArg` is a cast-to-int expression:
+ if (auto *CE = dyn_cast<CastExpr>(PArg);
+ CE && CE->getType()->isSignedIntegerType())
+ PArg = CE->getSubExpr();
+ return PArg;
+ }
+ if (Precision.getHowSpecified() ==
+ analyze_printf::OptionalAmount::HowSpecified::Constant) {
+ auto SizeTy = Ctx.getSizeType();
+ llvm::APSInt PArgVal = llvm::APSInt(
+ llvm::APInt(Ctx.getTypeSize(SizeTy), Precision.getConstantAmount()),
+ true);
+
+ return IntegerLiteral::Create(Ctx, PArgVal, Ctx.getSizeType(), {});
+ }
+ return nullptr;
+ }
public:
StringFormatStringHandler(const CallExpr *Call, unsigned FmtArgIdx,
- const Expr *&UnsafeArg)
- : Call(Call), FmtArgIdx(FmtArgIdx), UnsafeArg(UnsafeArg) {}
+ const Expr *&UnsafeArg, ASTContext &Ctx)
+ : Call(Call), FmtArgIdx(FmtArgIdx), UnsafeArg(UnsafeArg), Ctx(Ctx) {}
bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
const char *startSpecifier,
unsigned specifierLen,
const TargetInfo &Target) override {
- if (FS.getConversionSpecifier().getKind() ==
- analyze_printf::PrintfConversionSpecifier::sArg) {
- unsigned ArgIdx = FS.getPositionalArgIndex() + FmtArgIdx;
-
- if (0 < ArgIdx && ArgIdx < Call->getNumArgs())
- if (!isNullTermPointer(Call->getArg(ArgIdx))) {
- UnsafeArg = Call->getArg(ArgIdx); // output
- // returning false stops parsing immediately
- return false;
- }
- }
- return true; // continue parsing
+ if (FS.getConversionSpecifier().getKind() !=
+ analyze_printf::PrintfConversionSpecifier::sArg)
+ return true; // continue parsing
+
+ unsigned ArgIdx = FS.getPositionalArgIndex() + FmtArgIdx;
+
+ if (!(0 < ArgIdx && ArgIdx < Call->getNumArgs()))
+ // If the `ArgIdx` is invalid, give up.
+ return true; // continue parsing
+
+ const Expr *Arg = Call->getArg(ArgIdx);
+
+ if (isNullTermPointer(Arg))
+ // If Arg is a null-terminated pointer, it is safe anyway.
+ return true; // continue parsing
+
+ // Otherwise, check if the specifier has a precision and if the character
+ // pointer is safely bound by the precision:
+ auto LengthModifier = FS.getLengthModifier();
+ QualType ArgType = Arg->getType();
+ bool IsArgTypeValid = // Is ArgType a character pointer type?
+ ArgType->isPointerType() &&
+ (LengthModifier.getKind() == LengthModifier.AsWideChar
+ ? ArgType->getPointeeType()->isWideCharType()
+ : ArgType->getPointeeType()->isCharType());
+
+ if (auto *Precision = getPrecisionAsExpr(FS.getPrecision(), Call);
+ Precision && IsArgTypeValid)
+ if (isPtrBufferSafe(Arg, Precision, Ctx))
+ return true;
+ // Handle unsafe case:
+ UnsafeArg = Call->getArg(ArgIdx); // output
+ return false; // returning false stops parsing immediately
}
};
@@ -846,7 +905,7 @@ static bool hasUnsafeFormatOrSArg(const CallExpr *Call, const Expr *&UnsafeArg,
else
goto CHECK_UNSAFE_PTR;
- StringFormatStringHandler Handler(Call, FmtArgIdx, UnsafeArg);
+ StringFormatStringHandler Handler(Call, FmtArgIdx, UnsafeArg, Ctx);
return analyze_format_string::ParsePrintfString(
Handler, FmtStr.begin(), FmtStr.end(), Ctx.getLangOpts(),
diff --git a/clang/lib/Basic/FileManager.cpp b/clang/lib/Basic/FileManager.cpp
index fc4ec78..7481e1e 100644
--- a/clang/lib/Basic/FileManager.cpp
+++ b/clang/lib/Basic/FileManager.cpp
@@ -368,11 +368,6 @@ void FileManager::trackVFSUsage(bool Active) {
});
}
-const FileEntry *FileManager::getVirtualFile(StringRef Filename, off_t Size,
- time_t ModificationTime) {
- return &getVirtualFileRef(Filename, Size, ModificationTime).getFileEntry();
-}
-
FileEntryRef FileManager::getVirtualFileRef(StringRef Filename, off_t Size,
time_t ModificationTime) {
++NumFileLookups;
diff --git a/clang/lib/Basic/SourceManager.cpp b/clang/lib/Basic/SourceManager.cpp
index b2b1488..5b8444a 100644
--- a/clang/lib/Basic/SourceManager.cpp
+++ b/clang/lib/Basic/SourceManager.cpp
@@ -2366,18 +2366,16 @@ size_t SourceManager::getDataStructureSizes() const {
SourceManagerForFile::SourceManagerForFile(StringRef FileName,
StringRef Content) {
- // This is referenced by `FileMgr` and will be released by `FileMgr` when it
- // is deleted.
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
InMemoryFileSystem->addFile(
FileName, 0,
llvm::MemoryBuffer::getMemBuffer(Content, FileName,
/*RequiresNullTerminator=*/false));
// This is passed to `SM` as reference, so the pointer has to be referenced
// in `Environment` so that `FileMgr` can out-live this function scope.
- FileMgr =
- std::make_unique<FileManager>(FileSystemOptions(), InMemoryFileSystem);
+ FileMgr = std::make_unique<FileManager>(FileSystemOptions(),
+ std::move(InMemoryFileSystem));
DiagOpts = std::make_unique<DiagnosticOptions>();
// This is passed to `SM` as reference, so the pointer has to be referenced
// by `Environment` due to the same reason above.
diff --git a/clang/lib/Basic/Targets/AMDGPU.cpp b/clang/lib/Basic/Targets/AMDGPU.cpp
index cebcfa3..52cbdbc 100644
--- a/clang/lib/Basic/Targets/AMDGPU.cpp
+++ b/clang/lib/Basic/Targets/AMDGPU.cpp
@@ -266,8 +266,11 @@ AMDGPUTargetInfo::AMDGPUTargetInfo(const llvm::Triple &Triple,
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
CUMode = !(GPUFeatures & llvm::AMDGPU::FEATURE_WGP);
- for (auto F : {"image-insts", "gws", "vmem-to-lds-load-insts"})
- ReadOnlyFeatures.insert(F);
+
+ for (auto F : {"image-insts", "gws", "vmem-to-lds-load-insts"}) {
+ if (GPUKind != llvm::AMDGPU::GK_NONE)
+ ReadOnlyFeatures.insert(F);
+ }
HalfArgsAndReturns = true;
}
diff --git a/clang/lib/Basic/Targets/ARM.cpp b/clang/lib/Basic/Targets/ARM.cpp
index 29de34bb..6bec2fa 100644
--- a/clang/lib/Basic/Targets/ARM.cpp
+++ b/clang/lib/Basic/Targets/ARM.cpp
@@ -618,21 +618,21 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
LDREX = 0;
else if (ArchKind == llvm::ARM::ArchKind::ARMV6K ||
ArchKind == llvm::ARM::ArchKind::ARMV6KZ)
- LDREX = LDREX_D | LDREX_W | LDREX_H | LDREX_B;
+ LDREX = ARM_LDREX_D | ARM_LDREX_W | ARM_LDREX_H | ARM_LDREX_B;
else
- LDREX = LDREX_W;
+ LDREX = ARM_LDREX_W;
break;
case 7:
case 8:
if (ArchProfile == llvm::ARM::ProfileKind::M)
- LDREX = LDREX_W | LDREX_H | LDREX_B;
+ LDREX = ARM_LDREX_W | ARM_LDREX_H | ARM_LDREX_B;
else
- LDREX = LDREX_D | LDREX_W | LDREX_H | LDREX_B;
+ LDREX = ARM_LDREX_D | ARM_LDREX_W | ARM_LDREX_H | ARM_LDREX_B;
break;
case 9:
assert(ArchProfile != llvm::ARM::ProfileKind::M &&
"No Armv9-M architectures defined");
- LDREX = LDREX_D | LDREX_W | LDREX_H | LDREX_B;
+ LDREX = ARM_LDREX_D | ARM_LDREX_W | ARM_LDREX_H | ARM_LDREX_B;
}
if (!(FPU & NeonFPU) && FPMath == FP_Neon) {
diff --git a/clang/lib/Basic/Targets/ARM.h b/clang/lib/Basic/Targets/ARM.h
index 1719217..43c4718 100644
--- a/clang/lib/Basic/Targets/ARM.h
+++ b/clang/lib/Basic/Targets/ARM.h
@@ -98,13 +98,6 @@ class LLVM_LIBRARY_VISIBILITY ARMTargetInfo : public TargetInfo {
LLVM_PREFERRED_TYPE(bool)
unsigned HasBTI : 1;
- enum {
- LDREX_B = (1 << 0), /// byte (8-bit)
- LDREX_H = (1 << 1), /// half (16-bit)
- LDREX_W = (1 << 2), /// word (32-bit)
- LDREX_D = (1 << 3), /// double (64-bit)
- };
-
uint32_t LDREX;
// ACLE 6.5.1 Hardware floating point
@@ -225,6 +218,8 @@ public:
bool hasBitIntType() const override { return true; }
+ unsigned getARMLDREXMask() const override { return LDREX; }
+
const char *getBFloat16Mangling() const override { return "u6__bf16"; };
std::pair<unsigned, unsigned> hardwareInterferenceSizes() const override {
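
A hedged sketch of how a consumer might use the new `getARMLDREXMask()` hook; the `override` implies a matching virtual on `clang::TargetInfo`, and the bit layout is assumed to match the private enum removed above (B=1<<0, H=1<<1, W=1<<2, D=1<<3) under the new `ARM_LDREX_*` names.

// Hypothetical consumer; only the bit layout from the removed enum is assumed.
#include "clang/Basic/TargetInfo.h"

static bool supportsDoublewordLdrex(const clang::TargetInfo &ti) {
  constexpr unsigned ldrexD = 1u << 3; // was LDREX_D in the removed enum
  return (ti.getARMLDREXMask() & ldrexD) != 0;
}
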
diff --git a/clang/lib/Basic/Targets/WebAssembly.cpp b/clang/lib/Basic/Targets/WebAssembly.cpp
index af25d25..55ffe1d 100644
--- a/clang/lib/Basic/Targets/WebAssembly.cpp
+++ b/clang/lib/Basic/Targets/WebAssembly.cpp
@@ -59,6 +59,7 @@ bool WebAssemblyTargetInfo::hasFeature(StringRef Feature) const {
.Case("exception-handling", HasExceptionHandling)
.Case("extended-const", HasExtendedConst)
.Case("fp16", HasFP16)
+ .Case("gc", HasGC)
.Case("multimemory", HasMultiMemory)
.Case("multivalue", HasMultivalue)
.Case("mutable-globals", HasMutableGlobals)
@@ -98,6 +99,8 @@ void WebAssemblyTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__wasm_multimemory__");
if (HasFP16)
Builder.defineMacro("__wasm_fp16__");
+ if (HasGC)
+ Builder.defineMacro("__wasm_gc__");
if (HasMultivalue)
Builder.defineMacro("__wasm_multivalue__");
if (HasMutableGlobals)
@@ -191,6 +194,7 @@ bool WebAssemblyTargetInfo::initFeatureMap(
Features["exception-handling"] = true;
Features["extended-const"] = true;
Features["fp16"] = true;
+ Features["gc"] = true;
Features["multimemory"] = true;
Features["tail-call"] = true;
Features["wide-arithmetic"] = true;
@@ -267,6 +271,14 @@ bool WebAssemblyTargetInfo::handleTargetFeatures(
HasFP16 = false;
continue;
}
+ if (Feature == "+gc") {
+ HasGC = true;
+ continue;
+ }
+ if (Feature == "-gc") {
+ HasGC = false;
+ continue;
+ }
if (Feature == "+multimemory") {
HasMultiMemory = true;
continue;
@@ -353,6 +365,11 @@ bool WebAssemblyTargetInfo::handleTargetFeatures(
return false;
}
+ // gc implies reference-types
+ if (HasGC) {
+ HasReferenceTypes = true;
+ }
+
// bulk-memory-opt is a subset of bulk-memory.
if (HasBulkMemory) {
HasBulkMemoryOpt = true;
diff --git a/clang/lib/Basic/Targets/WebAssembly.h b/clang/lib/Basic/Targets/WebAssembly.h
index 57b366c..eba7422 100644
--- a/clang/lib/Basic/Targets/WebAssembly.h
+++ b/clang/lib/Basic/Targets/WebAssembly.h
@@ -64,6 +64,7 @@ class LLVM_LIBRARY_VISIBILITY WebAssemblyTargetInfo : public TargetInfo {
bool HasExceptionHandling = false;
bool HasExtendedConst = false;
bool HasFP16 = false;
+ bool HasGC = false;
bool HasMultiMemory = false;
bool HasMultivalue = false;
bool HasMutableGlobals = false;
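
A minimal usage sketch for the new feature (assuming the usual `-m<feature>` driver spelling, i.e. `-mgc` / `-mno-gc`, which these hunks do not show): `__wasm_gc__` is defined whenever the feature is enabled, and enabling `gc` also turns on `reference-types`.

// Hypothetical test program; compile with e.g. --target=wasm32 -mgc (assumed flag).
#ifdef __wasm_gc__
constexpr bool has_wasm_gc = true;
#else
constexpr bool has_wasm_gc = false;
#endif

int main() { return has_wasm_gc ? 0 : 1; }
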
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index ef136f8..9049a01 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -190,6 +190,11 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
assert(!cir::MissingFeatures::builtinCheckKind());
return emitBuiltinBitOp<cir::BitClzOp>(*this, e, /*poisonZero=*/true);
+ case Builtin::BI__builtin_ffs:
+ case Builtin::BI__builtin_ffsl:
+ case Builtin::BI__builtin_ffsll:
+ return emitBuiltinBitOp<cir::BitFfsOp>(*this, e);
+
case Builtin::BI__builtin_parity:
case Builtin::BI__builtin_parityl:
case Builtin::BI__builtin_parityll:
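
For reference, a small sketch of source that now routes through `emitBuiltinBitOp<cir::BitFfsOp>` instead of the unsupported-builtin path:

// __builtin_ffsll returns 0 for 0, otherwise the 1-based index of the lowest set bit.
int lowestSetBit(long long v) { return __builtin_ffsll(v); }
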
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.cpp b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
index 938d143..fc208ff 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.cpp
@@ -582,6 +582,14 @@ RValue CIRGenFunction::emitCall(const CIRGenFunctionInfo &funcInfo,
cir::FuncOp directFuncOp;
if (auto fnOp = dyn_cast<cir::FuncOp>(calleePtr)) {
directFuncOp = fnOp;
+ } else if (auto getGlobalOp = mlir::dyn_cast<cir::GetGlobalOp>(calleePtr)) {
+ // FIXME(cir): This peephole optimization avoids indirect calls for
+ // builtins. This should be fixed in the builtin declaration instead by
+    // not emitting an unnecessary get_global in the first place.
+ // However, this is also used for no-prototype functions.
+ mlir::Operation *globalOp = cgm.getGlobalValue(getGlobalOp.getName());
+ assert(globalOp && "undefined global function");
+ directFuncOp = mlir::cast<cir::FuncOp>(globalOp);
} else {
[[maybe_unused]] mlir::ValueTypeRange<mlir::ResultRange> resultTypes =
calleePtr->getResultTypes();
diff --git a/clang/lib/CIR/CodeGen/CIRGenCall.h b/clang/lib/CIR/CodeGen/CIRGenCall.h
index bd11329..a78956b 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCall.h
+++ b/clang/lib/CIR/CodeGen/CIRGenCall.h
@@ -116,6 +116,11 @@ public:
assert(isOrdinary());
return reinterpret_cast<mlir::Operation *>(kindOrFunctionPtr);
}
+
+ void setFunctionPointer(mlir::Operation *functionPtr) {
+ assert(isOrdinary());
+ kindOrFunctionPtr = SpecialKind(reinterpret_cast<uintptr_t>(functionPtr));
+ }
};
/// Type for representing both the decl and type of parameters to a function.
diff --git a/clang/lib/CIR/CodeGen/CIRGenClass.cpp b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
index fbf53db..50cca0e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenClass.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenClass.cpp
@@ -12,6 +12,7 @@
#include "CIRGenCXXABI.h"
#include "CIRGenFunction.h"
+#include "CIRGenValue.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/RecordLayout.h"
@@ -311,6 +312,116 @@ void CIRGenFunction::emitInitializerForField(FieldDecl *field, LValue lhs,
assert(!cir::MissingFeatures::requiresCleanups());
}
+/// Emit a loop to call a particular constructor for each of several members
+/// of an array.
+///
+/// \param ctor the constructor to call for each element
+/// \param arrayType the type of the array to initialize
+/// \param arrayBegin an arrayType*
+/// \param zeroInitialize true if each element should be
+/// zero-initialized before it is constructed
+void CIRGenFunction::emitCXXAggrConstructorCall(
+ const CXXConstructorDecl *ctor, const clang::ArrayType *arrayType,
+ Address arrayBegin, const CXXConstructExpr *e, bool newPointerIsChecked,
+ bool zeroInitialize) {
+ QualType elementType;
+ mlir::Value numElements = emitArrayLength(arrayType, elementType, arrayBegin);
+ emitCXXAggrConstructorCall(ctor, numElements, arrayBegin, e,
+ newPointerIsChecked, zeroInitialize);
+}
+
+/// Emit a loop to call a particular constructor for each of several members
+/// of an array.
+///
+/// \param ctor the constructor to call for each element
+/// \param numElements the number of elements in the array;
+/// may be zero
+/// \param arrayBase a T*, where T is the type constructed by ctor
+/// \param zeroInitialize true if each element should be
+/// zero-initialized before it is constructed
+void CIRGenFunction::emitCXXAggrConstructorCall(
+ const CXXConstructorDecl *ctor, mlir::Value numElements, Address arrayBase,
+ const CXXConstructExpr *e, bool newPointerIsChecked, bool zeroInitialize) {
+ // It's legal for numElements to be zero. This can happen both
+ // dynamically, because x can be zero in 'new A[x]', and statically,
+ // because of GCC extensions that permit zero-length arrays. There
+ // are probably legitimate places where we could assume that this
+ // doesn't happen, but it's not clear that it's worth it.
+
+ // Optimize for a constant count.
+ auto constantCount = dyn_cast<cir::ConstantOp>(numElements.getDefiningOp());
+ if (constantCount) {
+ auto constIntAttr = mlir::dyn_cast<cir::IntAttr>(constantCount.getValue());
+ // Just skip out if the constant count is zero.
+ if (constIntAttr && constIntAttr.getUInt() == 0)
+ return;
+ } else {
+ // Otherwise, emit the check.
+ cgm.errorNYI(e->getSourceRange(), "dynamic-length array expression");
+ }
+
+ auto arrayTy = mlir::cast<cir::ArrayType>(arrayBase.getElementType());
+ mlir::Type elementType = arrayTy.getElementType();
+ cir::PointerType ptrToElmType = builder.getPointerTo(elementType);
+
+  // Traditional LLVM codegen emits a loop here. CIR lowers to a loop as part of
+ // LoweringPrepare.
+
+ // The alignment of the base, adjusted by the size of a single element,
+ // provides a conservative estimate of the alignment of every element.
+ // (This assumes we never start tracking offsetted alignments.)
+ //
+ // Note that these are complete objects and so we don't need to
+ // use the non-virtual size or alignment.
+ QualType type = getContext().getTypeDeclType(ctor->getParent());
+ CharUnits eltAlignment = arrayBase.getAlignment().alignmentOfArrayElement(
+ getContext().getTypeSizeInChars(type));
+
+ // Zero initialize the storage, if requested.
+ if (zeroInitialize)
+ emitNullInitialization(*currSrcLoc, arrayBase, type);
+
+ // C++ [class.temporary]p4:
+ // There are two contexts in which temporaries are destroyed at a different
+ // point than the end of the full-expression. The first context is when a
+ // default constructor is called to initialize an element of an array.
+ // If the constructor has one or more default arguments, the destruction of
+ // every temporary created in a default argument expression is sequenced
+ // before the construction of the next array element, if any.
+ {
+ assert(!cir::MissingFeatures::runCleanupsScope());
+
+ // Evaluate the constructor and its arguments in a regular
+ // partial-destroy cleanup.
+ if (getLangOpts().Exceptions &&
+ !ctor->getParent()->hasTrivialDestructor()) {
+ cgm.errorNYI(e->getSourceRange(), "partial array cleanups");
+ }
+
+ // Emit the constructor call that will execute for every array element.
+ mlir::Value arrayOp =
+ builder.createPtrBitcast(arrayBase.getPointer(), arrayTy);
+ builder.create<cir::ArrayCtor>(
+ *currSrcLoc, arrayOp, [&](mlir::OpBuilder &b, mlir::Location loc) {
+ mlir::BlockArgument arg =
+ b.getInsertionBlock()->addArgument(ptrToElmType, loc);
+ Address curAddr = Address(arg, elementType, eltAlignment);
+ assert(!cir::MissingFeatures::sanitizers());
+ auto currAVS = AggValueSlot::forAddr(
+ curAddr, type.getQualifiers(), AggValueSlot::IsDestructed,
+ AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap,
+ AggValueSlot::IsNotZeroed);
+ emitCXXConstructorCall(ctor, Ctor_Complete,
+ /*ForVirtualBase=*/false,
+ /*Delegating=*/false, currAVS, e);
+ builder.create<cir::YieldOp>(loc);
+ });
+ }
+
+ if (constantCount.use_empty())
+ constantCount.erase();
+}
+
void CIRGenFunction::emitDelegateCXXConstructorCall(
const CXXConstructorDecl *ctor, CXXCtorType ctorType,
const FunctionArgList &args, SourceLocation loc) {
@@ -369,6 +480,19 @@ void CIRGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &args) {
s->getStmtClassName());
}
+void CIRGenFunction::destroyCXXObject(CIRGenFunction &cgf, Address addr,
+ QualType type) {
+ const RecordType *rtype = type->castAs<RecordType>();
+ const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
+ const CXXDestructorDecl *dtor = record->getDestructor();
+ // TODO(cir): Unlike traditional codegen, CIRGen should actually emit trivial
+  // dtors which shall be removed in later CIR passes. However, only remove this
+ // assertion after we have a test case to exercise this path.
+ assert(!dtor->isTrivial());
+ cgf.emitCXXDestructorCall(dtor, Dtor_Complete, /*forVirtualBase*/ false,
+ /*delegating=*/false, addr, type);
+}
+
void CIRGenFunction::emitDelegatingCXXConstructorCall(
const CXXConstructorDecl *ctor, const FunctionArgList &args) {
assert(ctor->isDelegatingConstructor());
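
A sketch of the C++ this enables (assuming a constant element count, since the dynamic-count case above still reports NYI): the whole array construction is emitted as a single `cir::ArrayCtor` region, which LoweringPrepare later expands into a loop.

struct Widget {
  Widget(); // non-trivial default constructor defined elsewhere
};

void makeWidgets() {
  Widget ws[4]; // emitCXXAggrConstructorCall: one cir::ArrayCtor region, count = 4
}
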
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
new file mode 100644
index 0000000..be21ce9
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -0,0 +1,69 @@
+//===--- CIRGenCleanup.cpp - Bookkeeping and code emission for cleanups ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code dealing with the IR generation for cleanups
+// and related information.
+//
+// A "cleanup" is a piece of code which needs to be executed whenever
+// control transfers out of a particular scope. This can be
+// conditionalized to occur only on exceptional control flow, only on
+// normal control flow, or both.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CIRGenFunction.h"
+
+#include "clang/CIR/MissingFeatures.h"
+
+using namespace clang;
+using namespace clang::CIRGen;
+
+//===----------------------------------------------------------------------===//
+// CIRGenFunction cleanup related
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// EHScopeStack
+//===----------------------------------------------------------------------===//
+
+void EHScopeStack::Cleanup::anchor() {}
+
+static mlir::Block *getCurCleanupBlock(CIRGenFunction &cgf) {
+ mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
+ mlir::Block *cleanup =
+ cgf.curLexScope->getOrCreateCleanupBlock(cgf.getBuilder());
+ return cleanup;
+}
+
+/// Pops a cleanup block. If the block includes a normal cleanup, the
+/// current insertion point is threaded through the cleanup, as are
+/// any branch fixups on the cleanup.
+void CIRGenFunction::popCleanupBlock() {
+ assert(!ehStack.cleanupStack.empty() && "cleanup stack is empty!");
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ std::unique_ptr<EHScopeStack::Cleanup> cleanup =
+ ehStack.cleanupStack.pop_back_val();
+
+ assert(!cir::MissingFeatures::ehCleanupFlags());
+ mlir::Block *cleanupEntry = getCurCleanupBlock(*this);
+ builder.setInsertionPointToEnd(cleanupEntry);
+ cleanup->emit(*this);
+}
+
+/// Pops cleanup blocks until the given savepoint is reached.
+void CIRGenFunction::popCleanupBlocks(size_t oldCleanupStackDepth) {
+ assert(!cir::MissingFeatures::ehstackBranches());
+
+ assert(ehStack.getStackDepth() >= oldCleanupStackDepth);
+
+ // Pop cleanup blocks until we reach the base stack depth for the
+ // current scope.
+ while (ehStack.getStackDepth() > oldCleanupStackDepth) {
+ popCleanupBlock();
+ }
+}
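
The kind of source these cleanups model, as a sketch: the destructor of a local object must run on every path out of its scope, so a cleanup is pushed when the variable is emitted and popped, emitting the destructor call, when the scope ends.

struct Guard {
  ~Guard(); // non-trivial destructor defined elsewhere
};

void f(bool early) {
  Guard g;  // pushes a cleanup for ~Guard()
  if (early)
    return; // the cleanup block runs ~Guard() before control leaves the scope
}           // the normal exit path runs the same cleanup
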
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index afbe92a..9e8eaa5 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -183,8 +183,8 @@ void CIRGenFunction::emitAutoVarCleanups(
const VarDecl &d = *emission.Variable;
// Check the type for a cleanup.
- if (d.needsDestruction(getContext()))
- cgm.errorNYI(d.getSourceRange(), "emitAutoVarCleanups: type cleanup");
+ if (QualType::DestructionKind dtorKind = d.needsDestruction(getContext()))
+ emitAutoVarTypeCleanup(emission, dtorKind);
assert(!cir::MissingFeatures::opAllocaPreciseLifetime());
@@ -520,7 +520,7 @@ void CIRGenFunction::emitExprAsInit(const Expr *init, const ValueDecl *d,
llvm_unreachable("bad evaluation kind");
}
-void CIRGenFunction::emitDecl(const Decl &d) {
+void CIRGenFunction::emitDecl(const Decl &d, bool evaluateConditionDecl) {
switch (d.getKind()) {
case Decl::BuiltinTemplate:
case Decl::TranslationUnit:
@@ -608,11 +608,14 @@ void CIRGenFunction::emitDecl(const Decl &d) {
case Decl::UsingDirective: // using namespace X; [C++]
assert(!cir::MissingFeatures::generateDebugInfo());
return;
- case Decl::Var: {
+ case Decl::Var:
+ case Decl::Decomposition: {
const VarDecl &vd = cast<VarDecl>(d);
assert(vd.isLocalVarDecl() &&
"Should not see file-scope variables inside a function!");
emitVarDecl(vd);
+ if (evaluateConditionDecl)
+ maybeEmitDeferredVarDeclInit(&vd);
return;
}
case Decl::OpenACCDeclare:
@@ -632,7 +635,6 @@ void CIRGenFunction::emitDecl(const Decl &d) {
case Decl::ImplicitConceptSpecialization:
case Decl::TopLevelStmt:
case Decl::UsingPack:
- case Decl::Decomposition: // This could be moved to join Decl::Var
case Decl::OMPDeclareReduction:
case Decl::OMPDeclareMapper:
cgm.errorNYI(d.getSourceRange(),
@@ -648,3 +650,160 @@ void CIRGenFunction::emitNullabilityCheck(LValue lhs, mlir::Value rhs,
assert(!cir::MissingFeatures::sanitizers());
}
+
+/// Destroys all the elements of the given array, beginning from last to first.
+/// The array cannot be zero-length.
+///
+/// \param begin - a type* denoting the first element of the array
+/// \param end - a type* denoting one past the end of the array
+/// \param elementType - the element type of the array
+/// \param destroyer - the function to call to destroy elements
+void CIRGenFunction::emitArrayDestroy(mlir::Value begin, mlir::Value end,
+ QualType elementType,
+ CharUnits elementAlign,
+ Destroyer *destroyer) {
+ assert(!elementType->isArrayType());
+
+ // Differently from LLVM traditional codegen, use a higher level
+ // representation instead of lowering directly to a loop.
+ mlir::Type cirElementType = convertTypeForMem(elementType);
+ cir::PointerType ptrToElmType = builder.getPointerTo(cirElementType);
+
+ // Emit the dtor call that will execute for every array element.
+ cir::ArrayDtor::create(
+ builder, *currSrcLoc, begin, [&](mlir::OpBuilder &b, mlir::Location loc) {
+ auto arg = b.getInsertionBlock()->addArgument(ptrToElmType, loc);
+ Address curAddr = Address(arg, cirElementType, elementAlign);
+ assert(!cir::MissingFeatures::dtorCleanups());
+
+ // Perform the actual destruction there.
+ destroyer(*this, curAddr, elementType);
+
+ cir::YieldOp::create(builder, loc);
+ });
+}
+
+/// Immediately perform the destruction of the given object.
+///
+/// \param addr - the address of the object; a type*
+/// \param type - the type of the object; if an array type, all
+/// objects are destroyed in reverse order
+/// \param destroyer - the function to call to destroy individual
+/// elements
+void CIRGenFunction::emitDestroy(Address addr, QualType type,
+ Destroyer *destroyer) {
+ const ArrayType *arrayType = getContext().getAsArrayType(type);
+ if (!arrayType)
+ return destroyer(*this, addr, type);
+
+ mlir::Value length = emitArrayLength(arrayType, type, addr);
+
+ CharUnits elementAlign = addr.getAlignment().alignmentOfArrayElement(
+ getContext().getTypeSizeInChars(type));
+
+ auto constantCount = length.getDefiningOp<cir::ConstantOp>();
+ if (!constantCount) {
+ assert(!cir::MissingFeatures::vlas());
+ cgm.errorNYI("emitDestroy: variable length array");
+ return;
+ }
+
+ auto constIntAttr = mlir::dyn_cast<cir::IntAttr>(constantCount.getValue());
+ // If it's constant zero, we can just skip the entire thing.
+ if (constIntAttr && constIntAttr.getUInt() == 0)
+ return;
+
+ mlir::Value begin = addr.getPointer();
+ mlir::Value end; // This will be used for future non-constant counts.
+ emitArrayDestroy(begin, end, type, elementAlign, destroyer);
+
+ // If the array destroy didn't use the length op, we can erase it.
+ if (constantCount.use_empty())
+ constantCount.erase();
+}
+
+CIRGenFunction::Destroyer *
+CIRGenFunction::getDestroyer(QualType::DestructionKind kind) {
+ switch (kind) {
+ case QualType::DK_none:
+ llvm_unreachable("no destroyer for trivial dtor");
+ case QualType::DK_cxx_destructor:
+ return destroyCXXObject;
+ case QualType::DK_objc_strong_lifetime:
+ case QualType::DK_objc_weak_lifetime:
+ case QualType::DK_nontrivial_c_struct:
+ cgm.errorNYI("getDestroyer: other destruction kind");
+ return nullptr;
+ }
+ llvm_unreachable("Unknown DestructionKind");
+}
+
+namespace {
+struct DestroyObject final : EHScopeStack::Cleanup {
+ DestroyObject(Address addr, QualType type,
+ CIRGenFunction::Destroyer *destroyer)
+ : addr(addr), type(type), destroyer(destroyer) {}
+
+ Address addr;
+ QualType type;
+ CIRGenFunction::Destroyer *destroyer;
+
+ void emit(CIRGenFunction &cgf) override {
+ cgf.emitDestroy(addr, type, destroyer);
+ }
+};
+} // namespace
+
+/// Enter a destroy cleanup for the given local variable.
+void CIRGenFunction::emitAutoVarTypeCleanup(
+ const CIRGenFunction::AutoVarEmission &emission,
+ QualType::DestructionKind dtorKind) {
+ assert(dtorKind != QualType::DK_none);
+
+ // Note that for __block variables, we want to destroy the
+ // original stack object, not the possibly forwarded object.
+ Address addr = emission.getObjectAddress(*this);
+
+ const VarDecl *var = emission.Variable;
+ QualType type = var->getType();
+
+ CleanupKind cleanupKind = NormalAndEHCleanup;
+ CIRGenFunction::Destroyer *destroyer = nullptr;
+
+ switch (dtorKind) {
+ case QualType::DK_none:
+ llvm_unreachable("no cleanup for trivially-destructible variable");
+
+ case QualType::DK_cxx_destructor:
+ // If there's an NRVO flag on the emission, we need a different
+ // cleanup.
+ if (emission.NRVOFlag) {
+ cgm.errorNYI(var->getSourceRange(), "emitAutoVarTypeCleanup: NRVO");
+ return;
+ }
+ // Otherwise, this is handled below.
+ break;
+
+ case QualType::DK_objc_strong_lifetime:
+ case QualType::DK_objc_weak_lifetime:
+ case QualType::DK_nontrivial_c_struct:
+ cgm.errorNYI(var->getSourceRange(),
+ "emitAutoVarTypeCleanup: other dtor kind");
+ return;
+ }
+
+ // If we haven't chosen a more specific destroyer, use the default.
+ if (!destroyer)
+ destroyer = getDestroyer(dtorKind);
+
+ assert(!cir::MissingFeatures::ehCleanupFlags());
+ ehStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer);
+}
+
+void CIRGenFunction::maybeEmitDeferredVarDeclInit(const VarDecl *vd) {
+ if (auto *dd = dyn_cast_if_present<DecompositionDecl>(vd)) {
+ for (auto *b : dd->flat_bindings())
+ if (auto *hd = b->getHoldingVar())
+ emitVarDecl(*hd);
+ }
+}
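
Two sketches of source that exercises the new code in this file: an automatic array whose elements need destruction (a DestroyObject cleanup driving `emitDestroy` and `emitArrayDestroy`), and a tuple-like structured binding that now takes the `Decl::Decomposition` path with its holding variables emitted by `maybeEmitDeferredVarDeclInit`. Illustrative only.

#include <utility>

struct S {
  ~S(); // non-trivial destructor defined elsewhere
};

void destroyArray() {
  S elems[8]; // cleanup emits a cir::ArrayDtor over all eight elements
}

void bind(std::pair<int, int> p) {
  auto [a, b] = p; // DecompositionDecl; holding vars for a and b emitted after the init
  (void)a;
  (void)b;
}
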
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 1f64801..a509ffa 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -584,6 +584,15 @@ LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr *e) {
return lv;
}
+ if (const auto *bd = dyn_cast<BindingDecl>(nd)) {
+ if (e->refersToEnclosingVariableOrCapture()) {
+ assert(!cir::MissingFeatures::lambdaCaptures());
+ cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: lambda captures");
+ return LValue();
+ }
+ return emitLValue(bd->getBinding());
+ }
+
cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: unhandled decl type");
return LValue();
}
@@ -949,7 +958,6 @@ LValue CIRGenFunction::emitCastLValue(const CastExpr *e) {
case CK_Dynamic:
case CK_ToUnion:
case CK_BaseToDerived:
- case CK_LValueBitCast:
case CK_AddressSpaceConversion:
case CK_ObjCObjectLValueCast:
case CK_VectorSplat:
@@ -965,6 +973,18 @@ LValue CIRGenFunction::emitCastLValue(const CastExpr *e) {
return {};
}
+ case CK_LValueBitCast: {
+ // This must be a reinterpret_cast (or c-style equivalent).
+ const auto *ce = cast<ExplicitCastExpr>(e);
+
+ cgm.emitExplicitCastExprType(ce, this);
+ LValue LV = emitLValue(e->getSubExpr());
+ Address V = LV.getAddress().withElementType(
+ builder, convertTypeForMem(ce->getTypeAsWritten()->getPointeeType()));
+
+ return makeAddrLValue(V, e->getType(), LV.getBaseInfo());
+ }
+
case CK_NoOp: {
// CK_NoOp can model a qualification conversion, which can remove an array
// bound and change the IR type.
@@ -1269,7 +1289,7 @@ RValue CIRGenFunction::getUndefRValue(QualType ty) {
}
RValue CIRGenFunction::emitCall(clang::QualType calleeTy,
- const CIRGenCallee &callee,
+ const CIRGenCallee &origCallee,
const clang::CallExpr *e,
ReturnValueSlot returnValue) {
// Get the actual function type. The callee type will always be a pointer to
@@ -1280,6 +1300,8 @@ RValue CIRGenFunction::emitCall(clang::QualType calleeTy,
calleeTy = getContext().getCanonicalType(calleeTy);
auto pointeeTy = cast<PointerType>(calleeTy)->getPointeeType();
+ CIRGenCallee callee = origCallee;
+
if (getLangOpts().CPlusPlus)
assert(!cir::MissingFeatures::sanitizers());
@@ -1296,7 +1318,44 @@ RValue CIRGenFunction::emitCall(clang::QualType calleeTy,
const CIRGenFunctionInfo &funcInfo =
cgm.getTypes().arrangeFreeFunctionCall(args, fnType);
- assert(!cir::MissingFeatures::opCallNoPrototypeFunc());
+ // C99 6.5.2.2p6:
+ // If the expression that denotes the called function has a type that does
+ // not include a prototype, [the default argument promotions are performed].
+ // If the number of arguments does not equal the number of parameters, the
+ // behavior is undefined. If the function is defined with a type that
+ // includes a prototype, and either the prototype ends with an ellipsis (,
+ // ...) or the types of the arguments after promotion are not compatible
+ // with the types of the parameters, the behavior is undefined. If the
+ // function is defined with a type that does not include a prototype, and
+ // the types of the arguments after promotion are not compatible with those
+ // of the parameters after promotion, the behavior is undefined [except in
+ // some trivial cases].
+ // That is, in the general case, we should assume that a call through an
+ // unprototyped function type works like a *non-variadic* call. The way we
+  // make this work is to cast to the exact type of the promoted arguments.
+ if (isa<FunctionNoProtoType>(fnType)) {
+ assert(!cir::MissingFeatures::opCallChain());
+ assert(!cir::MissingFeatures::addressSpace());
+ cir::FuncType calleeTy = getTypes().getFunctionType(funcInfo);
+ // get non-variadic function type
+ calleeTy = cir::FuncType::get(calleeTy.getInputs(),
+ calleeTy.getReturnType(), false);
+ auto calleePtrTy = cir::PointerType::get(calleeTy);
+
+ mlir::Operation *fn = callee.getFunctionPointer();
+ mlir::Value addr;
+ if (auto funcOp = mlir::dyn_cast<cir::FuncOp>(fn)) {
+ addr = builder.create<cir::GetGlobalOp>(
+ getLoc(e->getSourceRange()),
+ cir::PointerType::get(funcOp.getFunctionType()), funcOp.getSymName());
+ } else {
+ addr = fn->getResult(0);
+ }
+
+ fn = builder.createBitcast(addr, calleePtrTy).getDefiningOp();
+ callee.setFunctionPointer(fn);
+ }
+
assert(!cir::MissingFeatures::opCallFnInfoOpts());
assert(!cir::MissingFeatures::hip());
assert(!cir::MissingFeatures::opCallMustTail());
@@ -1657,37 +1716,38 @@ void CIRGenFunction::emitCXXConstructExpr(const CXXConstructExpr *e,
return;
}
- if (getContext().getAsArrayType(e->getType())) {
- cgm.errorNYI(e->getSourceRange(), "emitCXXConstructExpr: array type");
- return;
- }
+ if (const ArrayType *arrayType = getContext().getAsArrayType(e->getType())) {
+ assert(!cir::MissingFeatures::sanitizers());
+ emitCXXAggrConstructorCall(cd, arrayType, dest.getAddress(), e, false);
+ } else {
- clang::CXXCtorType type = Ctor_Complete;
- bool forVirtualBase = false;
- bool delegating = false;
-
- switch (e->getConstructionKind()) {
- case CXXConstructionKind::Complete:
- type = Ctor_Complete;
- break;
- case CXXConstructionKind::Delegating:
- // We should be emitting a constructor; GlobalDecl will assert this
- type = curGD.getCtorType();
- delegating = true;
- break;
- case CXXConstructionKind::VirtualBase:
- // This should just set 'forVirtualBase' to true and fall through, but
- // virtual base class support is otherwise missing, so this needs to wait
- // until it can be tested.
- cgm.errorNYI(e->getSourceRange(),
- "emitCXXConstructExpr: virtual base constructor");
- return;
- case CXXConstructionKind::NonVirtualBase:
- type = Ctor_Base;
- break;
- }
+ clang::CXXCtorType type = Ctor_Complete;
+ bool forVirtualBase = false;
+ bool delegating = false;
+
+ switch (e->getConstructionKind()) {
+ case CXXConstructionKind::Complete:
+ type = Ctor_Complete;
+ break;
+ case CXXConstructionKind::Delegating:
+ // We should be emitting a constructor; GlobalDecl will assert this
+ type = curGD.getCtorType();
+ delegating = true;
+ break;
+ case CXXConstructionKind::VirtualBase:
+ // This should just set 'forVirtualBase' to true and fall through, but
+ // virtual base class support is otherwise missing, so this needs to wait
+ // until it can be tested.
+ cgm.errorNYI(e->getSourceRange(),
+ "emitCXXConstructExpr: virtual base constructor");
+ return;
+ case CXXConstructionKind::NonVirtualBase:
+ type = Ctor_Base;
+ break;
+ }
- emitCXXConstructorCall(cd, type, forVirtualBase, delegating, dest, e);
+ emitCXXConstructorCall(cd, type, forVirtualBase, delegating, dest, e);
+ }
}
RValue CIRGenFunction::emitReferenceBindingToExpr(const Expr *e) {
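
Two lvalue forms handled by the additions above, shown as a sketch: a structured-binding name resolves through its `BindingDecl`, and a reinterpret_cast used as an lvalue is a `CK_LValueBitCast` whose address is simply retyped with `withElementType`.

struct Pair {
  int first;
  int second;
};

int useBinding(Pair p) {
  auto [x, y] = p;
  return x + y; // each DeclRefExpr here refers to a BindingDecl
}

void resetViaAlias(unsigned &u) {
  reinterpret_cast<int &>(u) = -1; // CK_LValueBitCast: same storage, retyped address
}
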
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
index 0d12c5c..51aab95 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprAggregate.cpp
@@ -357,10 +357,97 @@ void AggExprEmitter::visitCXXParenListOrInitListExpr(
emitArrayInit(dest.getAddress(), arrayTy, e->getType(), e, args,
arrayFiller);
return;
+ } else if (e->getType()->isVariableArrayType()) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "visitCXXParenListOrInitListExpr variable array type");
+ return;
+ }
+
+ if (e->getType()->isArrayType()) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "visitCXXParenListOrInitListExpr array type");
+ return;
+ }
+
+ assert(e->getType()->isRecordType() && "Only support structs/unions here!");
+
+ // Do struct initialization; this code just sets each individual member
+  // to the appropriate value. This makes bitfield support automatic;
+ // the disadvantage is that the generated code is more difficult for
+ // the optimizer, especially with bitfields.
+ unsigned numInitElements = args.size();
+ RecordDecl *record = e->getType()->castAs<RecordType>()->getDecl();
+
+ // We'll need to enter cleanup scopes in case any of the element
+ // initializers throws an exception.
+ assert(!cir::MissingFeatures::requiresCleanups());
+
+ unsigned curInitIndex = 0;
+
+ // Emit initialization of base classes.
+ if (auto *cxxrd = dyn_cast<CXXRecordDecl>(record)) {
+ assert(numInitElements >= cxxrd->getNumBases() &&
+ "missing initializer for base class");
+ if (cxxrd->getNumBases() > 0) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "visitCXXParenListOrInitListExpr base class init");
+ return;
+ }
+ }
+
+ LValue destLV = cgf.makeAddrLValue(dest.getAddress(), e->getType());
+
+ if (record->isUnion()) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "visitCXXParenListOrInitListExpr union type");
+ return;
}
- cgf.cgm.errorNYI(
- "visitCXXParenListOrInitListExpr Record or VariableSizeArray type");
+ // Here we iterate over the fields; this makes it simpler to both
+ // default-initialize fields and skip over unnamed fields.
+ for (const FieldDecl *field : record->fields()) {
+ // We're done once we hit the flexible array member.
+ if (field->getType()->isIncompleteArrayType())
+ break;
+
+ // Always skip anonymous bitfields.
+ if (field->isUnnamedBitField())
+ continue;
+
+ // We're done if we reach the end of the explicit initializers, we
+ // have a zeroed object, and the rest of the fields are
+ // zero-initializable.
+ if (curInitIndex == numInitElements && dest.isZeroed() &&
+ cgf.getTypes().isZeroInitializable(e->getType()))
+ break;
+ LValue lv =
+ cgf.emitLValueForFieldInitialization(destLV, field, field->getName());
+ // We never generate write-barriers for initialized fields.
+ assert(!cir::MissingFeatures::setNonGC());
+
+ if (curInitIndex < numInitElements) {
+ // Store the initializer into the field.
+ CIRGenFunction::SourceLocRAIIObject loc{
+ cgf, cgf.getLoc(record->getSourceRange())};
+ emitInitializationToLValue(args[curInitIndex++], lv);
+ } else {
+ // We're out of initializers; default-initialize to null
+ emitNullInitializationToLValue(cgf.getLoc(e->getSourceRange()), lv);
+ }
+
+ // Push a destructor if necessary.
+ // FIXME: if we have an array of structures, all explicitly
+ // initialized, we can end up pushing a linear number of cleanups.
+ if (field->getType().isDestructedType()) {
+ cgf.cgm.errorNYI(e->getSourceRange(),
+ "visitCXXParenListOrInitListExpr destructor");
+ return;
+ }
+
+ // From classic codegen, maybe not useful for CIR:
+ // If the GEP didn't get used because of a dead zero init or something
+ // else, clean it up for -O0 builds and general tidiness.
+ }
}
// TODO(cir): This could be shared with classic codegen.
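
A sketch of the aggregate initialization the field-by-field loop above now emits (unions, base classes, and fields that need destruction still report NYI): explicit initializers are stored in order, and any remaining fields are null-initialized.

struct Point {
  int x;
  int y;
  int z;
};

Point origin() {
  return {1, 2}; // x and y come from the init list; z is null-initialized
}
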
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index 6756a7c..ea60a95 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -34,11 +34,20 @@ public:
}
mlir::Value emitLoadOfLValue(LValue lv, SourceLocation loc);
+
/// Store the specified real/imag parts into the
/// specified value pointer.
void emitStoreOfComplex(mlir::Location loc, mlir::Value val, LValue lv,
bool isInit);
+ /// Emit a cast from complex value Val to DestType.
+ mlir::Value emitComplexToComplexCast(mlir::Value value, QualType srcType,
+ QualType destType, SourceLocation loc);
+
+ /// Emit a cast from scalar value Val to DestType.
+ mlir::Value emitScalarToComplexCast(mlir::Value value, QualType srcType,
+ QualType destType, SourceLocation loc);
+
mlir::Value
VisitAbstractConditionalOperator(const AbstractConditionalOperator *e);
mlir::Value VisitArraySubscriptExpr(Expr *e);
@@ -51,7 +60,7 @@ public:
mlir::Value VisitDeclRefExpr(DeclRefExpr *e);
mlir::Value VisitGenericSelectionExpr(GenericSelectionExpr *e);
mlir::Value VisitImplicitCastExpr(ImplicitCastExpr *e);
- mlir::Value VisitInitListExpr(const InitListExpr *e);
+ mlir::Value VisitInitListExpr(InitListExpr *e);
mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
return emitLoadOfLValue(e);
@@ -82,6 +91,14 @@ public:
}
mlir::Value VisitUnaryDeref(const Expr *e);
+
+ mlir::Value VisitUnaryPlus(const UnaryOperator *e);
+
+ mlir::Value VisitPlusMinus(const UnaryOperator *e, cir::UnaryOpKind kind,
+ QualType promotionType);
+
+ mlir::Value VisitUnaryMinus(const UnaryOperator *e);
+
mlir::Value VisitUnaryNot(const UnaryOperator *e);
struct BinOpInfo {
@@ -101,6 +118,7 @@ public:
mlir::Value emitBinAdd(const BinOpInfo &op);
mlir::Value emitBinSub(const BinOpInfo &op);
+ mlir::Value emitBinMul(const BinOpInfo &op);
QualType getPromotionType(QualType ty, bool isDivOpCode = false) {
if (auto *complexTy = ty->getAs<ComplexType>()) {
@@ -133,6 +151,7 @@ public:
HANDLEBINOP(Add)
HANDLEBINOP(Sub)
+ HANDLEBINOP(Mul)
#undef HANDLEBINOP
};
} // namespace
@@ -164,14 +183,148 @@ LValue ComplexExprEmitter::emitBinAssignLValue(const BinaryOperator *e,
mlir::Value ComplexExprEmitter::emitCast(CastKind ck, Expr *op,
QualType destTy) {
switch (ck) {
+ case CK_Dependent:
+ llvm_unreachable("dependent type must be resolved before the CIR codegen");
+
case CK_NoOp:
case CK_LValueToRValue:
return Visit(op);
- default:
- break;
+
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_UserDefinedConversion: {
+ cgf.cgm.errorNYI(
+ "ComplexExprEmitter::emitCast Atmoic & UserDefinedConversion");
+ return {};
}
- cgf.cgm.errorNYI("ComplexType Cast");
- return {};
+
+ case CK_LValueBitCast: {
+ LValue origLV = cgf.emitLValue(op);
+ Address addr =
+ origLV.getAddress().withElementType(builder, cgf.convertType(destTy));
+ LValue destLV = cgf.makeAddrLValue(addr, destTy);
+ return emitLoadOfLValue(destLV, op->getExprLoc());
+ }
+
+ case CK_LValueToRValueBitCast: {
+ LValue sourceLVal = cgf.emitLValue(op);
+ Address addr = sourceLVal.getAddress().withElementType(
+ builder, cgf.convertTypeForMem(destTy));
+ LValue destLV = cgf.makeAddrLValue(addr, destTy);
+ assert(!cir::MissingFeatures::opTBAA());
+ return emitLoadOfLValue(destLV, op->getExprLoc());
+ }
+
+ case CK_BitCast:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
+ case CK_ConstructorConversion:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_BooleanToSignedIntegral:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
+ case CK_BuiltinFnToFnPtr:
+ case CK_ZeroToOCLOpaqueType:
+ case CK_AddressSpaceConversion:
+ case CK_IntToOCLSampler:
+ case CK_FloatingToFixedPoint:
+ case CK_FixedPointToFloating:
+ case CK_FixedPointCast:
+ case CK_FixedPointToBoolean:
+ case CK_FixedPointToIntegral:
+ case CK_IntegralToFixedPoint:
+ case CK_MatrixCast:
+ case CK_HLSLVectorTruncation:
+ case CK_HLSLArrayRValue:
+ case CK_HLSLElementwiseCast:
+ case CK_HLSLAggregateSplatCast:
+ llvm_unreachable("invalid cast kind for complex value");
+
+ case CK_FloatingRealToComplex:
+ case CK_IntegralRealToComplex: {
+ assert(!cir::MissingFeatures::cgFPOptionsRAII());
+ return emitScalarToComplexCast(cgf.emitScalarExpr(op), op->getType(),
+ destTy, op->getExprLoc());
+ }
+
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex: {
+ assert(!cir::MissingFeatures::cgFPOptionsRAII());
+ return emitComplexToComplexCast(Visit(op), op->getType(), destTy,
+ op->getExprLoc());
+ }
+ }
+
+ llvm_unreachable("unknown cast resulting in complex value");
+}
+
+mlir::Value ComplexExprEmitter::VisitUnaryPlus(const UnaryOperator *e) {
+ QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
+ mlir::Value result = VisitPlusMinus(e, cir::UnaryOpKind::Plus, promotionTy);
+ if (!promotionTy.isNull()) {
+ cgf.cgm.errorNYI("ComplexExprEmitter::VisitUnaryPlus emitUnPromotedValue");
+ return {};
+ }
+ return result;
+}
+
+mlir::Value ComplexExprEmitter::VisitPlusMinus(const UnaryOperator *e,
+ cir::UnaryOpKind kind,
+ QualType promotionType) {
+  assert((kind == cir::UnaryOpKind::Plus ||
+          kind == cir::UnaryOpKind::Minus) &&
+         "Invalid UnaryOp kind for ComplexType Plus or Minus");
+
+ mlir::Value op;
+ if (!promotionType.isNull())
+ op = cgf.emitPromotedComplexExpr(e->getSubExpr(), promotionType);
+ else
+ op = Visit(e->getSubExpr());
+ return builder.createUnaryOp(cgf.getLoc(e->getExprLoc()), kind, op);
+}
+
+mlir::Value ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *e) {
+ QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
+ mlir::Value result = VisitPlusMinus(e, cir::UnaryOpKind::Minus, promotionTy);
+ if (!promotionTy.isNull()) {
+ cgf.cgm.errorNYI("ComplexExprEmitter::VisitUnaryMinus emitUnPromotedValue");
+ return {};
+ }
+ return result;
}
mlir::Value ComplexExprEmitter::emitConstant(
@@ -207,6 +360,49 @@ void ComplexExprEmitter::emitStoreOfComplex(mlir::Location loc, mlir::Value val,
builder.createStore(loc, val, destAddr);
}
+mlir::Value ComplexExprEmitter::emitComplexToComplexCast(mlir::Value val,
+ QualType srcType,
+ QualType destType,
+ SourceLocation loc) {
+ if (srcType == destType)
+ return val;
+
+ // Get the src/dest element type.
+ QualType srcElemTy = srcType->castAs<ComplexType>()->getElementType();
+ QualType destElemTy = destType->castAs<ComplexType>()->getElementType();
+
+ cir::CastKind castOpKind;
+ if (srcElemTy->isFloatingType() && destElemTy->isFloatingType())
+ castOpKind = cir::CastKind::float_complex;
+ else if (srcElemTy->isFloatingType() && destElemTy->isIntegerType())
+ castOpKind = cir::CastKind::float_complex_to_int_complex;
+ else if (srcElemTy->isIntegerType() && destElemTy->isFloatingType())
+ castOpKind = cir::CastKind::int_complex_to_float_complex;
+ else if (srcElemTy->isIntegerType() && destElemTy->isIntegerType())
+ castOpKind = cir::CastKind::int_complex;
+ else
+ llvm_unreachable("unexpected src type or dest type");
+
+ return builder.createCast(cgf.getLoc(loc), castOpKind, val,
+ cgf.convertType(destType));
+}
+
+mlir::Value ComplexExprEmitter::emitScalarToComplexCast(mlir::Value val,
+ QualType srcType,
+ QualType destType,
+ SourceLocation loc) {
+ cir::CastKind castOpKind;
+ if (srcType->isFloatingType())
+ castOpKind = cir::CastKind::float_to_complex;
+ else if (srcType->isIntegerType())
+ castOpKind = cir::CastKind::int_to_complex;
+ else
+ llvm_unreachable("unexpected src type");
+
+ return builder.createCast(cgf.getLoc(loc), castOpKind, val,
+ cgf.convertType(destType));
+}
+
mlir::Value ComplexExprEmitter::VisitAbstractConditionalOperator(
const AbstractConditionalOperator *e) {
mlir::Value condValue = Visit(e->getCond());
@@ -304,7 +500,7 @@ mlir::Value ComplexExprEmitter::VisitImplicitCastExpr(ImplicitCastExpr *e) {
return emitCast(e->getCastKind(), e->getSubExpr(), e->getType());
}
-mlir::Value ComplexExprEmitter::VisitInitListExpr(const InitListExpr *e) {
+mlir::Value ComplexExprEmitter::VisitInitListExpr(InitListExpr *e) {
mlir::Location loc = cgf.getLoc(e->getExprLoc());
if (e->getNumInits() == 2) {
mlir::Value real = cgf.emitScalarExpr(e->getInit(0));
@@ -312,10 +508,8 @@ mlir::Value ComplexExprEmitter::VisitInitListExpr(const InitListExpr *e) {
return builder.createComplexCreate(loc, real, imag);
}
- if (e->getNumInits() == 1) {
- cgf.cgm.errorNYI("Create Complex with InitList with size 1");
- return {};
- }
+ if (e->getNumInits() == 1)
+ return Visit(e->getInit(0));
assert(e->getNumInits() == 0 && "Unexpected number of inits");
mlir::Type complexTy = cgf.convertType(e->getType());
@@ -385,13 +579,22 @@ mlir::Value ComplexExprEmitter::emitPromoted(const Expr *e,
return emitBin##OP(emitBinOps(bo, promotionTy));
HANDLE_BINOP(Add)
HANDLE_BINOP(Sub)
+ HANDLE_BINOP(Mul)
#undef HANDLE_BINOP
default:
break;
}
- } else if (isa<UnaryOperator>(e)) {
- cgf.cgm.errorNYI("emitPromoted UnaryOperator");
- return {};
+ } else if (const auto *unaryOp = dyn_cast<UnaryOperator>(e)) {
+ switch (unaryOp->getOpcode()) {
+ case UO_Minus:
+ case UO_Plus: {
+ auto kind = unaryOp->getOpcode() == UO_Plus ? cir::UnaryOpKind::Plus
+ : cir::UnaryOpKind::Minus;
+ return VisitPlusMinus(unaryOp, kind, promotionTy);
+ }
+ default:
+ break;
+ }
}
mlir::Value result = Visit(const_cast<Expr *>(e));
@@ -436,6 +639,31 @@ mlir::Value ComplexExprEmitter::emitBinSub(const BinOpInfo &op) {
return builder.create<cir::ComplexSubOp>(op.loc, op.lhs, op.rhs);
}
+static cir::ComplexRangeKind
+getComplexRangeAttr(LangOptions::ComplexRangeKind range) {
+ switch (range) {
+ case LangOptions::CX_Full:
+ return cir::ComplexRangeKind::Full;
+ case LangOptions::CX_Improved:
+ return cir::ComplexRangeKind::Improved;
+ case LangOptions::CX_Promoted:
+ return cir::ComplexRangeKind::Promoted;
+ case LangOptions::CX_Basic:
+ return cir::ComplexRangeKind::Basic;
+ case LangOptions::CX_None:
+    // The default value for ComplexRangeKind is Full if no option is selected.
+ return cir::ComplexRangeKind::Full;
+ }
+}
+
+mlir::Value ComplexExprEmitter::emitBinMul(const BinOpInfo &op) {
+ assert(!cir::MissingFeatures::fastMathFlags());
+ assert(!cir::MissingFeatures::cgFPOptionsRAII());
+ cir::ComplexRangeKind rangeKind =
+ getComplexRangeAttr(op.fpFeatures.getComplexRange());
+ return builder.create<cir::ComplexMulOp>(op.loc, op.lhs, op.rhs, rangeKind);
+}
+
LValue CIRGenFunction::emitComplexAssignmentLValue(const BinaryOperator *e) {
assert(e->getOpcode() == BO_Assign && "Expected assign op");
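
A sketch of `_Complex` code (a GNU extension that Clang also accepts in C++) covering the additions above: unary minus through `VisitUnaryMinus`, complex multiplication lowered to `cir::ComplexMulOp` with the range kind mapped from `LangOptions::ComplexRangeKind`, and a real-to-complex cast through `emitScalarToComplexCast`.

float _Complex demo(float _Complex a, float _Complex b, float r) {
  float _Complex neg = -a;                    // VisitUnaryMinus -> cir unary minus
  float _Complex prod = a * b;                // emitBinMul -> cir::ComplexMulOp
  float _Complex widened = (float _Complex)r; // CK_FloatingRealToComplex -> emitScalarToComplexCast
  return neg + prod + widened;
}
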
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index eba6bff..2523b0f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -88,6 +88,10 @@ public:
// Utilities
//===--------------------------------------------------------------------===//
+ mlir::Value emitComplexToScalarConversion(mlir::Location loc,
+ mlir::Value value, CastKind kind,
+ QualType destTy);
+
mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType) {
return builder.createFloatingCast(result, cgf.convertType(promotionType));
}
@@ -1125,7 +1129,7 @@ LValue ScalarExprEmitter::emitCompoundAssignLValue(
// 'An assignment expression has the value of the left operand after the
// assignment...'.
if (lhsLV.isBitField())
- cgf.cgm.errorNYI(e->getSourceRange(), "store through bitfield lvalue");
+ cgf.emitStoreThroughBitfieldLValue(RValue::get(result), lhsLV);
else
cgf.emitStoreThroughLValue(RValue::get(result), lhsLV);
@@ -1135,6 +1139,31 @@ LValue ScalarExprEmitter::emitCompoundAssignLValue(
return lhsLV;
}
+mlir::Value ScalarExprEmitter::emitComplexToScalarConversion(mlir::Location loc,
+ mlir::Value value,
+ CastKind kind,
+ QualType destTy) {
+ cir::CastKind castOpKind;
+ switch (kind) {
+ case CK_FloatingComplexToReal:
+ castOpKind = cir::CastKind::float_complex_to_real;
+ break;
+ case CK_IntegralComplexToReal:
+ castOpKind = cir::CastKind::int_complex_to_real;
+ break;
+ case CK_FloatingComplexToBoolean:
+ castOpKind = cir::CastKind::float_complex_to_bool;
+ break;
+ case CK_IntegralComplexToBoolean:
+ castOpKind = cir::CastKind::int_complex_to_bool;
+ break;
+ default:
+ llvm_unreachable("invalid complex-to-scalar cast kind");
+ }
+
+  return builder.createCast(loc, castOpKind, value, cgf.convertType(destTy));
+}
+
mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e,
QualType promotionType) {
e = e->IgnoreParens();
@@ -1758,6 +1787,15 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) {
ce->getExprLoc(), opts);
}
+ case CK_FloatingComplexToReal:
+ case CK_IntegralComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToBoolean: {
+ mlir::Value value = cgf.emitComplexExpr(subExpr);
+ return emitComplexToScalarConversion(cgf.getLoc(ce->getExprLoc()), value,
+ kind, destTy);
+ }
+
case CK_FloatingRealToComplex:
case CK_FloatingComplexCast:
case CK_IntegralRealToComplex:
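
A sketch of the conversions the new helper above handles, plus the compound assignment through a bitfield that now stores via `emitStoreThroughBitfieldLValue` instead of reporting NYI:

double realPart(double _Complex z) {
  return z; // CK_FloatingComplexToReal -> cir float_complex_to_real
}

bool isNonZero(double _Complex z) {
  return z; // CK_FloatingComplexToBoolean -> cir float_complex_to_bool
}

struct Flags {
  unsigned mode : 3;
};

void bump(Flags &f) {
  f.mode += 1; // compound assign through a bitfield lvalue
}
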
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index 3e69e56..3e9de17 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -26,7 +26,11 @@ namespace clang::CIRGen {
CIRGenFunction::CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder,
bool suppressNewContext)
- : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {}
+ : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {
+ ehStack.setCGF(this);
+ currentCleanupStackDepth = 0;
+ assert(ehStack.getStackDepth() == 0);
+}
CIRGenFunction::~CIRGenFunction() {}
@@ -227,6 +231,14 @@ void CIRGenFunction::LexicalScope::cleanup() {
CIRGenBuilderTy &builder = cgf.builder;
LexicalScope *localScope = cgf.curLexScope;
+ auto applyCleanup = [&]() {
+ if (performCleanup) {
+ // ApplyDebugLocation
+ assert(!cir::MissingFeatures::generateDebugInfo());
+ forceCleanup();
+ }
+ };
+
if (returnBlock != nullptr) {
// Write out the return block, which loads the value from `__retval` and
// issues the `cir.return`.
@@ -235,32 +247,42 @@ void CIRGenFunction::LexicalScope::cleanup() {
(void)emitReturn(*returnLoc);
}
- mlir::Block *curBlock = builder.getBlock();
- if (isGlobalInit() && !curBlock)
- return;
- if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
- return;
-
- // Get rid of any empty block at the end of the scope.
- bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
- if (!entryBlock && curBlock->empty()) {
- curBlock->erase();
- if (returnBlock != nullptr && returnBlock->getUses().empty())
- returnBlock->erase();
- return;
- }
-
- // Reached the end of the scope.
- {
+ auto insertCleanupAndLeave = [&](mlir::Block *insPt) {
mlir::OpBuilder::InsertionGuard guard(builder);
- builder.setInsertionPointToEnd(curBlock);
+ builder.setInsertionPointToEnd(insPt);
+
+ // If we still don't have a cleanup block, it means that `applyCleanup`
+ // below might be able to get us one.
+ mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
+
+      // Leverage and defer to RunCleanupsScope's dtor and scope handling.
+ applyCleanup();
+
+ // If we now have one after `applyCleanup`, hook it up properly.
+ if (!cleanupBlock && localScope->getCleanupBlock(builder)) {
+ cleanupBlock = localScope->getCleanupBlock(builder);
+ builder.create<cir::BrOp>(insPt->back().getLoc(), cleanupBlock);
+ if (!cleanupBlock->mightHaveTerminator()) {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ builder.setInsertionPointToEnd(cleanupBlock);
+ builder.create<cir::YieldOp>(localScope->endLoc);
+ }
+ }
if (localScope->depth == 0) {
// Reached the end of the function.
if (returnBlock != nullptr) {
- if (returnBlock->getUses().empty())
+ if (returnBlock->getUses().empty()) {
returnBlock->erase();
- else {
+ } else {
+ // Thread return block via cleanup block.
+ if (cleanupBlock) {
+ for (mlir::BlockOperand &blockUse : returnBlock->getUses()) {
+ cir::BrOp brOp = mlir::cast<cir::BrOp>(blockUse.getOwner());
+ brOp.setSuccessor(cleanupBlock);
+ }
+ }
+
builder.create<cir::BrOp>(*returnLoc, returnBlock);
return;
}
@@ -268,13 +290,50 @@ void CIRGenFunction::LexicalScope::cleanup() {
emitImplicitReturn();
return;
}
- // Reached the end of a non-function scope. Some scopes, such as those
- // used with the ?: operator, can return a value.
- if (!localScope->isTernary() && !curBlock->mightHaveTerminator()) {
+
+    // End of any local scope != function.
+    // Ternary ops have to deal with matching arms for yielding types
+    // and do return a value, so they must do their own cir.yield insertion.
+ if (!localScope->isTernary() && !insPt->mightHaveTerminator()) {
!retVal ? builder.create<cir::YieldOp>(localScope->endLoc)
: builder.create<cir::YieldOp>(localScope->endLoc, retVal);
}
+ };
+
+ // If a cleanup block has been created at some point, branch to it
+ // and set the insertion point to continue at the cleanup block.
+ // Terminators are then inserted either in the cleanup block or
+ // inline in this current block.
+ mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
+ if (cleanupBlock)
+ insertCleanupAndLeave(cleanupBlock);
+
+ // Now deal with any pending block wrap up like implicit end of
+ // scope.
+
+ mlir::Block *curBlock = builder.getBlock();
+ if (isGlobalInit() && !curBlock)
+ return;
+ if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
+ return;
+
+ // Get rid of any empty block at the end of the scope.
+ bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
+ if (!entryBlock && curBlock->empty()) {
+ curBlock->erase();
+ if (returnBlock != nullptr && returnBlock->getUses().empty())
+ returnBlock->erase();
+ return;
+ }
+
+ // If there's a cleanup block, branch to it, nothing else to do.
+ if (cleanupBlock) {
+ builder.create<cir::BrOp>(curBlock->back().getLoc(), cleanupBlock);
+ return;
}
+
+ // No pre-existent cleanup block, emit cleanup code and yield/return.
+ insertCleanupAndLeave(curBlock);
}
cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) {
@@ -408,7 +467,19 @@ void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
}
}
-void CIRGenFunction::finishFunction(SourceLocation endLoc) {}
+void CIRGenFunction::finishFunction(SourceLocation endLoc) {
+ // Pop any cleanups that might have been associated with the
+ // parameters. Do this in whatever block we're currently in; it's
+ // important to do this before we enter the return block or return
+ // edges will be *really* confused.
+ // TODO(cir): Use prologueCleanupDepth here.
+ bool hasCleanups = ehStack.getStackDepth() != currentCleanupStackDepth;
+ if (hasCleanups) {
+ assert(!cir::MissingFeatures::generateDebugInfo());
+ // FIXME(cir): should we clearInsertionPoint? breaks many testcases
+ popCleanupBlocks(currentCleanupStackDepth);
+ }
+}
mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
auto result = mlir::LogicalResult::success();
@@ -808,4 +879,175 @@ bool CIRGenFunction::shouldNullCheckClassCastValue(const CastExpr *ce) {
return true;
}
+/// Computes the length of an array in elements, as well as the base
+/// element type and a properly-typed first element pointer.
+mlir::Value
+CIRGenFunction::emitArrayLength(const clang::ArrayType *origArrayType,
+ QualType &baseType, Address &addr) {
+ const clang::ArrayType *arrayType = origArrayType;
+
+ // If it's a VLA, we have to load the stored size. Note that
+ // this is the size of the VLA in bytes, not its size in elements.
+ if (isa<VariableArrayType>(arrayType)) {
+ assert(cir::MissingFeatures::vlas());
+ cgm.errorNYI(*currSrcLoc, "VLAs");
+ return builder.getConstInt(*currSrcLoc, SizeTy, 0);
+ }
+
+ uint64_t countFromCLAs = 1;
+ QualType eltType;
+
+ auto cirArrayType = mlir::dyn_cast<cir::ArrayType>(addr.getElementType());
+
+ while (cirArrayType) {
+ assert(isa<ConstantArrayType>(arrayType));
+ countFromCLAs *= cirArrayType.getSize();
+ eltType = arrayType->getElementType();
+
+ cirArrayType =
+ mlir::dyn_cast<cir::ArrayType>(cirArrayType.getElementType());
+
+ arrayType = getContext().getAsArrayType(arrayType->getElementType());
+ assert((!cirArrayType || arrayType) &&
+ "CIR and Clang types are out-of-sync");
+ }
+
+ if (arrayType) {
+ // From this point onwards, the Clang array type has been emitted
+ // as some other type (probably a packed struct). Compute the array
+ // size, and just emit the 'begin' expression as a bitcast.
+ cgm.errorNYI(*currSrcLoc, "length for non-array underlying types");
+ }
+
+ baseType = eltType;
+ return builder.getConstInt(*currSrcLoc, SizeTy, countFromCLAs);
+}
+
+// TODO(cir): Most of this function can be shared between CIRGen
+// and traditional LLVM codegen
+void CIRGenFunction::emitVariablyModifiedType(QualType type) {
+ assert(type->isVariablyModifiedType() &&
+ "Must pass variably modified type to EmitVLASizes!");
+
+ // We're going to walk down into the type and look for VLA
+ // expressions.
+ do {
+ assert(type->isVariablyModifiedType());
+
+ const Type *ty = type.getTypePtr();
+ switch (ty->getTypeClass()) {
+ case Type::CountAttributed:
+ case Type::PackIndexing:
+ case Type::ArrayParameter:
+ case Type::HLSLAttributedResource:
+ case Type::HLSLInlineSpirv:
+ case Type::PredefinedSugar:
+ cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType");
+ break;
+
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.inc"
+ llvm_unreachable(
+ "dependent type must be resolved before the CIR codegen");
+
+ // These types are never variably-modified.
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::ConstantMatrix:
+ case Type::Record:
+ case Type::Enum:
+ case Type::Using:
+ case Type::TemplateSpecialization:
+ case Type::ObjCTypeParam:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ case Type::BitInt:
+ llvm_unreachable("type class is never variably-modified!");
+
+ case Type::Elaborated:
+ type = cast<clang::ElaboratedType>(ty)->getNamedType();
+ break;
+
+ case Type::Adjusted:
+ type = cast<clang::AdjustedType>(ty)->getAdjustedType();
+ break;
+
+ case Type::Decayed:
+ type = cast<clang::DecayedType>(ty)->getPointeeType();
+ break;
+
+ case Type::Pointer:
+ type = cast<clang::PointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::BlockPointer:
+ type = cast<clang::BlockPointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ type = cast<clang::ReferenceType>(ty)->getPointeeType();
+ break;
+
+ case Type::MemberPointer:
+ type = cast<clang::MemberPointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ // Losing element qualification here is fine.
+ type = cast<clang::ArrayType>(ty)->getElementType();
+ break;
+
+ case Type::VariableArray: {
+ cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType VLA");
+ break;
+ }
+
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ type = cast<clang::FunctionType>(ty)->getReturnType();
+ break;
+
+ case Type::Paren:
+ case Type::TypeOf:
+ case Type::UnaryTransform:
+ case Type::Attributed:
+ case Type::BTFTagAttributed:
+ case Type::SubstTemplateTypeParm:
+ case Type::MacroQualified:
+ // Keep walking after single level desugaring.
+ type = type.getSingleStepDesugaredType(getContext());
+ break;
+
+ case Type::Typedef:
+ case Type::Decltype:
+ case Type::Auto:
+ case Type::DeducedTemplateSpecialization:
+ // Stop walking: nothing to do.
+ return;
+
+ case Type::TypeOfExpr:
+ // Stop walking: emit typeof expression.
+ emitIgnoredExpr(cast<clang::TypeOfExprType>(ty)->getUnderlyingExpr());
+ return;
+
+ case Type::Atomic:
+ type = cast<clang::AtomicType>(ty)->getValueType();
+ break;
+
+ case Type::Pipe:
+ type = cast<clang::PipeType>(ty)->getElementType();
+ break;
+ }
+ } while (type->isVariablyModifiedType());
+}
+
} // namespace clang::CIRGen
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 2aceeef..f9c8636 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -18,6 +18,7 @@
#include "CIRGenModule.h"
#include "CIRGenTypeCache.h"
#include "CIRGenValue.h"
+#include "EHScopeStack.h"
#include "Address.h"
@@ -61,6 +62,9 @@ public:
/// The compiler-generated variable that holds the return value.
std::optional<mlir::Value> fnRetAlloca;
+ /// Tracks function scope overall cleanup handling.
+ EHScopeStack ehStack;
+
/// CXXThisDecl - When generating code for a C++ member function,
/// this will hold the implicit 'this' declaration.
ImplicitParamDecl *cxxabiThisDecl = nullptr;
@@ -595,14 +599,65 @@ public:
FunctionArgList args, clang::SourceLocation loc,
clang::SourceLocation startLoc);
+ /// Takes the old cleanup stack size and emits the cleanup blocks
+ /// that have been added.
+ void popCleanupBlocks(size_t oldCleanupStackDepth);
+ void popCleanupBlock();
+
+ /// Enters a new scope for capturing cleanups, all of which
+ /// will be executed once the scope is exited.
+ class RunCleanupsScope {
+ size_t cleanupStackDepth, oldCleanupStackDepth;
+
+ protected:
+ bool performCleanup;
+
+ private:
+ RunCleanupsScope(const RunCleanupsScope &) = delete;
+ void operator=(const RunCleanupsScope &) = delete;
+
+ protected:
+ CIRGenFunction &cgf;
+
+ /// Enter a new cleanup scope.
+ explicit RunCleanupsScope(CIRGenFunction &cgf)
+ : performCleanup(true), cgf(cgf) {
+ cleanupStackDepth = cgf.ehStack.getStackDepth();
+ oldCleanupStackDepth = cgf.currentCleanupStackDepth;
+ cgf.currentCleanupStackDepth = cleanupStackDepth;
+ }
+
+ /// Exit this cleanup scope, emitting any accumulated cleanups.
+ ~RunCleanupsScope() {
+ if (performCleanup)
+ forceCleanup();
+ }
+
+ /// Force the emission of cleanups now, instead of waiting
+ /// until this object is destroyed.
+ void forceCleanup() {
+ assert(performCleanup && "Already forced cleanup");
+ {
+ mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
+ cgf.popCleanupBlocks(cleanupStackDepth);
+ performCleanup = false;
+ cgf.currentCleanupStackDepth = oldCleanupStackDepth;
+ }
+ }
+ };
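+
+  // Typical usage (sketch): construct a RunCleanupsScope (or a subclass such
+  // as LexicalScope below) on the stack; any cleanups pushed onto ehStack
+  // while it is live are emitted when the scope object is destroyed, or
+  // earlier via an explicit forceCleanup() call.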
+
+ // Cleanup stack depth of the RunCleanupsScope that was pushed most recently.
+ size_t currentCleanupStackDepth;
+
+public:
/// Represents a scope, including function bodies, compound statements, and
/// the substatements of if/while/do/for/switch/try statements. This class
/// handles any automatic cleanup, along with the return value.
- struct LexicalScope {
+ struct LexicalScope : public RunCleanupsScope {
private:
- // TODO(CIR): This will live in the base class RunCleanupScope once that
- // class is upstreamed.
- CIRGenFunction &cgf;
+ // Block containing cleanup code for things initialized in this
+ // lexical context (scope).
+ mlir::Block *cleanupBlock = nullptr;
// Points to the scope entry block. This is useful, for instance, for
// helping to insert allocas before finalizing any recursive CodeGen from
@@ -632,8 +687,8 @@ public:
unsigned depth = 0;
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
- : cgf(cgf), entryBlock(eb), parentScope(cgf.curLexScope), beginLoc(loc),
- endLoc(loc) {
+ : RunCleanupsScope(cgf), entryBlock(eb), parentScope(cgf.curLexScope),
+ beginLoc(loc), endLoc(loc) {
assert(entryBlock && "LexicalScope requires an entry block");
cgf.curLexScope = this;
@@ -671,6 +726,27 @@ public:
void setAsSwitch() { scopeKind = Kind::Switch; }
void setAsTernary() { scopeKind = Kind::Ternary; }
+    // Lazily create the cleanup block, or return the existing one.
+ mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) {
+ if (cleanupBlock)
+ return cleanupBlock;
+ cleanupBlock = createCleanupBlock(builder);
+ return cleanupBlock;
+ }
+
+ mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) {
+ return cleanupBlock;
+ }
+
+ mlir::Block *createCleanupBlock(mlir::OpBuilder &builder) {
+      // Create the cleanup block, but don't hook it up just yet.
+ mlir::OpBuilder::InsertionGuard guard(builder);
+ mlir::Region *r = builder.getBlock() ? builder.getBlock()->getParent()
+ : &cgf.curFn->getRegion(0);
+ cleanupBlock = builder.createBlock(r);
+ return cleanupBlock;
+ }
+
// ---
// Return handling.
// ---
@@ -721,6 +797,12 @@ public:
LexicalScope *curLexScope = nullptr;
+ typedef void Destroyer(CIRGenFunction &cgf, Address addr, QualType ty);
+
+ static Destroyer destroyCXXObject;
+
+ Destroyer *getDestroyer(clang::QualType::DestructionKind kind);
+
/// ----------------------
/// CIR emit functions
/// ----------------------
@@ -766,6 +848,12 @@ public:
/// even if no aggregate location is provided.
RValue emitAnyExprToTemp(const clang::Expr *e);
+ void emitArrayDestroy(mlir::Value begin, mlir::Value end,
+ QualType elementType, CharUnits elementAlign,
+ Destroyer *destroyer);
+
+ mlir::Value emitArrayLength(const clang::ArrayType *arrayType,
+ QualType &baseType, Address &addr);
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e);
Address emitArrayToPointerDecay(const Expr *array);
@@ -779,6 +867,10 @@ public:
void emitAutoVarCleanups(const AutoVarEmission &emission);
void emitAutoVarInit(const AutoVarEmission &emission);
+ void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
+ clang::QualType::DestructionKind dtorKind);
+
+ void maybeEmitDeferredVarDeclInit(const VarDecl *vd);
void emitBaseInitializer(mlir::Location loc, const CXXRecordDecl *classDecl,
CXXCtorInitializer *baseInit);
@@ -836,6 +928,9 @@ public:
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e);
void emitConstructorBody(FunctionArgList &args);
+
+ void emitDestroy(Address addr, QualType type, Destroyer *destroyer);
+
void emitDestructorBody(FunctionArgList &args);
mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s);
@@ -843,6 +938,16 @@ public:
void emitCXXConstructExpr(const clang::CXXConstructExpr *e,
AggValueSlot dest);
+ void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
+ const clang::ArrayType *arrayType,
+ Address arrayBegin, const CXXConstructExpr *e,
+ bool newPointerIsChecked,
+ bool zeroInitialize = false);
+ void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
+ mlir::Value numElements, Address arrayBase,
+ const CXXConstructExpr *e,
+ bool newPointerIsChecked,
+ bool zeroInitialize);
void emitCXXConstructorCall(const clang::CXXConstructorDecl *d,
clang::CXXCtorType type, bool forVirtualBase,
bool delegating, AggValueSlot thisAVS,
@@ -956,7 +1061,7 @@ public:
void emitCompoundStmtWithoutScope(const clang::CompoundStmt &s);
- void emitDecl(const clang::Decl &d);
+ void emitDecl(const clang::Decl &d, bool evaluateConditionDecl = false);
mlir::LogicalResult emitDeclStmt(const clang::DeclStmt &s);
LValue emitDeclRefLValue(const clang::DeclRefExpr *e);
@@ -1102,6 +1207,8 @@ public:
/// inside a function, including static vars etc.
void emitVarDecl(const clang::VarDecl &d);
+ void emitVariablyModifiedType(QualType ty);
+
mlir::LogicalResult emitWhileStmt(const clang::WhileStmt &s);
/// Given an assignment `*lhs = rhs`, emit a test that checks if \p rhs is
diff --git a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
index 6577f5f..e5e4c68 100644
--- a/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenItaniumCXXABI.cpp
@@ -113,8 +113,6 @@ static StructorCIRGen getCIRGenToUse(CIRGenModule &cgm,
GlobalDecl aliasDecl;
if (const auto *dd = dyn_cast<CXXDestructorDecl>(md)) {
- // The assignment is correct here, but other support for this is NYI.
- cgm.errorNYI(md->getSourceRange(), "getCIRGenToUse: dtor");
aliasDecl = GlobalDecl(dd, Dtor_Complete);
} else {
const auto *cd = cast<CXXConstructorDecl>(md);
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 3502705..d0f9fc3 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -1103,6 +1103,60 @@ cir::GlobalLinkageKind CIRGenModule::getCIRLinkageForDeclarator(
return cir::GlobalLinkageKind::ExternalLinkage;
}
+/// This function is called when we implement a function with no prototype, e.g.
+/// "int foo() {}". If there are existing call uses of the old function in the
+/// module, this adjusts them to call the new function directly.
+///
+/// This is not just a cleanup: the always_inline pass requires direct calls to
+/// functions to be able to inline them. If there is a bitcast in the way, it
+/// won't inline them. Instcombine normally deletes these calls, but it isn't
+/// run at -O0.
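+///
+/// Hypothetical example of the situation this handles:
+///   void foo();                  // K&R-style declaration, no prototype
+///   void bar(void) { foo(); }    // call goes through the old declaration
+///   void foo() {}                // definition; triggers this rewrite
+/// The call in `bar` is patched to refer to the newly created cir.func.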
+void CIRGenModule::replaceUsesOfNonProtoTypeWithRealFunction(
+ mlir::Operation *old, cir::FuncOp newFn) {
+ // If we're redefining a global as a function, don't transform it.
+ auto oldFn = mlir::dyn_cast<cir::FuncOp>(old);
+ if (!oldFn)
+ return;
+
+ // TODO(cir): this RAUW ignores the features below.
+ assert(!cir::MissingFeatures::opFuncExceptions());
+ assert(!cir::MissingFeatures::opFuncParameterAttributes());
+ assert(!cir::MissingFeatures::opFuncOperandBundles());
+ if (oldFn->getAttrs().size() <= 1)
+ errorNYI(old->getLoc(),
+ "replaceUsesOfNonProtoTypeWithRealFunction: Attribute forwarding");
+
+  // Mark the new function as originating from a no-proto declaration.
+ newFn.setNoProto(oldFn.getNoProto());
+
+ // Iterate through all calls of the no-proto function.
+ std::optional<mlir::SymbolTable::UseRange> symUses =
+ oldFn.getSymbolUses(oldFn->getParentOp());
+ for (const mlir::SymbolTable::SymbolUse &use : symUses.value()) {
+ mlir::OpBuilder::InsertionGuard guard(builder);
+
+ if (auto noProtoCallOp = mlir::dyn_cast<cir::CallOp>(use.getUser())) {
+ builder.setInsertionPoint(noProtoCallOp);
+
+ // Patch call type with the real function type.
+ cir::CallOp realCallOp = builder.createCallOp(
+ noProtoCallOp.getLoc(), newFn, noProtoCallOp.getOperands());
+
+ // Replace old no proto call with fixed call.
+ noProtoCallOp.replaceAllUsesWith(realCallOp);
+ noProtoCallOp.erase();
+ } else if (auto getGlobalOp =
+ mlir::dyn_cast<cir::GetGlobalOp>(use.getUser())) {
+ // Replace type
+ getGlobalOp.getAddr().setType(
+ cir::PointerType::get(newFn.getFunctionType()));
+ } else {
+ errorNYI(use.getUser()->getLoc(),
+ "replaceUsesOfNonProtoTypeWithRealFunction: unexpected use");
+ }
+ }
+}
+
cir::GlobalLinkageKind
CIRGenModule::getCIRLinkageVarDefinition(const VarDecl *vd, bool isConstant) {
assert(!isConstant && "constant variables NYI");
@@ -1208,6 +1262,15 @@ cir::GlobalOp CIRGenModule::getGlobalForStringLiteral(const StringLiteral *s,
return gv;
}
+void CIRGenModule::emitExplicitCastExprType(const ExplicitCastExpr *e,
+ CIRGenFunction *cgf) {
+ if (cgf && e->getType()->isVariablyModifiedType())
+ cgf->emitVariablyModifiedType(e->getType());
+
+ assert(!cir::MissingFeatures::generateDebugInfo() &&
+ "emitExplicitCastExprType");
+}
+
void CIRGenModule::emitDeclContext(const DeclContext *dc) {
for (Decl *decl : dc->decls()) {
// Unlike other DeclContexts, the contents of an ObjCImplDecl at TU scope
@@ -1235,6 +1298,7 @@ void CIRGenModule::emitTopLevelDecl(Decl *decl) {
decl->getDeclKindName());
break;
+ case Decl::CXXConversion:
case Decl::CXXMethod:
case Decl::Function: {
auto *fd = cast<FunctionDecl>(decl);
@@ -1244,8 +1308,13 @@ void CIRGenModule::emitTopLevelDecl(Decl *decl) {
break;
}
- case Decl::Var: {
+ case Decl::Var:
+ case Decl::Decomposition: {
auto *vd = cast<VarDecl>(decl);
+ if (isa<DecompositionDecl>(decl)) {
+ errorNYI(decl->getSourceRange(), "global variable decompositions");
+ break;
+ }
emitGlobal(vd);
break;
}
@@ -1267,8 +1336,14 @@ void CIRGenModule::emitTopLevelDecl(Decl *decl) {
break;
// No code generation needed.
- case Decl::UsingShadow:
+ case Decl::ClassTemplate:
+ case Decl::Concept:
+ case Decl::CXXDeductionGuide:
case Decl::Empty:
+ case Decl::FunctionTemplate:
+ case Decl::StaticAssert:
+ case Decl::TypeAliasTemplate:
+ case Decl::UsingShadow:
break;
case Decl::CXXConstructor:
@@ -1530,10 +1605,10 @@ static bool shouldAssumeDSOLocal(const CIRGenModule &cgm,
const llvm::Triple &tt = cgm.getTriple();
const CodeGenOptions &cgOpts = cgm.getCodeGenOpts();
- if (tt.isWindowsGNUEnvironment()) {
- // In MinGW, variables without DLLImport can still be automatically
- // imported from a DLL by the linker; don't mark variables that
- // potentially could come from another DLL as DSO local.
+ if (tt.isOSCygMing()) {
+ // In MinGW and Cygwin, variables without DLLImport can still be
+ // automatically imported from a DLL by the linker; don't mark variables
+ // that potentially could come from another DLL as DSO local.
// With EmulatedTLS, TLS variables can be autoimported from other DLLs
// (and this actually happens in the public interface of libstdc++), so
@@ -1692,8 +1767,7 @@ cir::FuncOp CIRGenModule::getOrCreateCIRFunction(
// Lookup the entry, lazily creating it if necessary.
mlir::Operation *entry = getGlobalValue(mangledName);
if (entry) {
- if (!isa<cir::FuncOp>(entry))
- errorNYI(d->getSourceRange(), "getOrCreateCIRFunction: non-FuncOp");
+ assert(mlir::isa<cir::FuncOp>(entry));
assert(!cir::MissingFeatures::weakRefReference());
@@ -1729,6 +1803,30 @@ cir::FuncOp CIRGenModule::getOrCreateCIRFunction(
invalidLoc ? theModule->getLoc() : getLoc(funcDecl->getSourceRange()),
mangledName, mlir::cast<cir::FuncType>(funcType), funcDecl);
+  // If we already created a function with the same mangled name (but a
+  // different type) before, redirect its uses to the new function and then
+  // erase the old declaration.
+ //
+ // This happens if there is a prototype for a function (e.g. "int f()") and
+ // then a definition of a different type (e.g. "int f(int x)").
+ if (entry) {
+
+ // Fetch a generic symbol-defining operation and its uses.
+ auto symbolOp = mlir::cast<mlir::SymbolOpInterface>(entry);
+
+ // This might be an implementation of a function without a prototype, in
+ // which case, try to do special replacement of calls which match the new
+ // prototype. The really key thing here is that we also potentially drop
+ // arguments from the call site so as to make a direct call, which makes the
+ // inliner happier and suppresses a number of optimizer warnings (!) about
+ // dropping arguments.
+ if (symbolOp.getSymbolUses(symbolOp->getParentOp()))
+ replaceUsesOfNonProtoTypeWithRealFunction(entry, funcOp);
+
+ // Obliterate no-proto declaration.
+ entry->erase();
+ }
+
if (d)
setFunctionAttributes(gd, funcOp, /*isIncompleteFunction=*/false, isThunk);
@@ -1805,7 +1903,9 @@ CIRGenModule::createCIRFunction(mlir::Location loc, StringRef name,
func = builder.create<cir::FuncOp>(loc, name, funcType);
assert(!cir::MissingFeatures::opFuncAstDeclAttr());
- assert(!cir::MissingFeatures::opFuncNoProto());
+
+ if (funcDecl && !funcDecl->hasPrototype())
+ func.setNoProto(true);
assert(func.isDeclaration() && "expected empty body");
diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.h b/clang/lib/CIR/CodeGen/CIRGenModule.h
index 16922b1..5d07d38 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.h
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.h
@@ -252,6 +252,11 @@ public:
getAddrOfGlobal(clang::GlobalDecl gd,
ForDefinition_t isForDefinition = NotForDefinition);
+ /// Emit type info if type of an expression is a variably modified
+ /// type. Also emit proper debug info for cast types.
+ void emitExplicitCastExprType(const ExplicitCastExpr *e,
+ CIRGenFunction *cgf = nullptr);
+
/// Emit code for a single global function or variable declaration. Forward
/// declarations are emitted lazily.
void emitGlobal(clang::GlobalDecl gd);
@@ -308,6 +313,9 @@ public:
static void setInitializer(cir::GlobalOp &op, mlir::Attribute value);
+ void replaceUsesOfNonProtoTypeWithRealFunction(mlir::Operation *old,
+ cir::FuncOp newFn);
+
cir::FuncOp
getOrCreateCIRFunction(llvm::StringRef mangledName, mlir::Type funcType,
clang::GlobalDecl gd, bool forVTable,
diff --git a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
index 05e8848..0c8ff4bd 100644
--- a/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenRecordLayoutBuilder.cpp
@@ -91,6 +91,9 @@ struct CIRRecordLowering final {
return astContext.getTargetInfo().getABI().starts_with("aapcs");
}
+ /// Helper function to check if the target machine is BigEndian.
+ bool isBigEndian() const { return astContext.getTargetInfo().isBigEndian(); }
+
CharUnits bitsToCharUnits(uint64_t bitOffset) {
return astContext.toCharUnitsFromBits(bitOffset);
}
@@ -438,9 +441,7 @@ CIRRecordLowering::accumulateBitFields(RecordDecl::field_iterator field,
} else if (cirGenTypes.getCGModule()
.getCodeGenOpts()
.FineGrainedBitfieldAccesses) {
- assert(!cir::MissingFeatures::nonFineGrainedBitfields());
- cirGenTypes.getCGModule().errorNYI(field->getSourceRange(),
- "NYI FineGrainedBitfield");
+ installBest = true;
} else {
// Otherwise, we're not installing. Update the bit size
// of the current span to go all the way to limitOffset, which is
@@ -773,7 +774,104 @@ void CIRRecordLowering::computeVolatileBitfields() {
!cirGenTypes.getCGModule().getCodeGenOpts().AAPCSBitfieldWidth)
return;
- assert(!cir::MissingFeatures::armComputeVolatileBitfields());
+ for (auto &[field, info] : bitFields) {
+ mlir::Type resLTy = cirGenTypes.convertTypeForMem(field->getType());
+
+ if (astContext.toBits(astRecordLayout.getAlignment()) <
+ getSizeInBits(resLTy).getQuantity())
+ continue;
+
+ // CIRRecordLowering::setBitFieldInfo() pre-adjusts the bit-field offsets
+ // for big-endian targets, but it assumes a container of width
+ // info.storageSize. Since AAPCS uses a different container size (width
+ // of the type), we first undo that calculation here and redo it once
+ // the bit-field offset within the new container is calculated.
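+    // Illustrative numbers (hypothetical): with info.storageSize = 32,
+    // info.offset = 20 and info.size = 4 on a big-endian target, the original
+    // bit offset recovered here is 32 - (20 + 4) = 8; it is re-adjusted below
+    // once the AAPCS container width is known.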
+ const unsigned oldOffset =
+ isBigEndian() ? info.storageSize - (info.offset + info.size)
+ : info.offset;
+ // Offset to the bit-field from the beginning of the struct.
+ const unsigned absoluteOffset =
+ astContext.toBits(info.storageOffset) + oldOffset;
+
+ // Container size is the width of the bit-field type.
+ const unsigned storageSize = getSizeInBits(resLTy).getQuantity();
+ // Nothing to do if the access uses the desired
+ // container width and is naturally aligned.
+ if (info.storageSize == storageSize && (oldOffset % storageSize == 0))
+ continue;
+
+ // Offset within the container.
+ unsigned offset = absoluteOffset & (storageSize - 1);
+ // Bail out if an aligned load of the container cannot cover the entire
+    // bit-field. This can happen, for example, if the bit-field is part of a
+    // packed struct. AAPCS does not define access rules for such cases, so we
+    // let clang follow its own rules.
+ if (offset + info.size > storageSize)
+ continue;
+
+ // Re-adjust offsets for big-endian targets.
+ if (isBigEndian())
+ offset = storageSize - (offset + info.size);
+
+ const CharUnits storageOffset =
+ astContext.toCharUnitsFromBits(absoluteOffset & ~(storageSize - 1));
+ const CharUnits end = storageOffset +
+ astContext.toCharUnitsFromBits(storageSize) -
+ CharUnits::One();
+
+ const ASTRecordLayout &layout =
+ astContext.getASTRecordLayout(field->getParent());
+    // If the access would reach outside the record, bail out.
+ const CharUnits recordSize = layout.getSize();
+ if (end >= recordSize)
+ continue;
+
+    // Bail out if performing this load would access non-bit-field members.
+ bool conflict = false;
+ for (const auto *f : recordDecl->fields()) {
+      // Overlap with other non-zero-length bit-fields is allowed.
+ if (f->isBitField() && !f->isZeroLengthBitField())
+ continue;
+
+ const CharUnits fOffset = astContext.toCharUnitsFromBits(
+ layout.getFieldOffset(f->getFieldIndex()));
+
+      // As C11 defines it, a zero-width bit-field acts as a barrier, so the
+      // fields before and after it should be free of race conditions.
+      // The AAPCS acknowledges this and imposes no restrictions when the
+      // natural container overlaps a zero-length bit-field.
+ if (f->isZeroLengthBitField()) {
+ if (end > fOffset && storageOffset < fOffset) {
+ conflict = true;
+ break;
+ }
+ }
+
+ const CharUnits fEnd =
+ fOffset +
+ astContext.toCharUnitsFromBits(astContext.toBits(
+ getSizeInBits(cirGenTypes.convertTypeForMem(f->getType())))) -
+ CharUnits::One();
+ // If no overlap, continue.
+ if (end < fOffset || fEnd < storageOffset)
+ continue;
+
+ // The desired load overlaps a non-bit-field member, bail out.
+ conflict = true;
+ break;
+ }
+
+ if (conflict)
+ continue;
+ // Write the new bit-field access parameters.
+    // The storage offset is now expressed as the number of elements from the
+    // start of the structure, so divide the byte offset by the element size.
+ info.volatileStorageOffset =
+ storageOffset /
+ astContext.toCharUnitsFromBits(storageSize).getQuantity();
+ info.volatileStorageSize = storageSize;
+ info.volatileOffset = offset;
+ }
}
void CIRRecordLowering::accumulateBases(const CXXRecordDecl *cxxRecordDecl) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index 9193f6f..75da229e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -363,8 +363,8 @@ mlir::LogicalResult CIRGenFunction::emitIfStmt(const IfStmt &s) {
mlir::LogicalResult CIRGenFunction::emitDeclStmt(const DeclStmt &s) {
assert(builder.getInsertionBlock() && "expected valid insertion point");
- for (const Decl *I : s.decls())
- emitDecl(*I);
+ for (const Decl *i : s.decls())
+ emitDecl(*i, /*evaluateConditionDecl=*/true);
return mlir::success();
}
@@ -409,7 +409,10 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
}
auto *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
+ // This should emit a branch through the cleanup block if one exists.
builder.create<cir::BrOp>(loc, retBlock);
+ if (ehStack.getStackDepth() != currentCleanupStackDepth)
+ cgm.errorNYI(s.getSourceRange(), "return with cleanup stack");
builder.createBlock(builder.getBlock()->getParent());
return mlir::success();
@@ -872,7 +875,7 @@ mlir::LogicalResult CIRGenFunction::emitSwitchStmt(const clang::SwitchStmt &s) {
return mlir::failure();
if (s.getConditionVariable())
- emitDecl(*s.getConditionVariable());
+ emitDecl(*s.getConditionVariable(), /*evaluateConditionDecl=*/true);
mlir::Value condV = emitScalarExpr(s.getCond());
diff --git a/clang/lib/CIR/CodeGen/CMakeLists.txt b/clang/lib/CIR/CodeGen/CMakeLists.txt
index 03ea60c..ca3a329 100644
--- a/clang/lib/CIR/CodeGen/CMakeLists.txt
+++ b/clang/lib/CIR/CodeGen/CMakeLists.txt
@@ -11,6 +11,7 @@ add_clang_library(clangCIR
CIRGenBuilder.cpp
CIRGenCall.cpp
CIRGenClass.cpp
+ CIRGenCleanup.cpp
CIRGenCXX.cpp
CIRGenCXXABI.cpp
CIRGenCXXExpr.cpp
diff --git a/clang/lib/CIR/CodeGen/EHScopeStack.h b/clang/lib/CIR/CodeGen/EHScopeStack.h
new file mode 100644
index 0000000..22750ac
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/EHScopeStack.h
@@ -0,0 +1,99 @@
+//===-- EHScopeStack.h - Stack for cleanup CIR generation -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes should be the minimum interface required for other parts of
+// CIR CodeGen to emit cleanups. The implementation is in CIRGenCleanup.cpp;
+// other implementation details that are not widely needed are in
+// CIRGenCleanup.h.
+//
+// TODO(cir): this header should be shared between LLVM and CIR codegen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
+#define CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
+
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang::CIRGen {
+
+class CIRGenFunction;
+
+enum CleanupKind : unsigned {
+ /// Denotes a cleanup that should run when a scope is exited using exceptional
+  /// control flow (e.g. a throw statement leading to stack unwinding).
+ EHCleanup = 0x1,
+
+ /// Denotes a cleanup that should run when a scope is exited using normal
+ /// control flow (falling off the end of the scope, return, goto, ...).
+ NormalCleanup = 0x2,
+
+ NormalAndEHCleanup = EHCleanup | NormalCleanup,
+
+ LifetimeMarker = 0x8,
+ NormalEHLifetimeMarker = LifetimeMarker | NormalAndEHCleanup,
+};
+
+/// A stack of scopes which respond to exceptions, including cleanups
+/// and catch blocks.
+class EHScopeStack {
+public:
+ /// Information for lazily generating a cleanup. Subclasses must be
+ /// POD-like: cleanups will not be destructed, and they will be
+ /// allocated on the cleanup stack and freely copied and moved
+ /// around.
+ ///
+ /// Cleanup implementations should generally be declared in an
+ /// anonymous namespace.
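+  ///
+  /// A minimal sketch of a cleanup (hypothetical, for illustration):
+  ///   struct CallDtorCleanup final : EHScopeStack::Cleanup {
+  ///     void emit(CIRGenFunction &cgf) override { /* emit the dtor call */ }
+  ///   };
+  ///   ehStack.pushCleanup<CallDtorCleanup>(NormalAndEHCleanup);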
+ class Cleanup {
+ // Anchor the construction vtable.
+ virtual void anchor();
+
+ public:
+ Cleanup(const Cleanup &) = default;
+ Cleanup(Cleanup &&) {}
+ Cleanup() = default;
+
+ virtual ~Cleanup() = default;
+
+ /// Emit the cleanup. For normal cleanups, this is run in the
+ /// same EH context as when the cleanup was pushed, i.e. the
+ /// immediately-enclosing context of the cleanup scope. For
+ /// EH cleanups, this is run in a terminate context.
+    ///
+    /// \param cgf the CIRGenFunction used to emit the cleanup.
+ virtual void emit(CIRGenFunction &cgf) = 0;
+ };
+
+ // Classic codegen has a finely tuned custom allocator and a complex stack
+ // management scheme. We'll probably eventually want to find a way to share
+ // that implementation. For now, we will use a very simplified implementation
+ // to get cleanups working.
+ llvm::SmallVector<std::unique_ptr<Cleanup>, 8> cleanupStack;
+
+private:
+  /// The CGF this stack belongs to.
+ CIRGenFunction *cgf = nullptr;
+
+public:
+ EHScopeStack() = default;
+ ~EHScopeStack() = default;
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class... As> void pushCleanup(CleanupKind kind, As... a) {
+ cleanupStack.push_back(std::make_unique<T>(a...));
+ }
+
+ void setCGF(CIRGenFunction *inCGF) { cgf = inCGF; }
+
+ size_t getStackDepth() const { return cleanupStack.size(); }
+};
+
+} // namespace clang::CIRGen
+
+#endif // CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index f0416b6..1c3a310 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -17,6 +17,7 @@
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "mlir/Interfaces/FunctionImplementation.h"
+#include "mlir/Support/LLVM.h"
#include "clang/CIR/Dialect/IR/CIROpsDialect.cpp.inc"
#include "clang/CIR/Dialect/IR/CIROpsEnums.cpp.inc"
@@ -338,7 +339,7 @@ static LogicalResult checkConstantTypes(mlir::Operation *op, mlir::Type opType,
}
if (mlir::isa<cir::ConstArrayAttr, cir::ConstVectorAttr,
- cir::ConstComplexAttr>(attrType))
+ cir::ConstComplexAttr, cir::PoisonAttr>(attrType))
return success();
assert(isa<TypedAttr>(attrType) && "What else could we be looking at here?");
@@ -489,6 +490,104 @@ LogicalResult cir::CastOp::verify() {
return emitOpError() << "requires two types differ in addrspace only";
return success();
}
+ case cir::CastKind::float_to_complex: {
+ if (!mlir::isa<cir::FPTypeInterface>(srcType))
+ return emitOpError() << "requires !cir.float type for source";
+ auto resComplexTy = mlir::dyn_cast<cir::ComplexType>(resType);
+ if (!resComplexTy)
+ return emitOpError() << "requires !cir.complex type for result";
+ if (srcType != resComplexTy.getElementType())
+ return emitOpError() << "requires source type match result element type";
+ return success();
+ }
+ case cir::CastKind::int_to_complex: {
+ if (!mlir::isa<cir::IntType>(srcType))
+ return emitOpError() << "requires !cir.int type for source";
+ auto resComplexTy = mlir::dyn_cast<cir::ComplexType>(resType);
+ if (!resComplexTy)
+ return emitOpError() << "requires !cir.complex type for result";
+ if (srcType != resComplexTy.getElementType())
+ return emitOpError() << "requires source type match result element type";
+ return success();
+ }
+ case cir::CastKind::float_complex_to_real: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy)
+ return emitOpError() << "requires !cir.complex type for source";
+ if (!mlir::isa<cir::FPTypeInterface>(resType))
+ return emitOpError() << "requires !cir.float type for result";
+ if (srcComplexTy.getElementType() != resType)
+ return emitOpError() << "requires source element type match result type";
+ return success();
+ }
+ case cir::CastKind::int_complex_to_real: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy)
+ return emitOpError() << "requires !cir.complex type for source";
+ if (!mlir::isa<cir::IntType>(resType))
+ return emitOpError() << "requires !cir.int type for result";
+ if (srcComplexTy.getElementType() != resType)
+ return emitOpError() << "requires source element type match result type";
+ return success();
+ }
+ case cir::CastKind::float_complex_to_bool: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy || !srcComplexTy.isFloatingPointComplex())
+ return emitOpError()
+ << "requires floating point !cir.complex type for source";
+ if (!mlir::isa<cir::BoolType>(resType))
+ return emitOpError() << "requires !cir.bool type for result";
+ return success();
+ }
+ case cir::CastKind::int_complex_to_bool: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy || !srcComplexTy.isIntegerComplex())
+ return emitOpError()
+ << "requires floating point !cir.complex type for source";
+ if (!mlir::isa<cir::BoolType>(resType))
+ return emitOpError() << "requires !cir.bool type for result";
+ return success();
+ }
+ case cir::CastKind::float_complex: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy || !srcComplexTy.isFloatingPointComplex())
+ return emitOpError()
+ << "requires floating point !cir.complex type for source";
+ auto resComplexTy = mlir::dyn_cast<cir::ComplexType>(resType);
+ if (!resComplexTy || !resComplexTy.isFloatingPointComplex())
+ return emitOpError()
+ << "requires floating point !cir.complex type for result";
+ return success();
+ }
+ case cir::CastKind::float_complex_to_int_complex: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy || !srcComplexTy.isFloatingPointComplex())
+ return emitOpError()
+ << "requires floating point !cir.complex type for source";
+ auto resComplexTy = mlir::dyn_cast<cir::ComplexType>(resType);
+ if (!resComplexTy || !resComplexTy.isIntegerComplex())
+ return emitOpError() << "requires integer !cir.complex type for result";
+ return success();
+ }
+ case cir::CastKind::int_complex: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy || !srcComplexTy.isIntegerComplex())
+ return emitOpError() << "requires integer !cir.complex type for source";
+ auto resComplexTy = mlir::dyn_cast<cir::ComplexType>(resType);
+ if (!resComplexTy || !resComplexTy.isIntegerComplex())
+ return emitOpError() << "requires integer !cir.complex type for result";
+ return success();
+ }
+ case cir::CastKind::int_complex_to_float_complex: {
+ auto srcComplexTy = mlir::dyn_cast<cir::ComplexType>(srcType);
+ if (!srcComplexTy || !srcComplexTy.isIntegerComplex())
+ return emitOpError() << "requires integer !cir.complex type for source";
+ auto resComplexTy = mlir::dyn_cast<cir::ComplexType>(resType);
+ if (!resComplexTy || !resComplexTy.isFloatingPointComplex())
+ return emitOpError()
+ << "requires floating point !cir.complex type for result";
+ return success();
+ }
default:
llvm_unreachable("Unknown CastOp kind?");
}
@@ -530,6 +629,11 @@ static Value tryFoldCastChain(cir::CastOp op) {
}
OpFoldResult cir::CastOp::fold(FoldAdaptor adaptor) {
+ if (mlir::isa_and_present<cir::PoisonAttr>(adaptor.getSrc())) {
+ // Propagate poison value
+ return cir::PoisonAttr::get(getContext(), getType());
+ }
+
if (getSrc().getType() == getType()) {
switch (getKind()) {
case cir::CastKind::integral: {
@@ -1366,10 +1470,14 @@ ParseResult cir::FuncOp::parse(OpAsmParser &parser, OperationState &state) {
llvm::SMLoc loc = parser.getCurrentLocation();
mlir::Builder &builder = parser.getBuilder();
+ mlir::StringAttr noProtoNameAttr = getNoProtoAttrName(state.name);
mlir::StringAttr visNameAttr = getSymVisibilityAttrName(state.name);
mlir::StringAttr visibilityNameAttr = getGlobalVisibilityAttrName(state.name);
mlir::StringAttr dsoLocalNameAttr = getDsoLocalAttrName(state.name);
+ if (parser.parseOptionalKeyword(noProtoNameAttr).succeeded())
+ state.addAttribute(noProtoNameAttr, parser.getBuilder().getUnitAttr());
+
// Default to external linkage if no keyword is provided.
state.addAttribute(getLinkageAttrNameString(),
GlobalLinkageKindAttr::get(
@@ -1474,6 +1582,9 @@ mlir::Region *cir::FuncOp::getCallableRegion() {
}
void cir::FuncOp::print(OpAsmPrinter &p) {
+ if (getNoProto())
+ p << " no_proto";
+
if (getComdat())
p << " comdat";
@@ -1684,6 +1795,12 @@ static bool isBoolNot(cir::UnaryOp op) {
//
// and the argument of the first one (%0) will be used instead.
OpFoldResult cir::UnaryOp::fold(FoldAdaptor adaptor) {
+ if (auto poison =
+ mlir::dyn_cast_if_present<cir::PoisonAttr>(adaptor.getInput())) {
+ // Propagate poison values
+ return poison;
+ }
+
if (isBoolNot(*this))
if (auto previous = dyn_cast_or_null<UnaryOp>(getInput().getDefiningOp()))
if (isBoolNot(previous))
@@ -2133,6 +2250,143 @@ LogicalResult cir::ComplexImagPtrOp::verify() {
}
//===----------------------------------------------------------------------===//
+// Bit manipulation operations
+//===----------------------------------------------------------------------===//
+
+static OpFoldResult
+foldUnaryBitOp(mlir::Attribute inputAttr,
+ llvm::function_ref<llvm::APInt(const llvm::APInt &)> func,
+ bool poisonZero = false) {
+ if (mlir::isa_and_present<cir::PoisonAttr>(inputAttr)) {
+ // Propagate poison value
+ return inputAttr;
+ }
+
+ auto input = mlir::dyn_cast_if_present<IntAttr>(inputAttr);
+ if (!input)
+ return nullptr;
+
+ llvm::APInt inputValue = input.getValue();
+ if (poisonZero && inputValue.isZero())
+ return cir::PoisonAttr::get(input.getType());
+
+ llvm::APInt resultValue = func(inputValue);
+ return IntAttr::get(input.getType(), resultValue);
+}
+
+OpFoldResult BitClrsbOp::fold(FoldAdaptor adaptor) {
+ return foldUnaryBitOp(adaptor.getInput(), [](const llvm::APInt &inputValue) {
+ unsigned resultValue =
+ inputValue.getBitWidth() - inputValue.getSignificantBits();
+ return llvm::APInt(inputValue.getBitWidth(), resultValue);
+ });
+}
+
+OpFoldResult BitClzOp::fold(FoldAdaptor adaptor) {
+ return foldUnaryBitOp(
+ adaptor.getInput(),
+ [](const llvm::APInt &inputValue) {
+ unsigned resultValue = inputValue.countLeadingZeros();
+ return llvm::APInt(inputValue.getBitWidth(), resultValue);
+ },
+ getPoisonZero());
+}
+
+OpFoldResult BitCtzOp::fold(FoldAdaptor adaptor) {
+ return foldUnaryBitOp(
+ adaptor.getInput(),
+ [](const llvm::APInt &inputValue) {
+ return llvm::APInt(inputValue.getBitWidth(),
+ inputValue.countTrailingZeros());
+ },
+ getPoisonZero());
+}
+
+OpFoldResult BitFfsOp::fold(FoldAdaptor adaptor) {
+ return foldUnaryBitOp(adaptor.getInput(), [](const llvm::APInt &inputValue) {
+ unsigned trailingZeros = inputValue.countTrailingZeros();
+ unsigned result =
+ trailingZeros == inputValue.getBitWidth() ? 0 : trailingZeros + 1;
+ return llvm::APInt(inputValue.getBitWidth(), result);
+ });
+}
+
+OpFoldResult BitParityOp::fold(FoldAdaptor adaptor) {
+ return foldUnaryBitOp(adaptor.getInput(), [](const llvm::APInt &inputValue) {
+ return llvm::APInt(inputValue.getBitWidth(), inputValue.popcount() % 2);
+ });
+}
+
+OpFoldResult BitPopcountOp::fold(FoldAdaptor adaptor) {
+ return foldUnaryBitOp(adaptor.getInput(), [](const llvm::APInt &inputValue) {
+ return llvm::APInt(inputValue.getBitWidth(), inputValue.popcount());
+ });
+}
+
+OpFoldResult BitReverseOp::fold(FoldAdaptor adaptor) {
+ return foldUnaryBitOp(adaptor.getInput(), [](const llvm::APInt &inputValue) {
+ return inputValue.reverseBits();
+ });
+}
+
+OpFoldResult ByteSwapOp::fold(FoldAdaptor adaptor) {
+ return foldUnaryBitOp(adaptor.getInput(), [](const llvm::APInt &inputValue) {
+ return inputValue.byteSwap();
+ });
+}
+
+OpFoldResult RotateOp::fold(FoldAdaptor adaptor) {
+ if (mlir::isa_and_present<cir::PoisonAttr>(adaptor.getInput()) ||
+ mlir::isa_and_present<cir::PoisonAttr>(adaptor.getAmount())) {
+ // Propagate poison values
+ return cir::PoisonAttr::get(getType());
+ }
+
+ auto input = mlir::dyn_cast_if_present<IntAttr>(adaptor.getInput());
+ auto amount = mlir::dyn_cast_if_present<IntAttr>(adaptor.getAmount());
+ if (!input && !amount)
+ return nullptr;
+
+ // We could fold cir.rotate even if one of its two operands is not a constant:
+ // - `cir.rotate left/right %0, 0` could be folded into just %0 even if %0
+ // is not a constant.
+ // - `cir.rotate left/right 0/0b111...111, %0` could be folded into 0 or
+ // 0b111...111 even if %0 is not a constant.
+
+ llvm::APInt inputValue;
+ if (input) {
+ inputValue = input.getValue();
+ if (inputValue.isZero() || inputValue.isAllOnes()) {
+ // An input value of all 0s or all 1s will not change after rotation
+ return input;
+ }
+ }
+
+ uint64_t amountValue;
+ if (amount) {
+ amountValue = amount.getValue().urem(getInput().getType().getWidth());
+ if (amountValue == 0) {
+ // A shift amount of 0 will not change the input value
+ return getInput();
+ }
+ }
+
+ if (!input || !amount)
+ return nullptr;
+
+ assert(inputValue.getBitWidth() == getInput().getType().getWidth() &&
+ "input value must have the same bit width as the input type");
+
+ llvm::APInt resultValue;
+ if (isRotateLeft())
+ resultValue = inputValue.rotl(amountValue);
+ else
+ resultValue = inputValue.rotr(amountValue);
+
+ return IntAttr::get(input.getContext(), input.getType(), resultValue);
+}
+
+//===----------------------------------------------------------------------===//
// TableGen'd op method definitions
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
index e505db5..2eaa60c 100644
--- a/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/CIRCanonicalize.cpp
@@ -143,7 +143,8 @@ void CIRCanonicalizePass::runOnOperation() {
if (isa<BrOp, BrCondOp, CastOp, ScopeOp, SwitchOp, SelectOp, UnaryOp,
ComplexCreateOp, ComplexImagOp, ComplexRealOp, VecCmpOp,
VecCreateOp, VecExtractOp, VecShuffleOp, VecShuffleDynamicOp,
- VecTernaryOp>(op))
+ VecTernaryOp, BitClrsbOp, BitClzOp, BitCtzOp, BitFfsOp, BitParityOp,
+ BitPopcountOp, BitReverseOp, ByteSwapOp, RotateOp>(op))
ops.push_back(op);
});
diff --git a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
index 8f848c7..66260eb 100644
--- a/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
+++ b/clang/lib/CIR/Dialect/Transforms/LoweringPrepare.cpp
@@ -8,10 +8,12 @@
#include "PassDetail.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h"
#include "clang/CIR/Dialect/IR/CIRDialect.h"
#include "clang/CIR/Dialect/IR/CIROpsEnums.h"
#include "clang/CIR/Dialect/Passes.h"
+#include "clang/CIR/MissingFeatures.h"
#include <memory>
@@ -24,11 +26,251 @@ struct LoweringPreparePass : public LoweringPrepareBase<LoweringPreparePass> {
void runOnOperation() override;
void runOnOp(mlir::Operation *op);
+ void lowerCastOp(cir::CastOp op);
+ void lowerComplexMulOp(cir::ComplexMulOp op);
void lowerUnaryOp(cir::UnaryOp op);
+ void lowerArrayDtor(cir::ArrayDtor op);
+ void lowerArrayCtor(cir::ArrayCtor op);
+
+ cir::FuncOp buildRuntimeFunction(
+ mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc,
+ cir::FuncType type,
+ cir::GlobalLinkageKind linkage = cir::GlobalLinkageKind::ExternalLinkage);
+
+ ///
+ /// AST related
+ /// -----------
+
+ clang::ASTContext *astCtx;
+
+ /// Tracks current module.
+ mlir::ModuleOp mlirModule;
+
+ void setASTContext(clang::ASTContext *c) { astCtx = c; }
};
} // namespace
+cir::FuncOp LoweringPreparePass::buildRuntimeFunction(
+ mlir::OpBuilder &builder, llvm::StringRef name, mlir::Location loc,
+ cir::FuncType type, cir::GlobalLinkageKind linkage) {
+ cir::FuncOp f = dyn_cast_or_null<FuncOp>(SymbolTable::lookupNearestSymbolFrom(
+ mlirModule, StringAttr::get(mlirModule->getContext(), name)));
+ if (!f) {
+ f = builder.create<cir::FuncOp>(loc, name, type);
+ f.setLinkageAttr(
+ cir::GlobalLinkageKindAttr::get(builder.getContext(), linkage));
+ mlir::SymbolTable::setSymbolVisibility(
+ f, mlir::SymbolTable::Visibility::Private);
+
+ assert(!cir::MissingFeatures::opFuncExtraAttrs());
+ }
+ return f;
+}
+
+static mlir::Value lowerScalarToComplexCast(mlir::MLIRContext &ctx,
+ cir::CastOp op) {
+ cir::CIRBaseBuilderTy builder(ctx);
+ builder.setInsertionPoint(op);
+
+ mlir::Value src = op.getSrc();
+ mlir::Value imag = builder.getNullValue(src.getType(), op.getLoc());
+ return builder.createComplexCreate(op.getLoc(), src, imag);
+}
+
+static mlir::Value lowerComplexToScalarCast(mlir::MLIRContext &ctx,
+ cir::CastOp op,
+ cir::CastKind elemToBoolKind) {
+ cir::CIRBaseBuilderTy builder(ctx);
+ builder.setInsertionPoint(op);
+
+ mlir::Value src = op.getSrc();
+ if (!mlir::isa<cir::BoolType>(op.getType()))
+ return builder.createComplexReal(op.getLoc(), src);
+
+ // Complex cast to bool: (bool)(a+bi) => (bool)a || (bool)b
+ mlir::Value srcReal = builder.createComplexReal(op.getLoc(), src);
+ mlir::Value srcImag = builder.createComplexImag(op.getLoc(), src);
+
+ cir::BoolType boolTy = builder.getBoolTy();
+ mlir::Value srcRealToBool =
+ builder.createCast(op.getLoc(), elemToBoolKind, srcReal, boolTy);
+ mlir::Value srcImagToBool =
+ builder.createCast(op.getLoc(), elemToBoolKind, srcImag, boolTy);
+ return builder.createLogicalOr(op.getLoc(), srcRealToBool, srcImagToBool);
+}
+
+static mlir::Value lowerComplexToComplexCast(mlir::MLIRContext &ctx,
+ cir::CastOp op,
+ cir::CastKind scalarCastKind) {
+ CIRBaseBuilderTy builder(ctx);
+ builder.setInsertionPoint(op);
+
+ mlir::Value src = op.getSrc();
+ auto dstComplexElemTy =
+ mlir::cast<cir::ComplexType>(op.getType()).getElementType();
+
+ mlir::Value srcReal = builder.createComplexReal(op.getLoc(), src);
+ mlir::Value srcImag = builder.createComplexImag(op.getLoc(), src);
+
+ mlir::Value dstReal = builder.createCast(op.getLoc(), scalarCastKind, srcReal,
+ dstComplexElemTy);
+ mlir::Value dstImag = builder.createCast(op.getLoc(), scalarCastKind, srcImag,
+ dstComplexElemTy);
+ return builder.createComplexCreate(op.getLoc(), dstReal, dstImag);
+}
+
+void LoweringPreparePass::lowerCastOp(cir::CastOp op) {
+ mlir::MLIRContext &ctx = getContext();
+ mlir::Value loweredValue = [&]() -> mlir::Value {
+ switch (op.getKind()) {
+ case cir::CastKind::float_to_complex:
+ case cir::CastKind::int_to_complex:
+ return lowerScalarToComplexCast(ctx, op);
+ case cir::CastKind::float_complex_to_real:
+ case cir::CastKind::int_complex_to_real:
+ return lowerComplexToScalarCast(ctx, op, op.getKind());
+ case cir::CastKind::float_complex_to_bool:
+ return lowerComplexToScalarCast(ctx, op, cir::CastKind::float_to_bool);
+ case cir::CastKind::int_complex_to_bool:
+ return lowerComplexToScalarCast(ctx, op, cir::CastKind::int_to_bool);
+ case cir::CastKind::float_complex:
+ return lowerComplexToComplexCast(ctx, op, cir::CastKind::floating);
+ case cir::CastKind::float_complex_to_int_complex:
+ return lowerComplexToComplexCast(ctx, op, cir::CastKind::float_to_int);
+ case cir::CastKind::int_complex:
+ return lowerComplexToComplexCast(ctx, op, cir::CastKind::integral);
+ case cir::CastKind::int_complex_to_float_complex:
+ return lowerComplexToComplexCast(ctx, op, cir::CastKind::int_to_float);
+ default:
+ return nullptr;
+ }
+ }();
+
+ if (loweredValue) {
+ op.replaceAllUsesWith(loweredValue);
+ op.erase();
+ }
+}
+
+static mlir::Value buildComplexBinOpLibCall(
+ LoweringPreparePass &pass, CIRBaseBuilderTy &builder,
+ llvm::StringRef (*libFuncNameGetter)(llvm::APFloat::Semantics),
+ mlir::Location loc, cir::ComplexType ty, mlir::Value lhsReal,
+ mlir::Value lhsImag, mlir::Value rhsReal, mlir::Value rhsImag) {
+ cir::FPTypeInterface elementTy =
+ mlir::cast<cir::FPTypeInterface>(ty.getElementType());
+
+ llvm::StringRef libFuncName = libFuncNameGetter(
+ llvm::APFloat::SemanticsToEnum(elementTy.getFloatSemantics()));
+ llvm::SmallVector<mlir::Type, 4> libFuncInputTypes(4, elementTy);
+
+ cir::FuncType libFuncTy = cir::FuncType::get(libFuncInputTypes, ty);
+
+  // Insert a declaration for the runtime function to be used in complex
+  // multiplication and division when needed.
+ cir::FuncOp libFunc;
+ {
+ mlir::OpBuilder::InsertionGuard ipGuard{builder};
+ builder.setInsertionPointToStart(pass.mlirModule.getBody());
+ libFunc = pass.buildRuntimeFunction(builder, libFuncName, loc, libFuncTy);
+ }
+
+ cir::CallOp call =
+ builder.createCallOp(loc, libFunc, {lhsReal, lhsImag, rhsReal, rhsImag});
+ return call.getResult();
+}
+
+static llvm::StringRef
+getComplexMulLibCallName(llvm::APFloat::Semantics semantics) {
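+  // These are the standard complex-multiplication helpers (__mulsc3, __muldc3,
+  // ...) provided by compiler-rt/libgcc; the suffix encodes the floating-point
+  // type.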
+ switch (semantics) {
+ case llvm::APFloat::S_IEEEhalf:
+ return "__mulhc3";
+ case llvm::APFloat::S_IEEEsingle:
+ return "__mulsc3";
+ case llvm::APFloat::S_IEEEdouble:
+ return "__muldc3";
+ case llvm::APFloat::S_PPCDoubleDouble:
+ return "__multc3";
+ case llvm::APFloat::S_x87DoubleExtended:
+ return "__mulxc3";
+ case llvm::APFloat::S_IEEEquad:
+ return "__multc3";
+ default:
+ llvm_unreachable("unsupported floating point type");
+ }
+}
+
+static mlir::Value lowerComplexMul(LoweringPreparePass &pass,
+ CIRBaseBuilderTy &builder,
+ mlir::Location loc, cir::ComplexMulOp op,
+ mlir::Value lhsReal, mlir::Value lhsImag,
+ mlir::Value rhsReal, mlir::Value rhsImag) {
+ // (a+bi) * (c+di) = (ac-bd) + (ad+bc)i
+ mlir::Value resultRealLhs =
+ builder.createBinop(loc, lhsReal, cir::BinOpKind::Mul, rhsReal);
+ mlir::Value resultRealRhs =
+ builder.createBinop(loc, lhsImag, cir::BinOpKind::Mul, rhsImag);
+ mlir::Value resultImagLhs =
+ builder.createBinop(loc, lhsReal, cir::BinOpKind::Mul, rhsImag);
+ mlir::Value resultImagRhs =
+ builder.createBinop(loc, lhsImag, cir::BinOpKind::Mul, rhsReal);
+ mlir::Value resultReal = builder.createBinop(
+ loc, resultRealLhs, cir::BinOpKind::Sub, resultRealRhs);
+ mlir::Value resultImag = builder.createBinop(
+ loc, resultImagLhs, cir::BinOpKind::Add, resultImagRhs);
+ mlir::Value algebraicResult =
+ builder.createComplexCreate(loc, resultReal, resultImag);
+
+ cir::ComplexType complexTy = op.getType();
+ cir::ComplexRangeKind rangeKind = op.getRange();
+ if (mlir::isa<cir::IntType>(complexTy.getElementType()) ||
+ rangeKind == cir::ComplexRangeKind::Basic ||
+ rangeKind == cir::ComplexRangeKind::Improved ||
+ rangeKind == cir::ComplexRangeKind::Promoted)
+ return algebraicResult;
+
+ assert(!cir::MissingFeatures::fastMathFlags());
+
+ // Check whether the real part and the imaginary part of the result are both
+ // NaN. If so, emit a library call to compute the multiplication instead.
+ // We check a value against NaN by comparing the value against itself.
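+  // (NaN is the only value for which `x == x` is false, so a self-comparison
+  // is a portable NaN test.)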
+ mlir::Value resultRealIsNaN = builder.createIsNaN(loc, resultReal);
+ mlir::Value resultImagIsNaN = builder.createIsNaN(loc, resultImag);
+ mlir::Value resultRealAndImagAreNaN =
+ builder.createLogicalAnd(loc, resultRealIsNaN, resultImagIsNaN);
+
+ return builder
+ .create<cir::TernaryOp>(
+ loc, resultRealAndImagAreNaN,
+ [&](mlir::OpBuilder &, mlir::Location) {
+ mlir::Value libCallResult = buildComplexBinOpLibCall(
+ pass, builder, &getComplexMulLibCallName, loc, complexTy,
+ lhsReal, lhsImag, rhsReal, rhsImag);
+ builder.createYield(loc, libCallResult);
+ },
+ [&](mlir::OpBuilder &, mlir::Location) {
+ builder.createYield(loc, algebraicResult);
+ })
+ .getResult();
+}
+
+void LoweringPreparePass::lowerComplexMulOp(cir::ComplexMulOp op) {
+ cir::CIRBaseBuilderTy builder(getContext());
+ builder.setInsertionPointAfter(op);
+ mlir::Location loc = op.getLoc();
+ mlir::TypedValue<cir::ComplexType> lhs = op.getLhs();
+ mlir::TypedValue<cir::ComplexType> rhs = op.getRhs();
+ mlir::Value lhsReal = builder.createComplexReal(loc, lhs);
+ mlir::Value lhsImag = builder.createComplexImag(loc, lhs);
+ mlir::Value rhsReal = builder.createComplexReal(loc, rhs);
+ mlir::Value rhsImag = builder.createComplexImag(loc, rhs);
+ mlir::Value loweredResult = lowerComplexMul(*this, builder, loc, op, lhsReal,
+ lhsImag, rhsReal, rhsImag);
+ op.replaceAllUsesWith(loweredResult);
+ op.erase();
+}
+
void LoweringPreparePass::lowerUnaryOp(cir::UnaryOp op) {
mlir::Type ty = op.getType();
if (!mlir::isa<cir::ComplexType>(ty))
@@ -56,7 +298,8 @@ void LoweringPreparePass::lowerUnaryOp(cir::UnaryOp op) {
case cir::UnaryOpKind::Plus:
case cir::UnaryOpKind::Minus:
- llvm_unreachable("Complex unary Plus/Minus NYI");
+ resultReal = builder.createUnaryOp(loc, opKind, operandReal);
+ resultImag = builder.createUnaryOp(loc, opKind, operandImag);
break;
case cir::UnaryOpKind::Not:
@@ -71,18 +314,120 @@ void LoweringPreparePass::lowerUnaryOp(cir::UnaryOp op) {
op.erase();
}
+static void lowerArrayDtorCtorIntoLoop(cir::CIRBaseBuilderTy &builder,
+ clang::ASTContext *astCtx,
+ mlir::Operation *op, mlir::Type eltTy,
+ mlir::Value arrayAddr, uint64_t arrayLen,
+ bool isCtor) {
+ // Generate loop to call into ctor/dtor for every element.
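+  // The emitted loop has roughly the following shape, sketched as pseudo-C
+  // (mirroring the start/stop/stride values computed below):
+  //   T *elt = isCtor ? begin : end;
+  //   do {
+  //     ctor_or_dtor(elt);
+  //     elt += isCtor ? 1 : -1;
+  //   } while (elt != (isCtor ? end : begin));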
+ mlir::Location loc = op->getLoc();
+
+ // TODO: instead of getting the size from the AST context, create alias for
+ // PtrDiffTy and unify with CIRGen stuff.
+ const unsigned sizeTypeSize =
+ astCtx->getTypeSize(astCtx->getSignedSizeType());
+ uint64_t endOffset = isCtor ? arrayLen : arrayLen - 1;
+ mlir::Value endOffsetVal =
+ builder.getUnsignedInt(loc, endOffset, sizeTypeSize);
+
+ auto begin = cir::CastOp::create(builder, loc, eltTy,
+ cir::CastKind::array_to_ptrdecay, arrayAddr);
+ mlir::Value end =
+ cir::PtrStrideOp::create(builder, loc, eltTy, begin, endOffsetVal);
+ mlir::Value start = isCtor ? begin : end;
+ mlir::Value stop = isCtor ? end : begin;
+
+ mlir::Value tmpAddr = builder.createAlloca(
+ loc, /*addr type*/ builder.getPointerTo(eltTy),
+ /*var type*/ eltTy, "__array_idx", builder.getAlignmentAttr(1));
+ builder.createStore(loc, start, tmpAddr);
+
+ cir::DoWhileOp loop = builder.createDoWhile(
+ loc,
+ /*condBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ auto currentElement = b.create<cir::LoadOp>(loc, eltTy, tmpAddr);
+ mlir::Type boolTy = cir::BoolType::get(b.getContext());
+ auto cmp = builder.create<cir::CmpOp>(loc, boolTy, cir::CmpOpKind::ne,
+ currentElement, stop);
+ builder.createCondition(cmp);
+ },
+ /*bodyBuilder=*/
+ [&](mlir::OpBuilder &b, mlir::Location loc) {
+ auto currentElement = b.create<cir::LoadOp>(loc, eltTy, tmpAddr);
+
+ cir::CallOp ctorCall;
+ op->walk([&](cir::CallOp c) { ctorCall = c; });
+ assert(ctorCall && "expected ctor call");
+
+ // Array elements get constructed in order but destructed in reverse.
+ mlir::Value stride;
+ if (isCtor)
+ stride = builder.getUnsignedInt(loc, 1, sizeTypeSize);
+ else
+ stride = builder.getSignedInt(loc, -1, sizeTypeSize);
+
+ ctorCall->moveBefore(stride.getDefiningOp());
+ ctorCall->setOperand(0, currentElement);
+ auto nextElement = cir::PtrStrideOp::create(builder, loc, eltTy,
+ currentElement, stride);
+
+ // Store the element pointer to the temporary variable
+ builder.createStore(loc, nextElement, tmpAddr);
+ builder.createYield(loc);
+ });
+
+ op->replaceAllUsesWith(loop);
+ op->erase();
+}
+
+void LoweringPreparePass::lowerArrayDtor(cir::ArrayDtor op) {
+ CIRBaseBuilderTy builder(getContext());
+ builder.setInsertionPointAfter(op.getOperation());
+
+ mlir::Type eltTy = op->getRegion(0).getArgument(0).getType();
+ assert(!cir::MissingFeatures::vlas());
+ auto arrayLen =
+ mlir::cast<cir::ArrayType>(op.getAddr().getType().getPointee()).getSize();
+ lowerArrayDtorCtorIntoLoop(builder, astCtx, op, eltTy, op.getAddr(), arrayLen,
+ false);
+}
+
+void LoweringPreparePass::lowerArrayCtor(cir::ArrayCtor op) {
+ cir::CIRBaseBuilderTy builder(getContext());
+ builder.setInsertionPointAfter(op.getOperation());
+
+ mlir::Type eltTy = op->getRegion(0).getArgument(0).getType();
+ assert(!cir::MissingFeatures::vlas());
+ auto arrayLen =
+ mlir::cast<cir::ArrayType>(op.getAddr().getType().getPointee()).getSize();
+ lowerArrayDtorCtorIntoLoop(builder, astCtx, op, eltTy, op.getAddr(), arrayLen,
+ true);
+}
+
void LoweringPreparePass::runOnOp(mlir::Operation *op) {
- if (auto unary = dyn_cast<cir::UnaryOp>(op))
+ if (auto arrayCtor = dyn_cast<ArrayCtor>(op))
+ lowerArrayCtor(arrayCtor);
+ else if (auto arrayDtor = dyn_cast<cir::ArrayDtor>(op))
+ lowerArrayDtor(arrayDtor);
+ else if (auto cast = mlir::dyn_cast<cir::CastOp>(op))
+ lowerCastOp(cast);
+ else if (auto complexMul = mlir::dyn_cast<cir::ComplexMulOp>(op))
+ lowerComplexMulOp(complexMul);
+ else if (auto unary = mlir::dyn_cast<cir::UnaryOp>(op))
lowerUnaryOp(unary);
}
void LoweringPreparePass::runOnOperation() {
mlir::Operation *op = getOperation();
+ if (isa<::mlir::ModuleOp>(op))
+ mlirModule = cast<::mlir::ModuleOp>(op);
llvm::SmallVector<mlir::Operation *> opsToTransform;
op->walk([&](mlir::Operation *op) {
- if (mlir::isa<cir::UnaryOp>(op))
+ if (mlir::isa<cir::ArrayCtor, cir::ArrayDtor, cir::CastOp,
+ cir::ComplexMulOp, cir::UnaryOp>(op))
opsToTransform.push_back(op);
});
@@ -93,3 +438,10 @@ void LoweringPreparePass::runOnOperation() {
std::unique_ptr<Pass> mlir::createLoweringPreparePass() {
return std::make_unique<LoweringPreparePass>();
}
+
+std::unique_ptr<Pass>
+mlir::createLoweringPreparePass(clang::ASTContext *astCtx) {
+ auto pass = std::make_unique<LoweringPreparePass>();
+ pass->setASTContext(astCtx);
+ return std::move(pass);
+}
diff --git a/clang/lib/CIR/Lowering/CIRPasses.cpp b/clang/lib/CIR/Lowering/CIRPasses.cpp
index 5607abc..bb9781b 100644
--- a/clang/lib/CIR/Lowering/CIRPasses.cpp
+++ b/clang/lib/CIR/Lowering/CIRPasses.cpp
@@ -31,7 +31,7 @@ mlir::LogicalResult runCIRToCIRPasses(mlir::ModuleOp theModule,
if (enableCIRSimplify)
pm.addPass(mlir::createCIRSimplifyPass());
- pm.addPass(mlir::createLoweringPreparePass());
+ pm.addPass(mlir::createLoweringPreparePass(&astContext));
pm.enableVerifier(enableVerifier);
(void)mlir::applyPassManagerCLOptions(pm);
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 3cd7de0..957a51a 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -521,6 +521,32 @@ mlir::LogicalResult CIRToLLVMBitCtzOpLowering::matchAndRewrite(
return mlir::LogicalResult::success();
}
+mlir::LogicalResult CIRToLLVMBitFfsOpLowering::matchAndRewrite(
+ cir::BitFfsOp op, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ auto resTy = getTypeConverter()->convertType(op.getType());
+ auto ctz = rewriter.create<mlir::LLVM::CountTrailingZerosOp>(
+ op.getLoc(), resTy, adaptor.getInput(), /*is_zero_poison=*/true);
+
+ auto one = rewriter.create<mlir::LLVM::ConstantOp>(op.getLoc(), resTy, 1);
+ auto ctzAddOne = rewriter.create<mlir::LLVM::AddOp>(op.getLoc(), ctz, one);
+
+ auto zeroInputTy = rewriter.create<mlir::LLVM::ConstantOp>(
+ op.getLoc(), adaptor.getInput().getType(), 0);
+ auto isZero = rewriter.create<mlir::LLVM::ICmpOp>(
+ op.getLoc(),
+ mlir::LLVM::ICmpPredicateAttr::get(rewriter.getContext(),
+ mlir::LLVM::ICmpPredicate::eq),
+ adaptor.getInput(), zeroInputTy);
+
+ auto zero = rewriter.create<mlir::LLVM::ConstantOp>(op.getLoc(), resTy, 0);
+ auto res = rewriter.create<mlir::LLVM::SelectOp>(op.getLoc(), isZero, zero,
+ ctzAddOne);
+ rewriter.replaceOp(op, res);
+
+ return mlir::LogicalResult::success();
+}
+
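The select pattern built above implements the conventional ffs() contract, where a zero input yields zero. A minimal sketch of the equivalent scalar computation (illustrative only):

  // Reference for the lowering above: cttz is poison for a zero input, hence
  // the explicit select on x == 0.
  inline int ffsReference(unsigned x) {
    return x == 0 ? 0 : __builtin_ctz(x) + 1;
  }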
mlir::LogicalResult CIRToLLVMBitParityOpLowering::matchAndRewrite(
cir::BitParityOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
@@ -919,13 +945,45 @@ rewriteCallOrInvoke(mlir::Operation *op, mlir::ValueRange callOperands,
memoryEffects, noUnwind, willReturn);
mlir::LLVM::LLVMFunctionType llvmFnTy;
+
+ // Temporary to handle the case where we need to prepend an operand if the
+ // callee is an alias.
+ SmallVector<mlir::Value> adjustedCallOperands;
+
if (calleeAttr) { // direct call
- mlir::FunctionOpInterface fn =
- mlir::SymbolTable::lookupNearestSymbolFrom<mlir::FunctionOpInterface>(
- op, calleeAttr);
- assert(fn && "Did not find function for call");
- llvmFnTy = cast<mlir::LLVM::LLVMFunctionType>(
- converter->convertType(fn.getFunctionType()));
+ mlir::Operation *callee =
+ mlir::SymbolTable::lookupNearestSymbolFrom(op, calleeAttr);
+ if (auto fn = mlir::dyn_cast<mlir::FunctionOpInterface>(callee)) {
+ llvmFnTy = converter->convertType<mlir::LLVM::LLVMFunctionType>(
+ fn.getFunctionType());
+ assert(llvmFnTy && "Failed to convert function type");
+    } else if (auto alias = mlir::dyn_cast<mlir::LLVM::AliasOp>(callee)) {
+      // The callee is an alias. In that case,
+ // we need to prepend the address of the alias to the operands. The
+ // way aliases work in the LLVM dialect is a little counter-intuitive.
+ // The AliasOp itself is a pseudo-function that returns the address of
+ // the global value being aliased, but when we generate the call we
+ // need to insert an operation that gets the address of the AliasOp.
+ // This all gets sorted out when the LLVM dialect is lowered to LLVM IR.
+ auto symAttr = mlir::cast<mlir::FlatSymbolRefAttr>(calleeAttr);
+ auto addrOfAlias =
+ mlir::LLVM::AddressOfOp::create(
+ rewriter, op->getLoc(),
+ mlir::LLVM::LLVMPointerType::get(rewriter.getContext()), symAttr)
+ .getResult();
+ adjustedCallOperands.push_back(addrOfAlias);
+
+ // Now add the regular operands and assign this to the range value.
+ llvm::append_range(adjustedCallOperands, callOperands);
+ callOperands = adjustedCallOperands;
+
+ // Clear the callee attribute because we're calling an alias.
+ calleeAttr = {};
+ llvmFnTy = mlir::cast<mlir::LLVM::LLVMFunctionType>(alias.getType());
+ } else {
+ // Was this an ifunc?
+ return op->emitError("Unexpected callee type!");
+ }
} else { // indirect call
assert(!op->getOperands().empty() &&
"operands list must no be empty for the indirect call");
@@ -1027,6 +1085,12 @@ mlir::LogicalResult CIRToLLVMConstantOpLowering::matchAndRewrite(
mlir::ConversionPatternRewriter &rewriter) const {
mlir::Attribute attr = op.getValue();
+ if (mlir::isa<cir::PoisonAttr>(attr)) {
+ rewriter.replaceOpWithNewOp<mlir::LLVM::PoisonOp>(
+ op, getTypeConverter()->convertType(op.getType()));
+ return mlir::success();
+ }
+
if (mlir::isa<mlir::IntegerType>(op.getType())) {
// Verified cir.const operations cannot actually be of these types, but the
// lowering pass may generate temporary cir.const operations with these
@@ -1166,6 +1230,30 @@ void CIRToLLVMFuncOpLowering::lowerFuncAttributes(
}
}
+mlir::LogicalResult CIRToLLVMFuncOpLowering::matchAndRewriteAlias(
+ cir::FuncOp op, llvm::StringRef aliasee, mlir::Type ty, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ SmallVector<mlir::NamedAttribute, 4> attributes;
+ lowerFuncAttributes(op, /*filterArgAndResAttrs=*/false, attributes);
+
+ mlir::Location loc = op.getLoc();
+ auto aliasOp = rewriter.replaceOpWithNewOp<mlir::LLVM::AliasOp>(
+ op, ty, convertLinkage(op.getLinkage()), op.getName(), op.getDsoLocal(),
+ /*threadLocal=*/false, attributes);
+
+ // Create the alias body
+ mlir::OpBuilder builder(op.getContext());
+ mlir::Block *block = builder.createBlock(&aliasOp.getInitializerRegion());
+ builder.setInsertionPointToStart(block);
+ // The type of AddressOfOp is always a pointer.
+ assert(!cir::MissingFeatures::addressSpace());
+ mlir::Type ptrTy = mlir::LLVM::LLVMPointerType::get(ty.getContext());
+ auto addrOp = mlir::LLVM::AddressOfOp::create(builder, loc, ptrTy, aliasee);
+ mlir::LLVM::ReturnOp::create(builder, loc, addrOp);
+
+ return mlir::success();
+}
+
mlir::LogicalResult CIRToLLVMFuncOpLowering::matchAndRewrite(
cir::FuncOp op, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const {
@@ -1190,6 +1278,11 @@ mlir::LogicalResult CIRToLLVMFuncOpLowering::matchAndRewrite(
resultType ? resultType : mlir::LLVM::LLVMVoidType::get(getContext()),
signatureConversion.getConvertedTypes(),
/*isVarArg=*/fnType.isVarArg());
+
+ // If this is an alias, it needs to be lowered to llvm::AliasOp.
+ if (std::optional<llvm::StringRef> aliasee = op.getAliasee())
+ return matchAndRewriteAlias(op, *aliasee, llvmFnTy, adaptor, rewriter);
+
// LLVMFuncOp expects a single FileLine Location instead of a fused
// location.
mlir::Location loc = op.getLoc();
@@ -2083,6 +2176,7 @@ void ConvertCIRToLLVMPass::runOnOperation() {
CIRToLLVMBitClrsbOpLowering,
CIRToLLVMBitClzOpLowering,
CIRToLLVMBitCtzOpLowering,
+ CIRToLLVMBitFfsOpLowering,
CIRToLLVMBitParityOpLowering,
CIRToLLVMBitPopcountOpLowering,
CIRToLLVMBitReverseOpLowering,
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
index 2911ced..f339d43 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
@@ -84,6 +84,16 @@ public:
mlir::ConversionPatternRewriter &) const override;
};
+class CIRToLLVMBitFfsOpLowering
+ : public mlir::OpConversionPattern<cir::BitFfsOp> {
+public:
+ using mlir::OpConversionPattern<cir::BitFfsOp>::OpConversionPattern;
+
+ mlir::LogicalResult
+ matchAndRewrite(cir::BitFfsOp op, OpAdaptor,
+ mlir::ConversionPatternRewriter &) const override;
+};
+
class CIRToLLVMBitParityOpLowering
: public mlir::OpConversionPattern<cir::BitParityOp> {
public:
@@ -257,6 +267,11 @@ class CIRToLLVMFuncOpLowering : public mlir::OpConversionPattern<cir::FuncOp> {
cir::FuncOp func, bool filterArgAndResAttrs,
mlir::SmallVectorImpl<mlir::NamedAttribute> &result) const;
+ mlir::LogicalResult
+ matchAndRewriteAlias(cir::FuncOp op, llvm::StringRef aliasee, mlir::Type ty,
+ OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const;
+
public:
using mlir::OpConversionPattern<cir::FuncOp>::OpConversionPattern;
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index 1b72578..0b8b824 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -1027,12 +1027,6 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
MPM.addPass(
createModuleToFunctionPassAdaptor(ObjCARCExpandPass()));
});
- PB.registerPipelineEarlySimplificationEPCallback(
- [](ModulePassManager &MPM, OptimizationLevel Level,
- ThinOrFullLTOPhase) {
- if (Level != OptimizationLevel::O0)
- MPM.addPass(ObjCARCAPElimPass());
- });
PB.registerScalarOptimizerLateEPCallback(
[](FunctionPassManager &FPM, OptimizationLevel Level) {
if (Level != OptimizationLevel::O0)
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 3f784fc..e1f7ea0 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -1148,7 +1148,8 @@ llvm::Value *CodeGenFunction::emitCountedByPointerSize(
assert(E->getCastKind() == CK_LValueToRValue &&
"must be an LValue to RValue cast");
- const MemberExpr *ME = dyn_cast<MemberExpr>(E->getSubExpr());
+ const MemberExpr *ME =
+ dyn_cast<MemberExpr>(E->getSubExpr()->IgnoreParenNoopCasts(getContext()));
if (!ME)
return nullptr;
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index 0bceece..d9bd443 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -2852,20 +2852,28 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
if (AI.getInReg())
Attrs.addAttribute(llvm::Attribute::InReg);
- // Depending on the ABI, this may be either a byval or a dead_on_return
- // argument.
- if (AI.getIndirectByVal()) {
- Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
- } else {
- // Add dead_on_return when the object's lifetime ends in the callee.
- // This includes trivially-destructible objects, as well as objects
- // whose destruction / clean-up is carried out within the callee (e.g.,
- // Obj-C ARC-managed structs, MSVC callee-destroyed objects).
- if (!ParamType.isDestructedType() || !ParamType->isRecordType() ||
- ParamType->castAs<RecordType>()
- ->getDecl()
- ->isParamDestroyedInCallee())
- Attrs.addAttribute(llvm::Attribute::DeadOnReturn);
+ // HLSL out and inout parameters must not be marked with ByVal or
+ // DeadOnReturn attributes because stores to these parameters by the
+ // callee are visible to the caller.
+ if (auto ParamABI = FI.getExtParameterInfo(ArgNo).getABI();
+ ParamABI != ParameterABI::HLSLOut &&
+ ParamABI != ParameterABI::HLSLInOut) {
+
+ // Depending on the ABI, this may be either a byval or a dead_on_return
+ // argument.
+ if (AI.getIndirectByVal()) {
+ Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
+ } else {
+ // Add dead_on_return when the object's lifetime ends in the callee.
+ // This includes trivially-destructible objects, as well as objects
+ // whose destruction / clean-up is carried out within the callee
+ // (e.g., Obj-C ARC-managed structs, MSVC callee-destroyed objects).
+ if (!ParamType.isDestructedType() || !ParamType->isRecordType() ||
+ ParamType->castAs<RecordType>()
+ ->getDecl()
+ ->isParamDestroyedInCallee())
+ Attrs.addAttribute(llvm::Attribute::DeadOnReturn);
+ }
}
auto *Decl = ParamType->getAsRecordDecl();
diff --git a/clang/lib/CodeGen/CGCoroutine.cpp b/clang/lib/CodeGen/CGCoroutine.cpp
index 5ee9089..827385f 100644
--- a/clang/lib/CodeGen/CGCoroutine.cpp
+++ b/clang/lib/CodeGen/CGCoroutine.cpp
@@ -435,7 +435,7 @@ CodeGenFunction::generateAwaitSuspendWrapper(Twine const &CoroName,
llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
llvm::Function *Fn = llvm::Function::Create(
- LTy, llvm::GlobalValue::PrivateLinkage, FuncName, &CGM.getModule());
+ LTy, llvm::GlobalValue::InternalLinkage, FuncName, &CGM.getModule());
Fn->addParamAttr(0, llvm::Attribute::AttrKind::NonNull);
Fn->addParamAttr(0, llvm::Attribute::AttrKind::NoUndef);
diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp
index a371b67..7a69b5d 100644
--- a/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -2641,6 +2641,8 @@ void CGDebugInfo::emitVTableSymbol(llvm::GlobalVariable *VTable,
const CXXRecordDecl *RD) {
if (!CGM.getTarget().getCXXABI().isItaniumFamily())
return;
+ if (DebugKind <= llvm::codegenoptions::DebugLineTablesOnly)
+ return;
ASTContext &Context = CGM.getContext();
StringRef SymbolName = "_vtable$";
@@ -6435,7 +6437,7 @@ CodeGenFunction::LexicalScope::~LexicalScope() {
static std::string SanitizerHandlerToCheckLabel(SanitizerHandler Handler) {
std::string Label;
switch (Handler) {
-#define SANITIZER_CHECK(Enum, Name, Version) \
+#define SANITIZER_CHECK(Enum, Name, Version, Msg) \
case Enum: \
Label = "__ubsan_check_" #Name; \
break;
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 85c7688..5a3d4e4 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -85,6 +85,17 @@ enum VariableTypeDescriptorKind : uint16_t {
// Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//
+static llvm::StringRef GetUBSanTrapForHandler(SanitizerHandler ID) {
+ switch (ID) {
+#define SANITIZER_CHECK(Enum, Name, Version, Msg) \
+ case SanitizerHandler::Enum: \
+ return Msg;
+ LIST_SANITIZER_CHECKS
+#undef SANITIZER_CHECK
+ }
+ llvm_unreachable("unhandled switch case");
+}
+
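The switch above is produced entirely by the SANITIZER_CHECK X-macro; a single expanded case looks roughly like the following (illustrative):

  // Illustrative expansion of one LIST_SANITIZER_CHECKS entry:
  //   case SanitizerHandler::AddOverflow:
  //     return "Integer addition overflowed";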
 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
RawAddress
@@ -3649,7 +3660,7 @@ struct SanitizerHandlerInfo {
}
const SanitizerHandlerInfo SanitizerHandlers[] = {
-#define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
+#define SANITIZER_CHECK(Enum, Name, Version, Msg) {#Name, Version},
LIST_SANITIZER_CHECKS
#undef SANITIZER_CHECK
};
@@ -3954,6 +3965,8 @@ void CodeGenFunction::EmitCfiCheckFail() {
StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
SourceLocation());
+ ApplyDebugLocation ADL = ApplyDebugLocation::CreateArtificial(*this);
+
// This function is not affected by NoSanitizeList. This function does
// not have a source location, but "src:*" would still apply. Revert any
// changes to SanOpts made in StartFunction.
@@ -4051,6 +4064,15 @@ void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
+ llvm::DILocation *TrapLocation = Builder.getCurrentDebugLocation();
+ llvm::StringRef TrapMessage = GetUBSanTrapForHandler(CheckHandlerID);
+
+ if (getDebugInfo() && !TrapMessage.empty() &&
+ CGM.getCodeGenOpts().SanitizeDebugTrapReasons && TrapLocation) {
+ TrapLocation = getDebugInfo()->CreateTrapFailureMessageFor(
+ TrapLocation, "Undefined Behavior Sanitizer", TrapMessage);
+ }
+
NoMerge = NoMerge || !CGM.getCodeGenOpts().OptimizationLevel ||
(CurCodeDecl && CurCodeDecl->hasAttr<OptimizeNoneAttr>());
@@ -4059,8 +4081,8 @@ void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
auto Call = TrapBB->begin();
assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
- Call->applyMergedLocation(Call->getDebugLoc(),
- Builder.getCurrentDebugLocation());
+ Call->applyMergedLocation(Call->getDebugLoc(), TrapLocation);
+
Builder.CreateCondBr(Checked, Cont, TrapBB,
MDHelper.createLikelyBranchWeights());
} else {
@@ -4069,6 +4091,8 @@ void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
MDHelper.createLikelyBranchWeights());
EmitBlock(TrapBB);
+ ApplyDebugLocation applyTrapDI(*this, TrapLocation);
+
llvm::CallInst *TrapCall =
Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
llvm::ConstantInt::get(CGM.Int8Ty, CheckHandlerID));
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index 359e30c..b8238a4 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -2146,30 +2146,9 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
return;
}
- // We might be deleting a pointer to array. If so, GEP down to the
- // first non-array element.
- // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
- if (DeleteTy->isConstantArrayType()) {
- llvm::Value *Zero = Builder.getInt32(0);
- SmallVector<llvm::Value*,8> GEP;
-
- GEP.push_back(Zero); // point at the outermost array
-
- // For each layer of array type we're pointing at:
- while (const ConstantArrayType *Arr
- = getContext().getAsConstantArrayType(DeleteTy)) {
- // 1. Unpeel the array type.
- DeleteTy = Arr->getElementType();
-
- // 2. GEP to the first element of the array.
- GEP.push_back(Zero);
- }
-
- Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, ConvertTypeForMem(DeleteTy),
- Ptr.getAlignment(), "del.first");
- }
-
- assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());
+ // We might be deleting a pointer to array.
+ DeleteTy = getContext().getBaseElementType(DeleteTy);
+ Ptr = Ptr.withElementType(ConvertTypeForMem(DeleteTy));
if (E->isArrayForm()) {
EmitArrayDelete(*this, E, Ptr, DeleteTy);
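As a concrete illustration of the simplified path above (hypothetical example): deleting through an A (*)[3][7] pointer no longer needs the manual GEP walk, because the base element type already strips every array layer:

  // Illustrative only:
  //   DeleteTy                                  == A[3][7]
  //   getContext().getBaseElementType(DeleteTy) == A
  // Ptr is then retyped to the memory type of A before the array or scalar
  // delete path runs.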
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index ce2dd4d..91237cf 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -7080,6 +7080,110 @@ private:
return ConstLength.getSExtValue() != 1;
}
+ /// A helper class to copy structures with overlapped elements, i.e. those
+ /// which have mappings of both "s" and "s.mem". Consecutive elements that
+ /// are not explicitly copied have mapping nodes synthesized for them,
+ /// taking care to avoid generating zero-sized copies.
+ class CopyOverlappedEntryGaps {
+ CodeGenFunction &CGF;
+ MapCombinedInfoTy &CombinedInfo;
+ OpenMPOffloadMappingFlags Flags = OpenMPOffloadMappingFlags::OMP_MAP_NONE;
+ const ValueDecl *MapDecl = nullptr;
+ const Expr *MapExpr = nullptr;
+ Address BP = Address::invalid();
+ bool IsNonContiguous = false;
+ uint64_t DimSize = 0;
+ // These elements track the position as the struct is iterated over
+ // (in order of increasing element address).
+ const RecordDecl *LastParent = nullptr;
+ uint64_t Cursor = 0;
+ unsigned LastIndex = -1u;
+ Address LB = Address::invalid();
+
+ public:
+ CopyOverlappedEntryGaps(CodeGenFunction &CGF,
+ MapCombinedInfoTy &CombinedInfo,
+ OpenMPOffloadMappingFlags Flags,
+ const ValueDecl *MapDecl, const Expr *MapExpr,
+ Address BP, Address LB, bool IsNonContiguous,
+ uint64_t DimSize)
+ : CGF(CGF), CombinedInfo(CombinedInfo), Flags(Flags), MapDecl(MapDecl),
+ MapExpr(MapExpr), BP(BP), IsNonContiguous(IsNonContiguous),
+ DimSize(DimSize), LB(LB) {}
+
+ void processField(
+ const OMPClauseMappableExprCommon::MappableComponent &MC,
+ const FieldDecl *FD,
+ llvm::function_ref<LValue(CodeGenFunction &, const MemberExpr *)>
+ EmitMemberExprBase) {
+ const RecordDecl *RD = FD->getParent();
+ const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
+ uint64_t FieldOffset = RL.getFieldOffset(FD->getFieldIndex());
+ uint64_t FieldSize =
+ CGF.getContext().getTypeSize(FD->getType().getCanonicalType());
+ Address ComponentLB = Address::invalid();
+
+ if (FD->getType()->isLValueReferenceType()) {
+ const auto *ME = cast<MemberExpr>(MC.getAssociatedExpression());
+ LValue BaseLVal = EmitMemberExprBase(CGF, ME);
+ ComponentLB =
+ CGF.EmitLValueForFieldInitialization(BaseLVal, FD).getAddress();
+ } else {
+ ComponentLB =
+ CGF.EmitOMPSharedLValue(MC.getAssociatedExpression()).getAddress();
+ }
+
+ if (!LastParent)
+ LastParent = RD;
+ if (FD->getParent() == LastParent) {
+ if (FD->getFieldIndex() != LastIndex + 1)
+ copyUntilField(FD, ComponentLB);
+ } else {
+ LastParent = FD->getParent();
+ if (((int64_t)FieldOffset - (int64_t)Cursor) > 0)
+ copyUntilField(FD, ComponentLB);
+ }
+ Cursor = FieldOffset + FieldSize;
+ LastIndex = FD->getFieldIndex();
+ LB = CGF.Builder.CreateConstGEP(ComponentLB, 1);
+ }
+
+ void copyUntilField(const FieldDecl *FD, Address ComponentLB) {
+ llvm::Value *ComponentLBPtr = ComponentLB.emitRawPointer(CGF);
+ llvm::Value *LBPtr = LB.emitRawPointer(CGF);
+ llvm::Value *Size =
+ CGF.Builder.CreatePtrDiff(CGF.Int8Ty, ComponentLBPtr, LBPtr);
+ copySizedChunk(LBPtr, Size);
+ }
+
+ void copyUntilEnd(Address HB) {
+ if (LastParent) {
+ const ASTRecordLayout &RL =
+ CGF.getContext().getASTRecordLayout(LastParent);
+ if ((uint64_t)CGF.getContext().toBits(RL.getSize()) <= Cursor)
+ return;
+ }
+ llvm::Value *LBPtr = LB.emitRawPointer(CGF);
+ llvm::Value *Size = CGF.Builder.CreatePtrDiff(
+ CGF.Int8Ty, CGF.Builder.CreateConstGEP(HB, 1).emitRawPointer(CGF),
+ LBPtr);
+ copySizedChunk(LBPtr, Size);
+ }
+
+ void copySizedChunk(llvm::Value *Base, llvm::Value *Size) {
+ CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
+ CombinedInfo.BasePointers.push_back(BP.emitRawPointer(CGF));
+ CombinedInfo.DevicePtrDecls.push_back(nullptr);
+ CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
+ CombinedInfo.Pointers.push_back(Base);
+ CombinedInfo.Sizes.push_back(
+ CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
+ CombinedInfo.Types.push_back(Flags);
+ CombinedInfo.Mappers.push_back(nullptr);
+ CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize : 1);
+ }
+ };
+
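For illustration, the helper fires when a struct and one of its members are mapped together, as the class comment describes. A hedged sketch of such a mapping (assumed usage, not taken from an existing test):

  // Illustrative scenario (assumption): s.mem is the overlapped element, so
  // the bytes of s before s.mem and the bytes after it are emitted as
  // separate gap copies via processField() and copyUntilEnd().
  //   struct S { int a; double mem; int c; } s;
  //   #pragma omp target map(tofrom: s) map(to: s.mem)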
/// Generate the base pointers, section pointers, sizes, map type bits, and
/// user-defined mappers (all included in \a CombinedInfo) for the provided
/// map type, map or motion modifiers, and expression components.
@@ -7570,63 +7674,22 @@ private:
getMapTypeBits(MapType, MapModifiers, MotionModifiers, IsImplicit,
/*AddPtrFlag=*/false,
/*AddIsTargetParamFlag=*/false, IsNonContiguous);
- llvm::Value *Size = nullptr;
+ CopyOverlappedEntryGaps CopyGaps(CGF, CombinedInfo, Flags, MapDecl,
+ MapExpr, BP, LB, IsNonContiguous,
+ DimSize);
// Do bitcopy of all non-overlapped structure elements.
for (OMPClauseMappableExprCommon::MappableExprComponentListRef
Component : OverlappedElements) {
- Address ComponentLB = Address::invalid();
for (const OMPClauseMappableExprCommon::MappableComponent &MC :
Component) {
if (const ValueDecl *VD = MC.getAssociatedDeclaration()) {
- const auto *FD = dyn_cast<FieldDecl>(VD);
- if (FD && FD->getType()->isLValueReferenceType()) {
- const auto *ME =
- cast<MemberExpr>(MC.getAssociatedExpression());
- LValue BaseLVal = EmitMemberExprBase(CGF, ME);
- ComponentLB =
- CGF.EmitLValueForFieldInitialization(BaseLVal, FD)
- .getAddress();
- } else {
- ComponentLB =
- CGF.EmitOMPSharedLValue(MC.getAssociatedExpression())
- .getAddress();
+ if (const auto *FD = dyn_cast<FieldDecl>(VD)) {
+ CopyGaps.processField(MC, FD, EmitMemberExprBase);
}
- llvm::Value *ComponentLBPtr = ComponentLB.emitRawPointer(CGF);
- llvm::Value *LBPtr = LB.emitRawPointer(CGF);
- Size = CGF.Builder.CreatePtrDiff(CGF.Int8Ty, ComponentLBPtr,
- LBPtr);
- break;
}
}
- assert(Size && "Failed to determine structure size");
- CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
- CombinedInfo.BasePointers.push_back(BP.emitRawPointer(CGF));
- CombinedInfo.DevicePtrDecls.push_back(nullptr);
- CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
- CombinedInfo.Pointers.push_back(LB.emitRawPointer(CGF));
- CombinedInfo.Sizes.push_back(CGF.Builder.CreateIntCast(
- Size, CGF.Int64Ty, /*isSigned=*/true));
- CombinedInfo.Types.push_back(Flags);
- CombinedInfo.Mappers.push_back(nullptr);
- CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
- : 1);
- LB = CGF.Builder.CreateConstGEP(ComponentLB, 1);
}
- CombinedInfo.Exprs.emplace_back(MapDecl, MapExpr);
- CombinedInfo.BasePointers.push_back(BP.emitRawPointer(CGF));
- CombinedInfo.DevicePtrDecls.push_back(nullptr);
- CombinedInfo.DevicePointers.push_back(DeviceInfoTy::None);
- CombinedInfo.Pointers.push_back(LB.emitRawPointer(CGF));
- llvm::Value *LBPtr = LB.emitRawPointer(CGF);
- Size = CGF.Builder.CreatePtrDiff(
- CGF.Int8Ty, CGF.Builder.CreateConstGEP(HB, 1).emitRawPointer(CGF),
- LBPtr);
- CombinedInfo.Sizes.push_back(
- CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
- CombinedInfo.Types.push_back(Flags);
- CombinedInfo.Mappers.push_back(nullptr);
- CombinedInfo.NonContigInfo.Dims.push_back(IsNonContiguous ? DimSize
- : 1);
+ CopyGaps.copyUntilEnd(HB);
break;
}
llvm::Value *Size = getExprTypeSize(I->getAssociatedExpression());
diff --git a/clang/lib/CodeGen/CodeGenAction.cpp b/clang/lib/CodeGen/CodeGenAction.cpp
index eb5b604..dc54c97 100644
--- a/clang/lib/CodeGen/CodeGenAction.cpp
+++ b/clang/lib/CodeGen/CodeGenAction.cpp
@@ -908,6 +908,8 @@ bool CodeGenAction::loadLinkModules(CompilerInstance &CI) {
bool CodeGenAction::hasIRSupport() const { return true; }
void CodeGenAction::EndSourceFileAction() {
+ ASTFrontendAction::EndSourceFileAction();
+
// If the consumer creation failed, do nothing.
if (!getCompilerInstance().hasASTConsumer())
return;
@@ -932,7 +934,7 @@ CodeGenerator *CodeGenAction::getCodeGenerator() const {
bool CodeGenAction::BeginSourceFileAction(CompilerInstance &CI) {
if (CI.getFrontendOpts().GenReducedBMI)
CI.getLangOpts().setCompilingModule(LangOptions::CMK_ModuleInterface);
- return true;
+ return ASTFrontendAction::BeginSourceFileAction(CI);
}
static std::unique_ptr<raw_pwrite_stream>
@@ -976,7 +978,7 @@ CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
CI.getPreprocessor());
std::unique_ptr<BackendConsumer> Result(new BackendConsumer(
- CI, BA, &CI.getVirtualFileSystem(), *VMContext, std::move(LinkModules),
+ CI, BA, CI.getVirtualFileSystemPtr(), *VMContext, std::move(LinkModules),
InFile, std::move(OS), CoverageInfo));
BEConsumer = Result.get();
@@ -1154,7 +1156,7 @@ void CodeGenAction::ExecuteAction() {
// Set clang diagnostic handler. To do this we need to create a fake
// BackendConsumer.
- BackendConsumer Result(CI, BA, &CI.getVirtualFileSystem(), *VMContext,
+ BackendConsumer Result(CI, BA, CI.getVirtualFileSystemPtr(), *VMContext,
std::move(LinkModules), "", nullptr, nullptr,
TheModule.get());
diff --git a/clang/lib/CodeGen/ObjectFilePCHContainerWriter.cpp b/clang/lib/CodeGen/ObjectFilePCHContainerWriter.cpp
index 95971e5..074f2a5 100644
--- a/clang/lib/CodeGen/ObjectFilePCHContainerWriter.cpp
+++ b/clang/lib/CodeGen/ObjectFilePCHContainerWriter.cpp
@@ -146,7 +146,7 @@ public:
: CI(CI), Diags(CI.getDiagnostics()), MainFileName(MainFileName),
OutputFileName(OutputFileName), Ctx(nullptr),
MMap(CI.getPreprocessor().getHeaderSearchInfo().getModuleMap()),
- FS(&CI.getVirtualFileSystem()),
+ FS(CI.getVirtualFileSystemPtr()),
HeaderSearchOpts(CI.getHeaderSearchOpts()),
PreprocessorOpts(CI.getPreprocessorOpts()),
TargetOpts(CI.getTargetOpts()), LangOpts(CI.getLangOpts()),
diff --git a/clang/lib/CodeGen/SanitizerHandler.h b/clang/lib/CodeGen/SanitizerHandler.h
index bb42e39..a66e7ab 100644
--- a/clang/lib/CodeGen/SanitizerHandler.h
+++ b/clang/lib/CodeGen/SanitizerHandler.h
@@ -14,35 +14,69 @@
#define LLVM_CLANG_LIB_CODEGEN_SANITIZER_HANDLER_H
#define LIST_SANITIZER_CHECKS \
- SANITIZER_CHECK(AddOverflow, add_overflow, 0) \
- SANITIZER_CHECK(BuiltinUnreachable, builtin_unreachable, 0) \
- SANITIZER_CHECK(CFICheckFail, cfi_check_fail, 0) \
- SANITIZER_CHECK(DivremOverflow, divrem_overflow, 0) \
- SANITIZER_CHECK(DynamicTypeCacheMiss, dynamic_type_cache_miss, 0) \
- SANITIZER_CHECK(FloatCastOverflow, float_cast_overflow, 0) \
- SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 0) \
- SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0) \
- SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0) \
- SANITIZER_CHECK(InvalidObjCCast, invalid_objc_cast, 0) \
- SANITIZER_CHECK(LoadInvalidValue, load_invalid_value, 0) \
- SANITIZER_CHECK(MissingReturn, missing_return, 0) \
- SANITIZER_CHECK(MulOverflow, mul_overflow, 0) \
- SANITIZER_CHECK(NegateOverflow, negate_overflow, 0) \
- SANITIZER_CHECK(NullabilityArg, nullability_arg, 0) \
- SANITIZER_CHECK(NullabilityReturn, nullability_return, 1) \
- SANITIZER_CHECK(NonnullArg, nonnull_arg, 0) \
- SANITIZER_CHECK(NonnullReturn, nonnull_return, 1) \
- SANITIZER_CHECK(OutOfBounds, out_of_bounds, 0) \
- SANITIZER_CHECK(PointerOverflow, pointer_overflow, 0) \
- SANITIZER_CHECK(ShiftOutOfBounds, shift_out_of_bounds, 0) \
- SANITIZER_CHECK(SubOverflow, sub_overflow, 0) \
- SANITIZER_CHECK(TypeMismatch, type_mismatch, 1) \
- SANITIZER_CHECK(AlignmentAssumption, alignment_assumption, 0) \
- SANITIZER_CHECK(VLABoundNotPositive, vla_bound_not_positive, 0) \
- SANITIZER_CHECK(BoundsSafety, bounds_safety, 0)
+ SANITIZER_CHECK(AddOverflow, add_overflow, 0, "Integer addition overflowed") \
+ SANITIZER_CHECK(BuiltinUnreachable, builtin_unreachable, 0, \
+ "_builtin_unreachable(), execution reached an unreachable " \
+ "program point") \
+ SANITIZER_CHECK(CFICheckFail, cfi_check_fail, 0, \
+ "Control flow integrity check failed") \
+ SANITIZER_CHECK(DivremOverflow, divrem_overflow, 0, \
+ "Integer divide or remainder overflowed") \
+ SANITIZER_CHECK(DynamicTypeCacheMiss, dynamic_type_cache_miss, 0, \
+ "Dynamic type cache miss, member call made on an object " \
+ "whose dynamic type differs from the expected type") \
+ SANITIZER_CHECK(FloatCastOverflow, float_cast_overflow, 0, \
+ "Floating-point to integer conversion overflowed") \
+ SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 0, \
+ "Function called with mismatched signature") \
+ SANITIZER_CHECK(ImplicitConversion, implicit_conversion, 0, \
+ "Implicit integer conversion overflowed or lost data") \
+ SANITIZER_CHECK(InvalidBuiltin, invalid_builtin, 0, \
+ "Invalid use of builtin function") \
+ SANITIZER_CHECK(InvalidObjCCast, invalid_objc_cast, 0, \
+ "Invalid Objective-C cast") \
+ SANITIZER_CHECK(LoadInvalidValue, load_invalid_value, 0, \
+ "Loaded an invalid or uninitialized value for the type") \
+ SANITIZER_CHECK(MissingReturn, missing_return, 0, \
+ "Execution reached the end of a value-returning function " \
+ "without returning a value") \
+ SANITIZER_CHECK(MulOverflow, mul_overflow, 0, \
+ "Integer multiplication overflowed") \
+ SANITIZER_CHECK(NegateOverflow, negate_overflow, 0, \
+ "Integer negation overflowed") \
+ SANITIZER_CHECK( \
+ NullabilityArg, nullability_arg, 0, \
+ "Passing null as an argument which is annotated with _Nonnull") \
+ SANITIZER_CHECK(NullabilityReturn, nullability_return, 1, \
+ "Returning null from a function with a return type " \
+ "annotated with _Nonnull") \
+ SANITIZER_CHECK(NonnullArg, nonnull_arg, 0, \
+ "Passing null pointer as an argument which is declared to " \
+ "never be null") \
+ SANITIZER_CHECK(NonnullReturn, nonnull_return, 1, \
+ "Returning null pointer from a function which is declared " \
+ "to never return null") \
+ SANITIZER_CHECK(OutOfBounds, out_of_bounds, 0, "Array index out of bounds") \
+ SANITIZER_CHECK(PointerOverflow, pointer_overflow, 0, \
+ "Pointer arithmetic overflowed bounds") \
+ SANITIZER_CHECK(ShiftOutOfBounds, shift_out_of_bounds, 0, \
+ "Shift exponent is too large for the type") \
+ SANITIZER_CHECK(SubOverflow, sub_overflow, 0, \
+ "Integer subtraction overflowed") \
+ SANITIZER_CHECK(TypeMismatch, type_mismatch, 1, \
+ "Type mismatch in operation") \
+ SANITIZER_CHECK(AlignmentAssumption, alignment_assumption, 0, \
+ "Alignment assumption violated") \
+ SANITIZER_CHECK( \
+ VLABoundNotPositive, vla_bound_not_positive, 0, \
+ "Variable length array bound evaluates to non-positive value") \
+ SANITIZER_CHECK(BoundsSafety, bounds_safety, 0, \
+ "") // BoundsSafety Msg is empty because it is not considered
+ // part of UBSan; therefore, no trap reason is emitted for
+ // this case.
enum SanitizerHandler {
-#define SANITIZER_CHECK(Enum, Name, Version) Enum,
+#define SANITIZER_CHECK(Enum, Name, Version, Msg) Enum,
LIST_SANITIZER_CHECKS
#undef SANITIZER_CHECK
};
diff --git a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
index 7dccf82..70f510a 100644
--- a/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/AMDGPU.cpp
@@ -274,7 +274,7 @@ llvm::Value *CodeGenFunction::EmitScalarOrConstFoldImmArg(unsigned ICEArguments,
void CodeGenFunction::AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
const CallExpr *E) {
- constexpr const char *Tag = "amdgpu-as";
+ constexpr const char *Tag = "amdgpu-synchronize-as";
LLVMContext &Ctx = Inst->getContext();
SmallVector<MMRAMetadata::TagT, 3> MMRAs;
@@ -633,6 +633,41 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Function *F = CGM.getIntrinsic(IID, {LoadTy});
return Builder.CreateCall(F, {Addr});
}
+ case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b32:
+ case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b64:
+ case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b128:
+ case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b32:
+ case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b64:
+ case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b128: {
+
+ Intrinsic::ID IID;
+ switch (BuiltinID) {
+ case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b32:
+ IID = Intrinsic::amdgcn_global_load_monitor_b32;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b64:
+ IID = Intrinsic::amdgcn_global_load_monitor_b64;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_global_load_monitor_b128:
+ IID = Intrinsic::amdgcn_global_load_monitor_b128;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b32:
+ IID = Intrinsic::amdgcn_flat_load_monitor_b32;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b64:
+ IID = Intrinsic::amdgcn_flat_load_monitor_b64;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_flat_load_monitor_b128:
+ IID = Intrinsic::amdgcn_flat_load_monitor_b128;
+ break;
+ }
+
+ llvm::Type *LoadTy = ConvertType(E->getType());
+ llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Val = EmitScalarExpr(E->getArg(1));
+ llvm::Function *F = CGM.getIntrinsic(IID, {LoadTy});
+ return Builder.CreateCall(F, {Addr, Val});
+ }
case AMDGPU::BI__builtin_amdgcn_load_to_lds: {
// Should this have asan instrumentation?
return emitBuiltinWithOneOverloadedType<5>(*this, E,
diff --git a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
index 2e6b4b3..980f7eb 100644
--- a/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/ARM.cpp
@@ -4922,19 +4922,6 @@ Value *CodeGenFunction::EmitAArch64SMEBuiltinExpr(unsigned BuiltinID,
if (Builtin->LLVMIntrinsic == 0)
return nullptr;
- if (BuiltinID == SME::BI__builtin_sme___arm_in_streaming_mode) {
- // If we already know the streaming mode, don't bother with the intrinsic
- // and emit a constant instead
- const auto *FD = cast<FunctionDecl>(CurFuncDecl);
- if (const auto *FPT = FD->getType()->getAs<FunctionProtoType>()) {
- unsigned SMEAttrs = FPT->getAArch64SMEAttributes();
- if (!(SMEAttrs & FunctionType::SME_PStateSMCompatibleMask)) {
- bool IsStreaming = SMEAttrs & FunctionType::SME_PStateSMEnabledMask;
- return ConstantInt::getBool(Builder.getContext(), IsStreaming);
- }
- }
- }
-
// Predicates must match the main datatype.
for (Value *&Op : Ops)
if (auto PredTy = dyn_cast<llvm::VectorType>(Op->getType()))
diff --git a/clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp b/clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp
index b7fd70e..33a8d8f 100644
--- a/clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp
+++ b/clang/lib/CodeGen/TargetBuiltins/WebAssembly.cpp
@@ -12,7 +12,10 @@
#include "CGBuiltin.h"
#include "clang/Basic/TargetBuiltins.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
+#include "llvm/Support/ErrorHandling.h"
using namespace clang;
using namespace CodeGen;
@@ -218,6 +221,64 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_ref_null_func);
return Builder.CreateCall(Callee);
}
+ case WebAssembly::BI__builtin_wasm_test_function_pointer_signature: {
+ Value *FuncRef = EmitScalarExpr(E->getArg(0));
+
+ // Get the function type from the argument's static type
+ QualType ArgType = E->getArg(0)->getType();
+ const PointerType *PtrTy = ArgType->getAs<PointerType>();
+ assert(PtrTy && "Sema should have ensured this is a function pointer");
+
+ const FunctionType *FuncTy = PtrTy->getPointeeType()->getAs<FunctionType>();
+ assert(FuncTy && "Sema should have ensured this is a function pointer");
+
+    // In LLVM IR we no longer have access to the type of the function pointer,
+    // so we need to insert this type information somehow. The
+    // @llvm.wasm.ref.test.func intrinsic takes variadic arguments whose values
+    // are unused; their types encode the signature of the function to test
+    // for. See the test here:
+    // llvm/test/CodeGen/WebAssembly/ref-test-func.ll
+    //
+    // The format is: first the return types (since this is a C function
+    // pointer, there will be zero or one of these), then a token type to mark
+    // the boundary between return types and param types, then the param types.
+
+ llvm::FunctionType *LLVMFuncTy =
+ cast<llvm::FunctionType>(ConvertType(QualType(FuncTy, 0)));
+
+ unsigned NParams = LLVMFuncTy->getNumParams();
+ std::vector<Value *> Args;
+ Args.reserve(NParams + 3);
+ // The only real argument is the FuncRef
+ Args.push_back(FuncRef);
+
+ // Add the type information
+ auto addType = [this, &Args](llvm::Type *T) {
+ if (T->isVoidTy()) {
+ // Do nothing
+ } else if (T->isFloatingPointTy()) {
+ Args.push_back(ConstantFP::get(T, 0));
+ } else if (T->isIntegerTy()) {
+ Args.push_back(ConstantInt::get(T, 0));
+ } else if (T->isPointerTy()) {
+ Args.push_back(ConstantPointerNull::get(llvm::PointerType::get(
+ getLLVMContext(), T->getPointerAddressSpace())));
+ } else {
+ // TODO: Handle reference types. For now, we reject them in Sema.
+ llvm_unreachable("Unhandled type");
+ }
+ };
+
+ addType(LLVMFuncTy->getReturnType());
+ // The token type indicates the boundary between return types and param
+ // types.
+ Args.push_back(PoisonValue::get(llvm::Type::getTokenTy(getLLVMContext())));
+ for (unsigned i = 0; i < NParams; i++) {
+ addType(LLVMFuncTy->getParamType(i));
+ }
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_ref_test_func);
+ return Builder.CreateCall(Callee, Args);
+ }
case WebAssembly::BI__builtin_wasm_swizzle_i8x16: {
Value *Src = EmitScalarExpr(E->getArg(0));
Value *Indices = EmitScalarExpr(E->getArg(1));
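To make the argument layout of the __builtin_wasm_test_function_pointer_signature lowering above concrete, a hedged sketch of the call built for a simple signature (illustrative; the constant values are ignored, only their types matter):

  // Illustrative (assumption): for a pointee type int(float, double), the
  // intrinsic call receives
  //   { FuncRef,                    // the function reference under test
  //     i32 0,                      // return type marker (ConstantInt)
  //     token poison,               // boundary between returns and params
  //     float 0.0,                  // param 0 marker (ConstantFP)
  //     double 0.0 }                // param 1 marker (ConstantFP)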
diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp
index ef5af66..586f287 100644
--- a/clang/lib/Driver/Driver.cpp
+++ b/clang/lib/Driver/Driver.cpp
@@ -209,8 +209,8 @@ Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
CCLogDiagnostics(false), CCGenDiagnostics(false),
CCPrintProcessStats(false), CCPrintInternalStats(false),
TargetTriple(TargetTriple), Saver(Alloc), PrependArg(nullptr),
- CheckInputsExist(true), ProbePrecompiled(true),
- SuppressMissingInputWarning(false) {
+ PreferredLinker(CLANG_DEFAULT_LINKER), CheckInputsExist(true),
+ ProbePrecompiled(true), SuppressMissingInputWarning(false) {
// Provide a sane fallback if no VFS is specified.
if (!this->VFS)
this->VFS = llvm::vfs::getRealFileSystem();
@@ -910,7 +910,7 @@ getSystemOffloadArchs(Compilation &C, Action::OffloadKind Kind) {
SmallVector<std::string> GPUArchs;
if (llvm::ErrorOr<std::string> Executable =
- llvm::sys::findProgramByName(Program)) {
+ llvm::sys::findProgramByName(Program, {C.getDriver().Dir})) {
llvm::SmallVector<StringRef> Args{*Executable};
if (Kind == Action::OFK_HIP)
Args.push_back("--only=amdgpu");
@@ -3606,7 +3606,7 @@ class OffloadingActionBuilder final {
if (!CompileDeviceOnly) {
C.getDriver().Diag(diag::err_opt_not_valid_without_opt)
<< "-fhip-emit-relocatable"
- << "--cuda-device-only";
+ << "--offload-device-only";
}
}
}
@@ -4774,6 +4774,21 @@ Action *Driver::BuildOffloadingActions(Compilation &C,
C.isOffloadingHostKind(Action::OFK_HIP) &&
!Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc, false);
+ bool HIPRelocatableObj =
+ C.isOffloadingHostKind(Action::OFK_HIP) &&
+ Args.hasFlag(options::OPT_fhip_emit_relocatable,
+ options::OPT_fno_hip_emit_relocatable, false);
+
+ if (!HIPNoRDC && HIPRelocatableObj)
+ C.getDriver().Diag(diag::err_opt_not_valid_with_opt)
+ << "-fhip-emit-relocatable"
+ << "-fgpu-rdc";
+
+ if (!offloadDeviceOnly() && HIPRelocatableObj)
+ C.getDriver().Diag(diag::err_opt_not_valid_without_opt)
+ << "-fhip-emit-relocatable"
+ << "--offload-device-only";
+
// For HIP non-rdc non-device-only compilation, create a linker wrapper
// action for each host object to link, bundle and wrap device files in
// it.
@@ -4894,7 +4909,7 @@ Action *Driver::BuildOffloadingActions(Compilation &C,
A->getOffloadingToolChain()->getTriple().isSPIRV();
if ((A->getType() != types::TY_Object && !IsAMDGCNSPIRV &&
A->getType() != types::TY_LTO_BC) ||
- !HIPNoRDC || !offloadDeviceOnly())
+ HIPRelocatableObj || !HIPNoRDC || !offloadDeviceOnly())
continue;
ActionList LinkerInput = {A};
A = C.MakeAction<LinkJobAction>(LinkerInput, types::TY_Image);
@@ -4919,13 +4934,14 @@ Action *Driver::BuildOffloadingActions(Compilation &C,
}
// HIP code in device-only non-RDC mode will bundle the output if it invoked
- // the linker.
+ // the linker or if the user explicitly requested it.
bool ShouldBundleHIP =
- HIPNoRDC && offloadDeviceOnly() &&
Args.hasFlag(options::OPT_gpu_bundle_output,
- options::OPT_no_gpu_bundle_output, true) &&
- !llvm::any_of(OffloadActions,
- [](Action *A) { return A->getType() != types::TY_Image; });
+ options::OPT_no_gpu_bundle_output, false) ||
+ (HIPNoRDC && offloadDeviceOnly() &&
+ llvm::none_of(OffloadActions, [](Action *A) {
+ return A->getType() != types::TY_Image;
+ }));
// All kinds exit now in device-only mode except for non-RDC mode HIP.
if (offloadDeviceOnly() && !ShouldBundleHIP)
@@ -5105,7 +5121,10 @@ Action *Driver::ConstructPhaseAction(
false) ||
(Args.hasFlag(options::OPT_offload_new_driver,
options::OPT_no_offload_new_driver, false) &&
- !offloadDeviceOnly())) ||
+ (!offloadDeviceOnly() ||
+ (Input->getOffloadingToolChain() &&
+ TargetDeviceOffloadKind == Action::OFK_HIP &&
+ Input->getOffloadingToolChain()->getTriple().isSPIRV())))) ||
TargetDeviceOffloadKind == Action::OFK_OpenMP))) {
types::ID Output =
Args.hasArg(options::OPT_S) &&
diff --git a/clang/lib/Driver/SanitizerArgs.cpp b/clang/lib/Driver/SanitizerArgs.cpp
index 21e4cff..98793a5 100644
--- a/clang/lib/Driver/SanitizerArgs.cpp
+++ b/clang/lib/Driver/SanitizerArgs.cpp
@@ -1382,6 +1382,12 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
CmdArgs.push_back(Args.MakeArgString("-fsanitize-annotate-debug-info=" +
toString(AnnotateDebugInfo)));
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fsanitize_debug_trap_reasons,
+ options::OPT_fno_sanitize_debug_trap_reasons)) {
+ CmdArgs.push_back(Args.MakeArgString(A->getAsString(Args)));
+ }
+
addSpecialCaseListOpt(Args, CmdArgs,
"-fsanitize-ignorelist=", UserIgnorelistFiles);
addSpecialCaseListOpt(Args, CmdArgs,
diff --git a/clang/lib/Driver/ToolChain.cpp b/clang/lib/Driver/ToolChain.cpp
index 47f93fa1..25c6b5a 100644
--- a/clang/lib/Driver/ToolChain.cpp
+++ b/clang/lib/Driver/ToolChain.cpp
@@ -191,9 +191,10 @@ static void getAArch64MultilibFlags(const Driver &D,
for (const auto &ArchInfo : AArch64::ArchInfos)
if (FeatureSet.contains(ArchInfo->ArchFeature))
ArchName = ArchInfo->Name;
- assert(!ArchName.empty() && "at least one architecture should be found");
- MArch.insert(MArch.begin(), ("-march=" + ArchName).str());
- Result.push_back(llvm::join(MArch, "+"));
+ if (!ArchName.empty()) {
+ MArch.insert(MArch.begin(), ("-march=" + ArchName).str());
+ Result.push_back(llvm::join(MArch, "+"));
+ }
const Arg *BranchProtectionArg =
Args.getLastArgNoClaim(options::OPT_mbranch_protection_EQ);
@@ -760,7 +761,7 @@ std::string ToolChain::buildCompilerRTBasename(const llvm::opt::ArgList &Args,
break;
case ToolChain::FT_Shared:
if (TT.isOSWindows())
- Suffix = TT.isWindowsGNUEnvironment() ? ".dll.a" : ".lib";
+ Suffix = TT.isOSCygMing() ? ".dll.a" : ".lib";
else if (TT.isOSAIX())
Suffix = ".a";
else
@@ -1087,7 +1088,7 @@ std::string ToolChain::GetLinkerPath(bool *LinkerIsLLD) const {
// Get -fuse-ld= first to prevent -Wunused-command-line-argument. -fuse-ld= is
// considered as the linker flavor, e.g. "bfd", "gold", or "lld".
const Arg* A = Args.getLastArg(options::OPT_fuse_ld_EQ);
- StringRef UseLinker = A ? A->getValue() : CLANG_DEFAULT_LINKER;
+ StringRef UseLinker = A ? A->getValue() : getDriver().getPreferredLinker();
// --ld-path= takes precedence over -fuse-ld= and specifies the executable
 // name. -B, COMPILER_PATH and PATH are consulted if the value does not
diff --git a/clang/lib/Driver/ToolChains/AMDGPU.h b/clang/lib/Driver/ToolChains/AMDGPU.h
index 513c77d..e5d41e2 100644
--- a/clang/lib/Driver/ToolChains/AMDGPU.h
+++ b/clang/lib/Driver/ToolChains/AMDGPU.h
@@ -10,7 +10,6 @@
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_AMDGPU_H
#include "Gnu.h"
-#include "ROCm.h"
#include "clang/Basic/TargetID.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/Tool.h"
diff --git a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
index 6bd710e..418f9fd 100644
--- a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -467,3 +467,18 @@ void aarch64::setPAuthABIInTriple(const Driver &D, const ArgList &Args,
break;
}
}
+
+/// Is the triple {aarch64,aarch64_be}-none-elf?
+bool aarch64::isAArch64BareMetal(const llvm::Triple &Triple) {
+ if (Triple.getArch() != llvm::Triple::aarch64 &&
+ Triple.getArch() != llvm::Triple::aarch64_be)
+ return false;
+
+ if (Triple.getVendor() != llvm::Triple::UnknownVendor)
+ return false;
+
+ if (Triple.getOS() != llvm::Triple::UnknownOS)
+ return false;
+
+ return Triple.getEnvironmentName() == "elf";
+}
diff --git a/clang/lib/Driver/ToolChains/Arch/AArch64.h b/clang/lib/Driver/ToolChains/Arch/AArch64.h
index 2057272..2765ee8 100644
--- a/clang/lib/Driver/ToolChains/Arch/AArch64.h
+++ b/clang/lib/Driver/ToolChains/Arch/AArch64.h
@@ -30,6 +30,7 @@ std::string getAArch64TargetCPU(const llvm::opt::ArgList &Args,
void setPAuthABIInTriple(const Driver &D, const llvm::opt::ArgList &Args,
llvm::Triple &triple);
+bool isAArch64BareMetal(const llvm::Triple &Triple);
} // end namespace aarch64
} // end namespace target
diff --git a/clang/lib/Driver/ToolChains/Arch/Sparc.cpp b/clang/lib/Driver/ToolChains/Arch/Sparc.cpp
index 3333135..94a94f1 100644
--- a/clang/lib/Driver/ToolChains/Arch/Sparc.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/Sparc.cpp
@@ -37,6 +37,13 @@ const char *sparc::getSparcAsmModeForCPU(StringRef Name,
.Case("niagara4", "-Av9d")
.Default(DefV9CPU);
} else {
+ const char *DefV8CPU;
+
+ if (Triple.isOSSolaris())
+ DefV8CPU = "-Av8plus";
+ else
+ DefV8CPU = "-Av8";
+
return llvm::StringSwitch<const char *>(Name)
.Case("v8", "-Av8")
.Case("supersparc", "-Av8")
@@ -72,7 +79,7 @@ const char *sparc::getSparcAsmModeForCPU(StringRef Name,
.Case("gr712rc", "-Aleon")
.Case("leon4", "-Aleon")
.Case("gr740", "-Aleon")
- .Default("-Av8");
+ .Default(DefV8CPU);
}
}
@@ -160,6 +167,8 @@ void sparc::getSparcTargetFeatures(const Driver &D, const llvm::Triple &Triple,
(Triple.getArch() == llvm::Triple::sparcv9) &&
(Triple.isOSLinux() || Triple.isOSFreeBSD() || Triple.isOSOpenBSD());
bool IsSparcV9BTarget = Triple.isOSSolaris();
+ bool IsSparcV8PlusTarget =
+ Triple.getArch() == llvm::Triple::sparc && Triple.isOSSolaris();
if (Arg *A = Args.getLastArg(options::OPT_mvis, options::OPT_mno_vis)) {
if (A->getOption().matches(options::OPT_mvis))
Features.push_back("+vis");
@@ -196,6 +205,8 @@ void sparc::getSparcTargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (Arg *A = Args.getLastArg(options::OPT_mv8plus, options::OPT_mno_v8plus)) {
if (A->getOption().matches(options::OPT_mv8plus))
Features.push_back("+v8plus");
+ } else if (IsSparcV8PlusTarget) {
+ Features.push_back("+v8plus");
}
if (Args.hasArg(options::OPT_ffixed_g1))
diff --git a/clang/lib/Driver/ToolChains/BareMetal.cpp b/clang/lib/Driver/ToolChains/BareMetal.cpp
index 497f333..25a16fe 100644
--- a/clang/lib/Driver/ToolChains/BareMetal.cpp
+++ b/clang/lib/Driver/ToolChains/BareMetal.cpp
@@ -12,6 +12,7 @@
#include "clang/Driver/CommonArgs.h"
#include "clang/Driver/InputInfo.h"
+#include "Arch/AArch64.h"
#include "Arch/ARM.h"
#include "Arch/RISCV.h"
#include "clang/Driver/Compilation.h"
@@ -31,21 +32,6 @@ using namespace clang::driver;
using namespace clang::driver::tools;
using namespace clang::driver::toolchains;
-/// Is the triple {aarch64.aarch64_be}-none-elf?
-static bool isAArch64BareMetal(const llvm::Triple &Triple) {
- if (Triple.getArch() != llvm::Triple::aarch64 &&
- Triple.getArch() != llvm::Triple::aarch64_be)
- return false;
-
- if (Triple.getVendor() != llvm::Triple::UnknownVendor)
- return false;
-
- if (Triple.getOS() != llvm::Triple::UnknownOS)
- return false;
-
- return Triple.getEnvironmentName() == "elf";
-}
-
static bool isRISCVBareMetal(const llvm::Triple &Triple) {
if (!Triple.isRISCV())
return false;
@@ -363,8 +349,9 @@ void BareMetal::findMultilibs(const Driver &D, const llvm::Triple &Triple,
}
bool BareMetal::handlesTarget(const llvm::Triple &Triple) {
- return arm::isARMEABIBareMetal(Triple) || isAArch64BareMetal(Triple) ||
- isRISCVBareMetal(Triple) || isPPCBareMetal(Triple);
+ return arm::isARMEABIBareMetal(Triple) ||
+ aarch64::isAArch64BareMetal(Triple) || isRISCVBareMetal(Triple) ||
+ isPPCBareMetal(Triple);
}
Tool *BareMetal::buildLinker() const {
@@ -684,7 +671,8 @@ void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
CmdArgs.push_back("--start-group");
AddRunTimeLibs(TC, D, CmdArgs, Args);
- CmdArgs.push_back("-lc");
+ if (!Args.hasArg(options::OPT_nolibc))
+ CmdArgs.push_back("-lc");
if (TC.hasValidGCCInstallation() || detectGCCToolchainAdjacent(D))
CmdArgs.push_back("-lgloss");
CmdArgs.push_back("--end-group");
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index 7d0c142..4e1b1d9 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -16,6 +16,7 @@
#include "Arch/SystemZ.h"
#include "Hexagon.h"
#include "PS4CPU.h"
+#include "ToolChains/Cuda.h"
#include "clang/Basic/CLWarnings.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/HeaderInclude.h"
@@ -225,17 +226,19 @@ static bool ShouldEnableAutolink(const ArgList &Args, const ToolChain &TC,
static const char *addDebugCompDirArg(const ArgList &Args,
ArgStringList &CmdArgs,
const llvm::vfs::FileSystem &VFS) {
+ std::string DebugCompDir;
if (Arg *A = Args.getLastArg(options::OPT_ffile_compilation_dir_EQ,
- options::OPT_fdebug_compilation_dir_EQ)) {
- if (A->getOption().matches(options::OPT_ffile_compilation_dir_EQ))
- CmdArgs.push_back(Args.MakeArgString(Twine("-fdebug-compilation-dir=") +
- A->getValue()));
+ options::OPT_fdebug_compilation_dir_EQ))
+ DebugCompDir = A->getValue();
+
+ if (DebugCompDir.empty()) {
+ if (llvm::ErrorOr<std::string> CWD = VFS.getCurrentWorkingDirectory())
+ DebugCompDir = std::move(*CWD);
else
- A->render(Args, CmdArgs);
- } else if (llvm::ErrorOr<std::string> CWD =
- VFS.getCurrentWorkingDirectory()) {
- CmdArgs.push_back(Args.MakeArgString("-fdebug-compilation-dir=" + *CWD));
+ return nullptr;
}
+ CmdArgs.push_back(
+ Args.MakeArgString("-fdebug-compilation-dir=" + DebugCompDir));
StringRef Path(CmdArgs.back());
return Path.substr(Path.find('=') + 1).data();
}
@@ -524,17 +527,17 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
CmdArgs.push_back("-fcoverage-mcdc");
}
+ StringRef CoverageCompDir;
if (Arg *A = Args.getLastArg(options::OPT_ffile_compilation_dir_EQ,
- options::OPT_fcoverage_compilation_dir_EQ)) {
- if (A->getOption().matches(options::OPT_ffile_compilation_dir_EQ))
- CmdArgs.push_back(Args.MakeArgString(
- Twine("-fcoverage-compilation-dir=") + A->getValue()));
- else
- A->render(Args, CmdArgs);
- } else if (llvm::ErrorOr<std::string> CWD =
- D.getVFS().getCurrentWorkingDirectory()) {
- CmdArgs.push_back(Args.MakeArgString("-fcoverage-compilation-dir=" + *CWD));
- }
+ options::OPT_fcoverage_compilation_dir_EQ))
+ CoverageCompDir = A->getValue();
+ if (CoverageCompDir.empty()) {
+ if (auto CWD = D.getVFS().getCurrentWorkingDirectory())
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-fcoverage-compilation-dir=") + *CWD));
+ } else
+ CmdArgs.push_back(Args.MakeArgString(Twine("-fcoverage-compilation-dir=") +
+ CoverageCompDir));
if (Args.hasArg(options::OPT_fprofile_exclude_files_EQ)) {
auto *Arg = Args.getLastArg(options::OPT_fprofile_exclude_files_EQ);
@@ -3881,17 +3884,17 @@ static bool RenderModulesOptions(Compilation &C, const Driver &D,
const ArgList &Args, const InputInfo &Input,
const InputInfo &Output, bool HaveStd20,
ArgStringList &CmdArgs) {
- bool IsCXX = types::isCXX(Input.getType());
- bool HaveStdCXXModules = IsCXX && HaveStd20;
+ const bool IsCXX = types::isCXX(Input.getType());
+ const bool HaveStdCXXModules = IsCXX && HaveStd20;
bool HaveModules = HaveStdCXXModules;
// -fmodules enables the use of precompiled modules (off by default).
// Users can pass -fno-cxx-modules to turn off modules support for
// C++/Objective-C++ programs.
+ const bool AllowedInCXX = Args.hasFlag(options::OPT_fcxx_modules,
+ options::OPT_fno_cxx_modules, true);
bool HaveClangModules = false;
if (Args.hasFlag(options::OPT_fmodules, options::OPT_fno_modules, false)) {
- bool AllowedInCXX = Args.hasFlag(options::OPT_fcxx_modules,
- options::OPT_fno_cxx_modules, true);
if (AllowedInCXX || !IsCXX) {
CmdArgs.push_back("-fmodules");
HaveClangModules = true;
@@ -3900,6 +3903,9 @@ static bool RenderModulesOptions(Compilation &C, const Driver &D,
HaveModules |= HaveClangModules;
+ if (HaveModules && !AllowedInCXX)
+ CmdArgs.push_back("-fno-cxx-modules");
+
// -fmodule-maps enables implicit reading of module map files. By default,
// this is enabled if we are using Clang's flavor of precompiled modules.
if (Args.hasFlag(options::OPT_fimplicit_module_maps,
@@ -5941,7 +5947,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-mms-bitfields");
}
- if (Triple.isWindowsGNUEnvironment()) {
+ if (Triple.isOSCygMing()) {
Args.addOptOutFlag(CmdArgs, options::OPT_fauto_import,
options::OPT_fno_auto_import);
}
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp
index 826e2ea..0771c7c 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -23,6 +23,7 @@
#include "Hexagon.h"
#include "MSP430.h"
#include "Solaris.h"
+#include "ToolChains/Cuda.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Config/config.h"
#include "clang/Driver/Action.h"
@@ -294,17 +295,22 @@ static void renderRemarksOptions(const ArgList &Args, ArgStringList &CmdArgs,
Format = A->getValue();
SmallString<128> F;
- const Arg *A = Args.getLastArg(options::OPT_foptimization_record_file_EQ);
- if (A)
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_foptimization_record_file_EQ)) {
+ F = A->getValue();
+ F += ".";
+ } else if (const Arg *A = Args.getLastArg(options::OPT_dumpdir)) {
F = A->getValue();
- else if (Output.isFilename())
+ } else if (Output.isFilename()) {
F = Output.getFilename();
+ F += ".";
+ }
assert(!F.empty() && "Cannot determine remarks output name.");
// Append "opt.ld.<format>" to the end of the file name.
CmdArgs.push_back(Args.MakeArgString(Twine(PluginOptPrefix) +
- "opt-remarks-filename=" + F +
- ".opt.ld." + Format));
+ "opt-remarks-filename=" + F + "opt.ld." +
+ Format));
if (const Arg *A =
Args.getLastArg(options::OPT_foptimization_record_passes_EQ))
@@ -547,15 +553,22 @@ const char *tools::getLDMOption(const llvm::Triple &T, const ArgList &Args) {
case llvm::Triple::aarch64:
if (T.isOSManagarm())
return "aarch64managarm";
+ else if (aarch64::isAArch64BareMetal(T))
+ return "aarch64elf";
return "aarch64linux";
case llvm::Triple::aarch64_be:
+ if (aarch64::isAArch64BareMetal(T))
+ return "aarch64elfb";
return "aarch64linuxb";
case llvm::Triple::arm:
case llvm::Triple::thumb:
case llvm::Triple::armeb:
- case llvm::Triple::thumbeb:
- return tools::arm::isARMBigEndian(T, Args) ? "armelfb_linux_eabi"
- : "armelf_linux_eabi";
+ case llvm::Triple::thumbeb: {
+ bool IsBigEndian = tools::arm::isARMBigEndian(T, Args);
+ if (arm::isARMEABIBareMetal(T))
+ return IsBigEndian ? "armelfb" : "armelf";
+ return IsBigEndian ? "armelfb_linux_eabi" : "armelf_linux_eabi";
+ }
case llvm::Triple::m68k:
return "m68kelf";
case llvm::Triple::ppc:
@@ -1067,9 +1080,17 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
}
}
- if (Args.hasArg(options::OPT_gsplit_dwarf))
- CmdArgs.push_back(Args.MakeArgString(
- Twine(PluginOptPrefix) + "dwo_dir=" + Output.getFilename() + "_dwo"));
+ if (Args.hasArg(options::OPT_gsplit_dwarf)) {
+ SmallString<128> F;
+ if (const Arg *A = Args.getLastArg(options::OPT_dumpdir)) {
+ F = A->getValue();
+ } else {
+ F = Output.getFilename();
+ F += "_";
+ }
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(PluginOptPrefix) + "dwo_dir=" + F + "dwo"));
+ }
if (IsThinLTO && !IsOSAIX)
CmdArgs.push_back(Args.MakeArgString(Twine(PluginOptPrefix) + "thinlto"));
diff --git a/clang/lib/Driver/ToolChains/Cuda.h b/clang/lib/Driver/ToolChains/Cuda.h
index 259eda6..8aeba53 100644
--- a/clang/lib/Driver/ToolChains/Cuda.h
+++ b/clang/lib/Driver/ToolChains/Cuda.h
@@ -11,6 +11,7 @@
#include "clang/Basic/Cuda.h"
#include "clang/Driver/Action.h"
+#include "clang/Driver/CudaInstallationDetector.h"
#include "clang/Driver/Multilib.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
@@ -22,61 +23,6 @@
namespace clang {
namespace driver {
-
-/// A class to find a viable CUDA installation
-class CudaInstallationDetector {
-private:
- const Driver &D;
- bool IsValid = false;
- CudaVersion Version = CudaVersion::UNKNOWN;
- std::string InstallPath;
- std::string BinPath;
- std::string LibDevicePath;
- std::string IncludePath;
- llvm::StringMap<std::string> LibDeviceMap;
-
- // CUDA architectures for which we have raised an error in
- // CheckCudaVersionSupportsArch.
- mutable std::bitset<(int)OffloadArch::LAST> ArchsWithBadVersion;
-
-public:
- CudaInstallationDetector(const Driver &D, const llvm::Triple &HostTriple,
- const llvm::opt::ArgList &Args);
-
- void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const;
-
- /// Emit an error if Version does not support the given Arch.
- ///
- /// If either Version or Arch is unknown, does not emit an error. Emits at
- /// most one error per Arch.
- void CheckCudaVersionSupportsArch(OffloadArch Arch) const;
-
- /// Check whether we detected a valid Cuda install.
- bool isValid() const { return IsValid; }
- /// Print information about the detected CUDA installation.
- void print(raw_ostream &OS) const;
-
- /// Get the detected Cuda install's version.
- CudaVersion version() const {
- return Version == CudaVersion::NEW ? CudaVersion::PARTIALLY_SUPPORTED
- : Version;
- }
- /// Get the detected Cuda installation path.
- StringRef getInstallPath() const { return InstallPath; }
- /// Get the detected path to Cuda's bin directory.
- StringRef getBinPath() const { return BinPath; }
- /// Get the detected Cuda Include path.
- StringRef getIncludePath() const { return IncludePath; }
- /// Get the detected Cuda device library path.
- StringRef getLibDevicePath() const { return LibDevicePath; }
- /// Get libdevice file for given architecture
- std::string getLibDeviceFile(StringRef Gpu) const {
- return LibDeviceMap.lookup(Gpu);
- }
- void WarnIfUnsupportedVersion() const;
-};
-
namespace tools {
namespace NVPTX {
diff --git a/clang/lib/Driver/ToolChains/Darwin.h b/clang/lib/Driver/ToolChains/Darwin.h
index b38bfe6..d1cfb6f 100644
--- a/clang/lib/Driver/ToolChains/Darwin.h
+++ b/clang/lib/Driver/ToolChains/Darwin.h
@@ -9,12 +9,12 @@
#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_DARWIN_H
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_DARWIN_H
-#include "Cuda.h"
-#include "LazyDetector.h"
-#include "ROCm.h"
-#include "SYCL.h"
#include "clang/Basic/DarwinSDKInfo.h"
#include "clang/Basic/LangOptions.h"
+#include "clang/Driver/CudaInstallationDetector.h"
+#include "clang/Driver/LazyDetector.h"
+#include "clang/Driver/RocmInstallationDetector.h"
+#include "clang/Driver/SyclInstallationDetector.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
#include "clang/Driver/XRayArgs.h"
diff --git a/clang/lib/Driver/ToolChains/Gnu.h b/clang/lib/Driver/ToolChains/Gnu.h
index 3b8df71..4c42a5e5 100644
--- a/clang/lib/Driver/ToolChains/Gnu.h
+++ b/clang/lib/Driver/ToolChains/Gnu.h
@@ -9,10 +9,10 @@
#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_GNU_H
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_GNU_H
-#include "Cuda.h"
-#include "LazyDetector.h"
-#include "ROCm.h"
-#include "SYCL.h"
+#include "clang/Driver/CudaInstallationDetector.h"
+#include "clang/Driver/LazyDetector.h"
+#include "clang/Driver/RocmInstallationDetector.h"
+#include "clang/Driver/SyclInstallationDetector.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
#include <set>
diff --git a/clang/lib/Driver/ToolChains/HIPAMD.h b/clang/lib/Driver/ToolChains/HIPAMD.h
index bcc3ebb..30fc01a 100644
--- a/clang/lib/Driver/ToolChains/HIPAMD.h
+++ b/clang/lib/Driver/ToolChains/HIPAMD.h
@@ -10,6 +10,7 @@
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HIPAMD_H
#include "AMDGPU.h"
+#include "clang/Driver/SyclInstallationDetector.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
diff --git a/clang/lib/Driver/ToolChains/HIPSPV.cpp b/clang/lib/Driver/ToolChains/HIPSPV.cpp
index 643a67f..62bca04 100644
--- a/clang/lib/Driver/ToolChains/HIPSPV.cpp
+++ b/clang/lib/Driver/ToolChains/HIPSPV.cpp
@@ -69,8 +69,17 @@ void HIPSPV::Linker::constructLinkAndEmitSpirvCommand(
// Link LLVM bitcode.
ArgStringList LinkArgs{};
+
for (auto Input : Inputs)
LinkArgs.push_back(Input.getFilename());
+
+ // Add static device libraries using the common helper function.
+ // This handles unbundling archives (.a) containing bitcode bundles.
+ StringRef Arch = getToolChain().getTriple().getArchName();
+ StringRef Target =
+ "generic"; // SPIR-V is generic, no specific target ID like -mcpu
+ tools::AddStaticDeviceLibsLinking(C, *this, JA, Inputs, Args, LinkArgs, Arch,
+ Target, /*IsBitCodeSDL=*/true);
LinkArgs.append({"-o", TempFile});
const char *LlvmLink =
Args.MakeArgString(getToolChain().GetProgramPath("llvm-link"));
diff --git a/clang/lib/Driver/ToolChains/LazyDetector.h b/clang/lib/Driver/ToolChains/LazyDetector.h
deleted file mode 100644
index 813d00a..0000000
--- a/clang/lib/Driver/ToolChains/LazyDetector.h
+++ /dev/null
@@ -1,45 +0,0 @@
-//===--- LazyDetector.h - Lazy ToolChain Detection --------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_LAZYDETECTOR_H
-#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_LAZYDETECTOR_H
-
-#include "clang/Driver/Tool.h"
-#include "clang/Driver/ToolChain.h"
-#include <optional>
-
-namespace clang {
-
-/// Simple wrapper for toolchain detector with costly initialization. This
-/// delays the creation of the actual detector until its first usage.
-
-template <class T> class LazyDetector {
- const driver::Driver &D;
- llvm::Triple Triple;
- const llvm::opt::ArgList &Args;
-
- std::optional<T> Detector;
-
-public:
- LazyDetector(const driver::Driver &D, const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args)
- : D(D), Triple(Triple), Args(Args) {}
- T *operator->() {
- if (!Detector)
- Detector.emplace(D, Triple, Args);
- return &*Detector;
- }
- const T *operator->() const {
- return const_cast<T const *>(
- const_cast<LazyDetector &>(*this).operator->());
- }
-};
-
-} // end namespace clang
-
-#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_LAZYDETECTOR_H
diff --git a/clang/lib/Driver/ToolChains/MSVC.cpp b/clang/lib/Driver/ToolChains/MSVC.cpp
index 7d31eea..bb469ff 100644
--- a/clang/lib/Driver/ToolChains/MSVC.cpp
+++ b/clang/lib/Driver/ToolChains/MSVC.cpp
@@ -279,8 +279,8 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
AddRunTimeLibs(TC, TC.getDriver(), CmdArgs, Args);
}
- StringRef Linker =
- Args.getLastArgValue(options::OPT_fuse_ld_EQ, CLANG_DEFAULT_LINKER);
+ StringRef Linker = Args.getLastArgValue(options::OPT_fuse_ld_EQ,
+ TC.getDriver().getPreferredLinker());
if (Linker.empty())
Linker = "link";
// We need to translate 'lld' into 'lld-link'.
diff --git a/clang/lib/Driver/ToolChains/MSVC.h b/clang/lib/Driver/ToolChains/MSVC.h
index b35390c..5c17edc 100644
--- a/clang/lib/Driver/ToolChains/MSVC.h
+++ b/clang/lib/Driver/ToolChains/MSVC.h
@@ -9,11 +9,11 @@
#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_MSVC_H
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_MSVC_H
-#include "AMDGPU.h"
-#include "Cuda.h"
-#include "LazyDetector.h"
-#include "SYCL.h"
#include "clang/Driver/Compilation.h"
+#include "clang/Driver/CudaInstallationDetector.h"
+#include "clang/Driver/LazyDetector.h"
+#include "clang/Driver/RocmInstallationDetector.h"
+#include "clang/Driver/SyclInstallationDetector.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
#include "llvm/Frontend/Debug/Options.h"
diff --git a/clang/lib/Driver/ToolChains/MinGW.cpp b/clang/lib/Driver/ToolChains/MinGW.cpp
index b2e36ae..1bb9bcf 100644
--- a/clang/lib/Driver/ToolChains/MinGW.cpp
+++ b/clang/lib/Driver/ToolChains/MinGW.cpp
@@ -85,11 +85,18 @@ void tools::MinGW::Linker::AddLibGCC(const ArgList &Args,
CmdArgs.push_back("-lmoldname");
CmdArgs.push_back("-lmingwex");
- for (auto Lib : Args.getAllArgValues(options::OPT_l))
+ for (auto Lib : Args.getAllArgValues(options::OPT_l)) {
if (StringRef(Lib).starts_with("msvcr") ||
StringRef(Lib).starts_with("ucrt") ||
- StringRef(Lib).starts_with("crtdll"))
+ StringRef(Lib).starts_with("crtdll")) {
+ std::string CRTLib = (llvm::Twine("-l") + Lib).str();
+ // Respect the user's chosen crt variant, but still provide it
+ // again as the last linker argument, because some of the libraries
+ // we added above may depend on it.
+ CmdArgs.push_back(Args.MakeArgStringRef(CRTLib));
return;
+ }
+ }
CmdArgs.push_back("-lmsvcrt");
}
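
The loop above now re-appends the user's explicitly chosen CRT import library (msvcr*, ucrt*, or crtdll*) at the end of the link line instead of merely suppressing the default -lmsvcrt, so that the -lmoldname/-lmingwex libraries added earlier can still resolve their CRT symbols. A standalone sketch of that selection, using std::string in place of the driver's argument types:

#include <string>
#include <vector>

void appendCRT(const std::vector<std::string> &UserLibs,
               std::vector<std::string> &CmdArgs) {
  for (const std::string &Lib : UserLibs) {
    if (Lib.rfind("msvcr", 0) == 0 || Lib.rfind("ucrt", 0) == 0 ||
        Lib.rfind("crtdll", 0) == 0) {
      CmdArgs.push_back("-l" + Lib);   // repeat the user's CRT choice last
      return;
    }
  }
  CmdArgs.push_back("-lmsvcrt");       // default CRT when none was named
}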
@@ -548,7 +555,7 @@ toolchains::MinGW::MinGW(const Driver &D, const llvm::Triple &Triple,
getFilePaths().push_back(Base + "lib");
NativeLLVMSupport =
- Args.getLastArgValue(options::OPT_fuse_ld_EQ, CLANG_DEFAULT_LINKER)
+ Args.getLastArgValue(options::OPT_fuse_ld_EQ, D.getPreferredLinker())
.equals_insensitive("lld");
}
diff --git a/clang/lib/Driver/ToolChains/MinGW.h b/clang/lib/Driver/ToolChains/MinGW.h
index a9963d8..1730da4 100644
--- a/clang/lib/Driver/ToolChains/MinGW.h
+++ b/clang/lib/Driver/ToolChains/MinGW.h
@@ -11,8 +11,9 @@
#include "Cuda.h"
#include "Gnu.h"
-#include "LazyDetector.h"
-#include "ROCm.h"
+#include "clang/Driver/CudaInstallationDetector.h"
+#include "clang/Driver/LazyDetector.h"
+#include "clang/Driver/RocmInstallationDetector.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
#include "llvm/Support/ErrorOr.h"
diff --git a/clang/lib/Driver/ToolChains/ROCm.h b/clang/lib/Driver/ToolChains/ROCm.h
deleted file mode 100644
index ebd5443..0000000
--- a/clang/lib/Driver/ToolChains/ROCm.h
+++ /dev/null
@@ -1,314 +0,0 @@
-//===--- ROCm.h - ROCm installation detector --------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ROCM_H
-#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ROCM_H
-
-#include "clang/Basic/Cuda.h"
-#include "clang/Basic/LLVM.h"
-#include "clang/Driver/CommonArgs.h"
-#include "clang/Driver/Driver.h"
-#include "clang/Driver/Options.h"
-#include "clang/Driver/SanitizerArgs.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringMap.h"
-#include "llvm/Option/ArgList.h"
-#include "llvm/Support/VersionTuple.h"
-#include "llvm/TargetParser/TargetParser.h"
-#include "llvm/TargetParser/Triple.h"
-
-namespace clang {
-namespace driver {
-
-/// ABI version of device library.
-struct DeviceLibABIVersion {
- unsigned ABIVersion = 0;
- DeviceLibABIVersion(unsigned V) : ABIVersion(V) {}
- static DeviceLibABIVersion fromCodeObjectVersion(unsigned CodeObjectVersion) {
- if (CodeObjectVersion < 4)
- CodeObjectVersion = 4;
- return DeviceLibABIVersion(CodeObjectVersion * 100);
- }
- /// Whether ABI version bc file is requested.
- /// ABIVersion is code object version multiplied by 100. Code object v4
- /// and below works with ROCm 5.0 and below which does not have
- /// abi_version_*.bc. Code object v5 requires abi_version_500.bc.
- bool requiresLibrary() { return ABIVersion >= 500; }
- std::string toString() { return Twine(getAsCodeObjectVersion()).str(); }
-
- unsigned getAsCodeObjectVersion() const {
- assert(ABIVersion % 100 == 0 && "Not supported");
- return ABIVersion / 100;
- }
-};
-
-/// A class to find a viable ROCM installation
-/// TODO: Generalize to handle libclc.
-class RocmInstallationDetector {
-private:
- struct ConditionalLibrary {
- SmallString<0> On;
- SmallString<0> Off;
-
- bool isValid() const { return !On.empty() && !Off.empty(); }
-
- StringRef get(bool Enabled) const {
- assert(isValid());
- return Enabled ? On : Off;
- }
- };
-
- // Installation path candidate.
- struct Candidate {
- llvm::SmallString<0> Path;
- bool StrictChecking;
- // Release string for ROCm packages built with SPACK if not empty. The
- // installation directories of ROCm packages built with SPACK follow the
- // convention <package_name>-<rocm_release_string>-<hash>.
- std::string SPACKReleaseStr;
-
- bool isSPACK() const { return !SPACKReleaseStr.empty(); }
- Candidate(std::string Path, bool StrictChecking = false,
- StringRef SPACKReleaseStr = {})
- : Path(Path), StrictChecking(StrictChecking),
- SPACKReleaseStr(SPACKReleaseStr.str()) {}
- };
-
- struct CommonBitcodeLibsPreferences {
- CommonBitcodeLibsPreferences(const Driver &D,
- const llvm::opt::ArgList &DriverArgs,
- StringRef GPUArch,
- const Action::OffloadKind DeviceOffloadingKind,
- const bool NeedsASanRT);
-
- DeviceLibABIVersion ABIVer;
- bool IsOpenMP;
- bool Wave64;
- bool DAZ;
- bool FiniteOnly;
- bool UnsafeMathOpt;
- bool FastRelaxedMath;
- bool CorrectSqrt;
- bool GPUSan;
- };
-
- const Driver &D;
- bool HasHIPRuntime = false;
- bool HasDeviceLibrary = false;
- bool HasHIPStdParLibrary = false;
- bool HasRocThrustLibrary = false;
- bool HasRocPrimLibrary = false;
-
- // Default version if not detected or specified.
- const unsigned DefaultVersionMajor = 3;
- const unsigned DefaultVersionMinor = 5;
- const char *DefaultVersionPatch = "0";
-
- // The version string in Major.Minor.Patch format.
- std::string DetectedVersion;
- // Version containing major and minor.
- llvm::VersionTuple VersionMajorMinor;
- // Version containing patch.
- std::string VersionPatch;
-
- // ROCm path specified by --rocm-path.
- StringRef RocmPathArg;
- // ROCm device library paths specified by --rocm-device-lib-path.
- std::vector<std::string> RocmDeviceLibPathArg;
- // HIP runtime path specified by --hip-path.
- StringRef HIPPathArg;
- // HIP Standard Parallel Algorithm acceleration library specified by
- // --hipstdpar-path
- StringRef HIPStdParPathArg;
- // rocThrust algorithm library specified by --hipstdpar-thrust-path
- StringRef HIPRocThrustPathArg;
- // rocPrim algorithm library specified by --hipstdpar-prim-path
- StringRef HIPRocPrimPathArg;
- // HIP version specified by --hip-version.
- StringRef HIPVersionArg;
- // Whether -nogpulib is specified.
- bool NoBuiltinLibs = false;
-
- // Paths
- SmallString<0> InstallPath;
- SmallString<0> BinPath;
- SmallString<0> LibPath;
- SmallString<0> LibDevicePath;
- SmallString<0> IncludePath;
- SmallString<0> SharePath;
- llvm::StringMap<std::string> LibDeviceMap;
-
- // Libraries that are always linked.
- SmallString<0> OCML;
- SmallString<0> OCKL;
-
- // Libraries that are always linked depending on the language
- SmallString<0> OpenCL;
-
- // Asan runtime library
- SmallString<0> AsanRTL;
-
- // Libraries swapped based on compile flags.
- ConditionalLibrary WavefrontSize64;
- ConditionalLibrary FiniteOnly;
- ConditionalLibrary UnsafeMath;
- ConditionalLibrary DenormalsAreZero;
- ConditionalLibrary CorrectlyRoundedSqrt;
-
- // Maps ABI version to library path. The version number is in the format of
- // three digits as used in the ABI version library name.
- std::map<unsigned, std::string> ABIVersionMap;
-
- // Cache ROCm installation search paths.
- SmallVector<Candidate, 4> ROCmSearchDirs;
- bool PrintROCmSearchDirs;
- bool Verbose;
-
- bool allGenericLibsValid() const {
- return !OCML.empty() && !OCKL.empty() && !OpenCL.empty() &&
- WavefrontSize64.isValid() && FiniteOnly.isValid() &&
- UnsafeMath.isValid() && DenormalsAreZero.isValid() &&
- CorrectlyRoundedSqrt.isValid();
- }
-
- void scanLibDevicePath(llvm::StringRef Path);
- bool parseHIPVersionFile(llvm::StringRef V);
- const SmallVectorImpl<Candidate> &getInstallationPathCandidates();
-
- /// Find the path to a SPACK package under the ROCm candidate installation
- /// directory if the candidate is a SPACK ROCm candidate. \returns empty
- /// string if the candidate is not SPACK ROCm candidate or the requested
- /// package is not found.
- llvm::SmallString<0> findSPACKPackage(const Candidate &Cand,
- StringRef PackageName);
-
-public:
- RocmInstallationDetector(const Driver &D, const llvm::Triple &HostTriple,
- const llvm::opt::ArgList &Args,
- bool DetectHIPRuntime = true,
- bool DetectDeviceLib = false);
-
- /// Get file paths of default bitcode libraries common to AMDGPU based
- /// toolchains.
- llvm::SmallVector<ToolChain::BitCodeLibraryInfo, 12>
- getCommonBitcodeLibs(const llvm::opt::ArgList &DriverArgs,
- StringRef LibDeviceFile, StringRef GPUArch,
- const Action::OffloadKind DeviceOffloadingKind,
- const bool NeedsASanRT) const;
- /// Check file paths of default bitcode libraries common to AMDGPU based
- /// toolchains. \returns false if there are invalid or missing files.
- bool checkCommonBitcodeLibs(StringRef GPUArch, StringRef LibDeviceFile,
- DeviceLibABIVersion ABIVer) const;
-
- /// Check whether we detected a valid HIP runtime.
- bool hasHIPRuntime() const { return HasHIPRuntime; }
-
- /// Check whether we detected a valid ROCm device library.
- bool hasDeviceLibrary() const { return HasDeviceLibrary; }
-
- /// Check whether we detected a valid HIP STDPAR Acceleration library.
- bool hasHIPStdParLibrary() const { return HasHIPStdParLibrary; }
-
- /// Print information about the detected ROCm installation.
- void print(raw_ostream &OS) const;
-
- /// Get the detected Rocm install's version.
- // RocmVersion version() const { return Version; }
-
- /// Get the detected Rocm installation path.
- StringRef getInstallPath() const { return InstallPath; }
-
- /// Get the detected path to Rocm's bin directory.
- // StringRef getBinPath() const { return BinPath; }
-
- /// Get the detected Rocm Include path.
- StringRef getIncludePath() const { return IncludePath; }
-
- /// Get the detected Rocm library path.
- StringRef getLibPath() const { return LibPath; }
-
- /// Get the detected Rocm device library path.
- StringRef getLibDevicePath() const { return LibDevicePath; }
-
- StringRef getOCMLPath() const {
- assert(!OCML.empty());
- return OCML;
- }
-
- StringRef getOCKLPath() const {
- assert(!OCKL.empty());
- return OCKL;
- }
-
- StringRef getOpenCLPath() const {
- assert(!OpenCL.empty());
- return OpenCL;
- }
-
- /// Returns an empty string if the Asan runtime library is not available.
- StringRef getAsanRTLPath() const { return AsanRTL; }
-
- StringRef getWavefrontSize64Path(bool Enabled) const {
- return WavefrontSize64.get(Enabled);
- }
-
- StringRef getFiniteOnlyPath(bool Enabled) const {
- return FiniteOnly.get(Enabled);
- }
-
- StringRef getUnsafeMathPath(bool Enabled) const {
- return UnsafeMath.get(Enabled);
- }
-
- StringRef getDenormalsAreZeroPath(bool Enabled) const {
- return DenormalsAreZero.get(Enabled);
- }
-
- StringRef getCorrectlyRoundedSqrtPath(bool Enabled) const {
- return CorrectlyRoundedSqrt.get(Enabled);
- }
-
- StringRef getABIVersionPath(DeviceLibABIVersion ABIVer) const {
- auto Loc = ABIVersionMap.find(ABIVer.ABIVersion);
- if (Loc == ABIVersionMap.end())
- return StringRef();
- return Loc->second;
- }
-
- /// Get libdevice file for given architecture
- StringRef getLibDeviceFile(StringRef Gpu) const {
- auto Loc = LibDeviceMap.find(Gpu);
- if (Loc == LibDeviceMap.end())
- return "";
- return Loc->second;
- }
-
- void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const;
-
- void detectDeviceLibrary();
- void detectHIPRuntime();
-
- /// Get the values for --rocm-device-lib-path arguments
- ArrayRef<std::string> getRocmDeviceLibPathArg() const {
- return RocmDeviceLibPathArg;
- }
-
- /// Get the value for --rocm-path argument
- StringRef getRocmPathArg() const { return RocmPathArg; }
-
- /// Get the value for --hip-version argument
- StringRef getHIPVersionArg() const { return HIPVersionArg; }
-
- StringRef getHIPVersion() const { return DetectedVersion; }
-};
-
-} // end namespace driver
-} // end namespace clang
-
-#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ROCM_H
diff --git a/clang/lib/Driver/ToolChains/SYCL.h b/clang/lib/Driver/ToolChains/SYCL.h
index 2a8b4ec..be4ba47 100644
--- a/clang/lib/Driver/ToolChains/SYCL.h
+++ b/clang/lib/Driver/ToolChains/SYCL.h
@@ -9,21 +9,12 @@
#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_SYCL_H
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_SYCL_H
+#include "clang/Driver/SyclInstallationDetector.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
namespace clang {
namespace driver {
-
-class SYCLInstallationDetector {
-public:
- SYCLInstallationDetector(const Driver &D, const llvm::Triple &HostTriple,
- const llvm::opt::ArgList &Args);
-
- void addSYCLIncludeArgs(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const;
-};
-
namespace toolchains {
class LLVM_LIBRARY_VISIBILITY SYCLToolChain : public ToolChain {
diff --git a/clang/lib/Driver/ToolChains/Solaris.cpp b/clang/lib/Driver/ToolChains/Solaris.cpp
index a3574e1..02aa598 100644
--- a/clang/lib/Driver/ToolChains/Solaris.cpp
+++ b/clang/lib/Driver/ToolChains/Solaris.cpp
@@ -39,7 +39,7 @@ void solaris::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
bool solaris::isLinkerGnuLd(const ToolChain &TC, const ArgList &Args) {
// Only used if targeting Solaris.
const Arg *A = Args.getLastArg(options::OPT_fuse_ld_EQ);
- StringRef UseLinker = A ? A->getValue() : CLANG_DEFAULT_LINKER;
+ StringRef UseLinker = A ? A->getValue() : TC.getDriver().getPreferredLinker();
return UseLinker == "bfd" || UseLinker == "gld";
}
@@ -52,7 +52,7 @@ static bool getPIE(const ArgList &Args, const ToolChain &TC) {
TC.isPIEDefault(Args));
}
-// FIXME: Need to handle CLANG_DEFAULT_LINKER here?
+// FIXME: Need to handle PreferredLinker here?
std::string solaris::Linker::getLinkerPath(const ArgList &Args) const {
const ToolChain &ToolChain = getToolChain();
if (const Arg *A = Args.getLastArg(options::OPT_fuse_ld_EQ)) {
@@ -345,7 +345,7 @@ SanitizerMask Solaris::getSupportedSanitizers() const {
const char *Solaris::getDefaultLinker() const {
// FIXME: Only handle Solaris ld and GNU ld here.
- return llvm::StringSwitch<const char *>(CLANG_DEFAULT_LINKER)
+ return llvm::StringSwitch<const char *>(getDriver().getPreferredLinker())
.Cases("bfd", "gld", "/usr/gnu/bin/ld")
.Default("/usr/bin/ld");
}
diff --git a/clang/lib/Driver/ToolChains/UEFI.cpp b/clang/lib/Driver/ToolChains/UEFI.cpp
index ac6668e..2b41173 100644
--- a/clang/lib/Driver/ToolChains/UEFI.cpp
+++ b/clang/lib/Driver/ToolChains/UEFI.cpp
@@ -83,8 +83,8 @@ void tools::uefi::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// This should ideally be handled by ToolChain::GetLinkerPath but we need
// to special case some linker paths. In the case of lld, we need to
// translate 'lld' into 'lld-link'.
- StringRef Linker =
- Args.getLastArgValue(options::OPT_fuse_ld_EQ, CLANG_DEFAULT_LINKER);
+ StringRef Linker = Args.getLastArgValue(options::OPT_fuse_ld_EQ,
+ TC.getDriver().getPreferredLinker());
if (Linker.empty() || Linker == "lld")
Linker = "lld-link";
diff --git a/clang/lib/Format/ContinuationIndenter.cpp b/clang/lib/Format/ContinuationIndenter.cpp
index bf67f9e..9a10403 100644
--- a/clang/lib/Format/ContinuationIndenter.cpp
+++ b/clang/lib/Format/ContinuationIndenter.cpp
@@ -1725,7 +1725,8 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
}
if (Previous && (Previous->isOneOf(TT_BinaryOperator, TT_ConditionalExpr) ||
(Previous->isOneOf(tok::l_paren, tok::comma, tok::colon) &&
- !Previous->isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)))) {
+ !Previous->isOneOf(TT_DictLiteral, TT_ObjCMethodExpr,
+ TT_CtorInitializerColon)))) {
CurrentState.NestedBlockInlined =
!Newline && hasNestedBlockInlined(Previous, Current, Style);
}
diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp
index 62feb3d..0637807 100644
--- a/clang/lib/Format/Format.cpp
+++ b/clang/lib/Format/Format.cpp
@@ -731,6 +731,7 @@ template <> struct MappingTraits<FormatStyle::SpaceBeforeParensCustom> {
IO.mapOptional("AfterFunctionDeclarationName",
Spacing.AfterFunctionDeclarationName);
IO.mapOptional("AfterIfMacros", Spacing.AfterIfMacros);
+ IO.mapOptional("AfterNot", Spacing.AfterNot);
IO.mapOptional("AfterOverloadedOperator", Spacing.AfterOverloadedOperator);
IO.mapOptional("AfterPlacementOperator", Spacing.AfterPlacementOperator);
IO.mapOptional("AfterRequiresInClause", Spacing.AfterRequiresInClause);
@@ -1753,7 +1754,6 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
GoogleStyle.AttributeMacros.push_back("absl_nullable");
GoogleStyle.AttributeMacros.push_back("absl_nullability_unknown");
GoogleStyle.BreakTemplateDeclarations = FormatStyle::BTDS_Yes;
- GoogleStyle.DerivePointerAlignment = true;
GoogleStyle.IncludeStyle.IncludeBlocks = tooling::IncludeStyle::IBS_Regroup;
GoogleStyle.IncludeStyle.IncludeCategories = {{"^<ext/.*\\.h>", 2, 0, false},
{"^<.*\\.h>", 1, 0, false},
@@ -1862,6 +1862,7 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
} else if (Language == FormatStyle::LK_ObjC) {
GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
GoogleStyle.ColumnLimit = 100;
+ GoogleStyle.DerivePointerAlignment = true;
// "Regroup" doesn't work well for ObjC yet (main header heuristic,
// relationship between ObjC standard library headers and other headers,
// #imports, etc.)
@@ -2638,31 +2639,44 @@ private:
int countVariableAlignments(const SmallVectorImpl<AnnotatedLine *> &Lines) {
int AlignmentDiff = 0;
+
for (const AnnotatedLine *Line : Lines) {
AlignmentDiff += countVariableAlignments(Line->Children);
- for (FormatToken *Tok = Line->First; Tok && Tok->Next; Tok = Tok->Next) {
+
+ for (const auto *Tok = Line->getFirstNonComment(); Tok; Tok = Tok->Next) {
if (Tok->isNot(TT_PointerOrReference))
continue;
- // Don't treat space in `void foo() &&` as evidence.
- if (const auto *Prev = Tok->getPreviousNonComment()) {
- if (Prev->is(tok::r_paren) && Prev->MatchingParen) {
- if (const auto *Func =
- Prev->MatchingParen->getPreviousNonComment()) {
- if (Func->isOneOf(TT_FunctionDeclarationName, TT_StartOfName,
- TT_OverloadedOperator)) {
- continue;
- }
- }
- }
+
+ const auto *Prev = Tok->Previous;
+ const bool PrecededByName = Prev && Prev->Tok.getIdentifierInfo();
+ const bool SpaceBefore = Tok->hasWhitespaceBefore();
+
+ // e.g. `int **`, `int*&`, etc.
+ while (Tok->Next && Tok->Next->is(TT_PointerOrReference))
+ Tok = Tok->Next;
+
+ const auto *Next = Tok->Next;
+ const bool FollowedByName = Next && Next->Tok.getIdentifierInfo();
+ const bool SpaceAfter = Next && Next->hasWhitespaceBefore();
+
+ if ((!PrecededByName && !FollowedByName) ||
+ // e.g. `int * i` or `int*i`
+ (PrecededByName && FollowedByName && SpaceBefore == SpaceAfter)) {
+ continue;
}
- bool SpaceBefore = Tok->hasWhitespaceBefore();
- bool SpaceAfter = Tok->Next->hasWhitespaceBefore();
- if (SpaceBefore && !SpaceAfter)
+
+ if ((PrecededByName && SpaceBefore) ||
+ (FollowedByName && !SpaceAfter)) {
+ // Right alignment.
++AlignmentDiff;
- if (!SpaceBefore && SpaceAfter)
+ } else if ((PrecededByName && !SpaceBefore) ||
+ (FollowedByName && SpaceAfter)) {
+ // Left alignment.
--AlignmentDiff;
+ }
}
}
+
return AlignmentDiff;
}
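
The rewritten heuristic above effectively casts one vote per pointer/reference declarator: a space before the star but not after it (as in "int *i") counts toward right alignment, the mirror image ("int* i") toward left alignment, and runs with no adjacent identifier or with symmetric spacing abstain. The same rule in isolation, reduced to four booleans (a sketch, not the FormatToken-based code):

// Returns +1 for a right-alignment vote, -1 for left, 0 to abstain.
int alignmentVote(bool PrecededByName, bool SpaceBefore,
                  bool FollowedByName, bool SpaceAfter) {
  if (!PrecededByName && !FollowedByName)
    return 0;                                   // no identifier on either side
  if (PrecededByName && FollowedByName && SpaceBefore == SpaceAfter)
    return 0;                                   // "int * i" or "int*i"
  if ((PrecededByName && SpaceBefore) || (FollowedByName && !SpaceAfter))
    return +1;                                  // e.g. "int *i"
  return -1;                                    // e.g. "int* i"
}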
diff --git a/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp b/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp
index 80487fa..7772a56 100644
--- a/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp
+++ b/clang/lib/Format/IntegerLiteralSeparatorFixer.cpp
@@ -45,15 +45,18 @@ std::pair<tooling::Replacements, unsigned>
IntegerLiteralSeparatorFixer::process(const Environment &Env,
const FormatStyle &Style) {
switch (Style.Language) {
- case FormatStyle::LK_Cpp:
- case FormatStyle::LK_ObjC:
- Separator = '\'';
- break;
case FormatStyle::LK_CSharp:
case FormatStyle::LK_Java:
case FormatStyle::LK_JavaScript:
Separator = '_';
break;
+ case FormatStyle::LK_Cpp:
+ case FormatStyle::LK_ObjC:
+ if (Style.Standard >= FormatStyle::LS_Cpp14) {
+ Separator = '\'';
+ break;
+ }
+ [[fallthrough]];
default:
return {};
}
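
The reordered switch above makes the fixer honor the language standard: C++ and Objective-C only gained the single-quote digit separator in C++14, so under older standards the fixer now returns without touching literals rather than producing code that will not compile. For illustration:

// Accepted from -std=c++14 onward; a hard error under -std=c++11.
constexpr long long Billion = 1'000'000'000;

// Pre-C++14 sources must keep the literal unseparated.
constexpr long long BillionCxx11 = 1000000000;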
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index 581bfba..d28d2fd 100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -5478,7 +5478,8 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.TokenText == "!")
return Style.SpaceAfterLogicalNot;
assert(Left.TokenText == "not");
- return Right.isOneOf(tok::coloncolon, TT_UnaryOperator);
+ return Right.isOneOf(tok::coloncolon, TT_UnaryOperator) ||
+ (Right.is(tok::l_paren) && Style.SpaceBeforeParensOptions.AfterNot);
}
// If the next token is a binary operator or a selector name, we have
diff --git a/clang/lib/Frontend/ASTUnit.cpp b/clang/lib/Frontend/ASTUnit.cpp
index 67ed17b..09caf85 100644
--- a/clang/lib/Frontend/ASTUnit.cpp
+++ b/clang/lib/Frontend/ASTUnit.cpp
@@ -1773,7 +1773,7 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromCompilerInvocation(
if (AST->LoadFromCompilerInvocation(std::move(PCHContainerOps),
PrecompilePreambleAfterNParses,
- &AST->FileMgr->getVirtualFileSystem()))
+ AST->FileMgr->getVirtualFileSystemPtr()))
return nullptr;
return AST;
}
@@ -1895,7 +1895,7 @@ bool ASTUnit::Reparse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
if (!VFS) {
assert(FileMgr && "FileMgr is null on Reparse call");
- VFS = &FileMgr->getVirtualFileSystem();
+ VFS = FileMgr->getVirtualFileSystemPtr();
}
clearFileLevelDecls();
@@ -2321,7 +2321,8 @@ void ASTUnit::CodeComplete(
std::unique_ptr<llvm::MemoryBuffer> OverrideMainBuffer;
if (Preamble && Line > 1 && hasSameUniqueID(File, OriginalSourceFile)) {
OverrideMainBuffer = getMainBufferWithPrecompiledPreamble(
- PCHContainerOps, Inv, &FileMgr.getVirtualFileSystem(), false, Line - 1);
+ PCHContainerOps, Inv, FileMgr.getVirtualFileSystemPtr(), false,
+ Line - 1);
}
// If the main file has been overridden due to the use of a preamble,
@@ -2331,7 +2332,7 @@ void ASTUnit::CodeComplete(
"No preamble was built, but OverrideMainBuffer is not null");
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS =
- &FileMgr.getVirtualFileSystem();
+ FileMgr.getVirtualFileSystemPtr();
Preamble->AddImplicitPreamble(Clang->getInvocation(), VFS,
OverrideMainBuffer.get());
// FIXME: there is no way to update VFS if it was changed by
diff --git a/clang/lib/Frontend/CompilerInstance.cpp b/clang/lib/Frontend/CompilerInstance.cpp
index c7b82db..40fb070 100644
--- a/clang/lib/Frontend/CompilerInstance.cpp
+++ b/clang/lib/Frontend/CompilerInstance.cpp
@@ -160,6 +160,11 @@ llvm::vfs::FileSystem &CompilerInstance::getVirtualFileSystem() const {
return getFileManager().getVirtualFileSystem();
}
+llvm::IntrusiveRefCntPtr<llvm::vfs::FileSystem>
+CompilerInstance::getVirtualFileSystemPtr() const {
+ return getFileManager().getVirtualFileSystemPtr();
+}
+
void CompilerInstance::setFileManager(FileManager *Value) {
FileMgr = Value;
}
@@ -375,7 +380,7 @@ IntrusiveRefCntPtr<DiagnosticsEngine> CompilerInstance::createDiagnostics(
FileManager *CompilerInstance::createFileManager(
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
if (!VFS)
- VFS = FileMgr ? &FileMgr->getVirtualFileSystem()
+ VFS = FileMgr ? FileMgr->getVirtualFileSystemPtr()
: createVFSFromCompilerInvocation(getInvocation(),
getDiagnostics());
assert(VFS && "FileManager has no VFS?");
@@ -1218,7 +1223,7 @@ std::unique_ptr<CompilerInstance> CompilerInstance::cloneForModuleCompileImpl(
} else if (FrontendOpts.ModulesShareFileManager) {
Instance.setFileManager(&getFileManager());
} else {
- Instance.createFileManager(&getVirtualFileSystem());
+ Instance.createFileManager(getVirtualFileSystemPtr());
}
if (ThreadSafeConfig) {
diff --git a/clang/lib/Frontend/FrontendActions.cpp b/clang/lib/Frontend/FrontendActions.cpp
index dcfbd53..685a9bb 100644
--- a/clang/lib/Frontend/FrontendActions.cpp
+++ b/clang/lib/Frontend/FrontendActions.cpp
@@ -181,7 +181,7 @@ bool GeneratePCHAction::shouldEraseOutputFiles() {
bool GeneratePCHAction::BeginSourceFileAction(CompilerInstance &CI) {
CI.getLangOpts().CompilingPCH = true;
- return true;
+ return ASTFrontendAction::BeginSourceFileAction(CI);
}
std::vector<std::unique_ptr<ASTConsumer>>
diff --git a/clang/lib/Frontend/InitPreprocessor.cpp b/clang/lib/Frontend/InitPreprocessor.cpp
index 382ccd6..008a35d 100644
--- a/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/clang/lib/Frontend/InitPreprocessor.cpp
@@ -945,8 +945,8 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
if (LangOpts.GNUCVersion && LangOpts.CPlusPlus11)
Builder.defineMacro("__GXX_EXPERIMENTAL_CXX0X__");
- if (TI.getTriple().isWindowsGNUEnvironment()) {
- // Set ABI defining macros for libstdc++ for MinGW, where the
+ if (TI.getTriple().isOSCygMing()) {
+ // Set ABI defining macros for libstdc++ for MinGW and Cygwin, where the
// default in libstdc++ differs from the defaults for this target.
Builder.defineMacro("__GXX_TYPEINFO_EQUALITY_INLINE", "0");
}
diff --git a/clang/lib/Frontend/PrecompiledPreamble.cpp b/clang/lib/Frontend/PrecompiledPreamble.cpp
index 146cf90..486cd95 100644
--- a/clang/lib/Frontend/PrecompiledPreamble.cpp
+++ b/clang/lib/Frontend/PrecompiledPreamble.cpp
@@ -57,11 +57,9 @@ createVFSOverlayForPreamblePCH(StringRef PCHFilename,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
// We want only the PCH file from the real filesystem to be available,
// so we create an in-memory VFS with just that and overlay it on top.
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> PCHFS(
- new llvm::vfs::InMemoryFileSystem());
+ auto PCHFS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
PCHFS->addFile(PCHFilename, 0, std::move(PCHBuffer));
- IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> Overlay(
- new llvm::vfs::OverlayFileSystem(VFS));
+ auto Overlay = llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(VFS);
Overlay->pushOverlay(PCHFS);
return Overlay;
}
diff --git a/clang/lib/Frontend/Rewrite/FrontendActions.cpp b/clang/lib/Frontend/Rewrite/FrontendActions.cpp
index 84e7a4f..6c9c9d5 100644
--- a/clang/lib/Frontend/Rewrite/FrontendActions.cpp
+++ b/clang/lib/Frontend/Rewrite/FrontendActions.cpp
@@ -103,12 +103,13 @@ bool FixItAction::BeginSourceFileAction(CompilerInstance &CI) {
}
Rewriter.reset(new FixItRewriter(CI.getDiagnostics(), CI.getSourceManager(),
CI.getLangOpts(), FixItOpts.get()));
- return true;
+ return ASTFrontendAction::BeginSourceFileAction(CI);
}
void FixItAction::EndSourceFileAction() {
// Otherwise rewrite all files.
Rewriter->WriteFixedFiles();
+ ASTFrontendAction::EndSourceFileAction();
}
bool FixItRecompile::BeginInvocation(CompilerInstance &CI) {
@@ -298,7 +299,7 @@ bool RewriteIncludesAction::BeginSourceFileAction(CompilerInstance &CI) {
std::make_unique<RewriteImportsListener>(CI, OutputStream));
}
- return true;
+ return PreprocessorFrontendAction::BeginSourceFileAction(CI);
}
void RewriteIncludesAction::ExecuteAction() {
diff --git a/clang/lib/Headers/avx10_2_512niintrin.h b/clang/lib/Headers/avx10_2_512niintrin.h
index 7e614f7..9d96e36c7 100644
--- a/clang/lib/Headers/avx10_2_512niintrin.h
+++ b/clang/lib/Headers/avx10_2_512niintrin.h
@@ -197,7 +197,7 @@ _mm512_mask_dpwsud_epi32(__m512i __A, __mmask16 __U, __m512i __B, __m512i __C) {
}
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_dpwsud_epi32(
- __m512i __A, __mmask16 __U, __m512i __B, __m512i __C) {
+ __mmask16 __U, __m512i __A, __m512i __B, __m512i __C) {
return (__m512i)__builtin_ia32_selectd_512(
(__mmask16)__U, (__v16si)_mm512_dpwsud_epi32(__A, __B, __C),
(__v16si)_mm512_setzero_si512());
@@ -218,7 +218,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_dpwsuds_epi32(
}
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_dpwsuds_epi32(
- __m512i __A, __mmask16 __U, __m512i __B, __m512i __C) {
+ __mmask16 __U, __m512i __A, __m512i __B, __m512i __C) {
return (__m512i)__builtin_ia32_selectd_512(
(__mmask16)__U, (__v16si)_mm512_dpwsuds_epi32(__A, __B, __C),
(__v16si)_mm512_setzero_si512());
@@ -239,7 +239,7 @@ _mm512_mask_dpwusd_epi32(__m512i __A, __mmask16 __U, __m512i __B, __m512i __C) {
}
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_dpwusd_epi32(
- __m512i __A, __mmask16 __U, __m512i __B, __m512i __C) {
+ __mmask16 __U, __m512i __A, __m512i __B, __m512i __C) {
return (__m512i)__builtin_ia32_selectd_512(
(__mmask16)__U, (__v16si)_mm512_dpwusd_epi32(__A, __B, __C),
(__v16si)_mm512_setzero_si512());
@@ -260,7 +260,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_dpwusds_epi32(
}
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_dpwusds_epi32(
- __m512i __A, __mmask16 __U, __m512i __B, __m512i __C) {
+ __mmask16 __U, __m512i __A, __m512i __B, __m512i __C) {
return (__m512i)__builtin_ia32_selectd_512(
(__mmask16)__U, (__v16si)_mm512_dpwusds_epi32(__A, __B, __C),
(__v16si)_mm512_setzero_si512());
@@ -281,7 +281,7 @@ _mm512_mask_dpwuud_epi32(__m512i __A, __mmask16 __U, __m512i __B, __m512i __C) {
}
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_dpwuud_epi32(
- __m512i __A, __mmask16 __U, __m512i __B, __m512i __C) {
+ __mmask16 __U, __m512i __A, __m512i __B, __m512i __C) {
return (__m512i)__builtin_ia32_selectd_512(
(__mmask16)__U, (__v16si)_mm512_dpwuud_epi32(__A, __B, __C),
(__v16si)_mm512_setzero_si512());
@@ -302,7 +302,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_mask_dpwuuds_epi32(
}
static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_dpwuuds_epi32(
- __m512i __A, __mmask16 __U, __m512i __B, __m512i __C) {
+ __mmask16 __U, __m512i __A, __m512i __B, __m512i __C) {
return (__m512i)__builtin_ia32_selectd_512(
(__mmask16)__U, (__v16si)_mm512_dpwuuds_epi32(__A, __B, __C),
(__v16si)_mm512_setzero_si512());
diff --git a/clang/lib/Headers/avx10_2niintrin.h b/clang/lib/Headers/avx10_2niintrin.h
index 992be18..d5a66cf 100644
--- a/clang/lib/Headers/avx10_2niintrin.h
+++ b/clang/lib/Headers/avx10_2niintrin.h
@@ -253,7 +253,7 @@ _mm_mask_dpwsud_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_dpwsud_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
+_mm_maskz_dpwsud_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
return (__m128i)__builtin_ia32_selectd_128(
(__mmask8)__U, (__v4si)_mm_dpwsud_epi32(__A, __B, __C),
(__v4si)_mm_setzero_si128());
@@ -266,7 +266,7 @@ _mm256_mask_dpwsud_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_dpwsud_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
+_mm256_maskz_dpwsud_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
return (__m256i)__builtin_ia32_selectd_256(
(__mmask8)__U, (__v8si)_mm256_dpwsud_epi32(__A, __B, __C),
(__v8si)_mm256_setzero_si256());
@@ -279,7 +279,7 @@ _mm_mask_dpwsuds_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_dpwsuds_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
+_mm_maskz_dpwsuds_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
return (__m128i)__builtin_ia32_selectd_128(
(__mmask8)__U, (__v4si)_mm_dpwsuds_epi32(__A, __B, __C),
(__v4si)_mm_setzero_si128());
@@ -292,7 +292,7 @@ _mm256_mask_dpwsuds_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_dpwsuds_epi32(
- __m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
+ __mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
return (__m256i)__builtin_ia32_selectd_256(
(__mmask8)__U, (__v8si)_mm256_dpwsuds_epi32(__A, __B, __C),
(__v8si)_mm256_setzero_si256());
@@ -305,7 +305,7 @@ _mm_mask_dpwusd_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_dpwusd_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
+_mm_maskz_dpwusd_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
return (__m128i)__builtin_ia32_selectd_128(
(__mmask8)__U, (__v4si)_mm_dpwusd_epi32(__A, __B, __C),
(__v4si)_mm_setzero_si128());
@@ -318,7 +318,7 @@ _mm256_mask_dpwusd_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_dpwusd_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
+_mm256_maskz_dpwusd_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
return (__m256i)__builtin_ia32_selectd_256(
(__mmask8)__U, (__v8si)_mm256_dpwusd_epi32(__A, __B, __C),
(__v8si)_mm256_setzero_si256());
@@ -331,7 +331,7 @@ _mm_mask_dpwusds_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_dpwusds_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
+_mm_maskz_dpwusds_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
return (__m128i)__builtin_ia32_selectd_128(
(__mmask8)__U, (__v4si)_mm_dpwusds_epi32(__A, __B, __C),
(__v4si)_mm_setzero_si128());
@@ -344,7 +344,7 @@ _mm256_mask_dpwusds_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_dpwusds_epi32(
- __m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
+ __mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
return (__m256i)__builtin_ia32_selectd_256(
(__mmask8)__U, (__v8si)_mm256_dpwusds_epi32(__A, __B, __C),
(__v8si)_mm256_setzero_si256());
@@ -357,7 +357,7 @@ _mm_mask_dpwuud_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_dpwuud_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
+_mm_maskz_dpwuud_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
return (__m128i)__builtin_ia32_selectd_128(
(__mmask8)__U, (__v4si)_mm_dpwuud_epi32(__A, __B, __C),
(__v4si)_mm_setzero_si128());
@@ -370,7 +370,7 @@ _mm256_mask_dpwuud_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
-_mm256_maskz_dpwuud_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
+_mm256_maskz_dpwuud_epi32(__mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
return (__m256i)__builtin_ia32_selectd_256(
(__mmask8)__U, (__v8si)_mm256_dpwuud_epi32(__A, __B, __C),
(__v8si)_mm256_setzero_si256());
@@ -383,7 +383,7 @@ _mm_mask_dpwuuds_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
-_mm_maskz_dpwuuds_epi32(__m128i __A, __mmask8 __U, __m128i __B, __m128i __C) {
+_mm_maskz_dpwuuds_epi32(__mmask8 __U, __m128i __A, __m128i __B, __m128i __C) {
return (__m128i)__builtin_ia32_selectd_128(
(__mmask8)__U, (__v4si)_mm_dpwuuds_epi32(__A, __B, __C),
(__v4si)_mm_setzero_si128());
@@ -396,7 +396,7 @@ _mm256_mask_dpwuuds_epi32(__m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256 _mm256_maskz_dpwuuds_epi32(
- __m256i __A, __mmask8 __U, __m256i __B, __m256i __C) {
+ __mmask8 __U, __m256i __A, __m256i __B, __m256i __C) {
return (__m256i)__builtin_ia32_selectd_256(
(__mmask8)__U, (__v8si)_mm256_dpwuuds_epi32(__A, __B, __C),
(__v8si)_mm256_setzero_si256());
diff --git a/clang/lib/Headers/hlsl/hlsl_intrinsic_helpers.h b/clang/lib/Headers/hlsl/hlsl_intrinsic_helpers.h
index e8ccccb..c877234 100644
--- a/clang/lib/Headers/hlsl/hlsl_intrinsic_helpers.h
+++ b/clang/lib/Headers/hlsl/hlsl_intrinsic_helpers.h
@@ -12,7 +12,7 @@
namespace hlsl {
namespace __detail {
-constexpr vector<uint, 4> d3d_color_to_ubyte4_impl(vector<float, 4> V) {
+constexpr int4 d3d_color_to_ubyte4_impl(float4 V) {
// Use the same scaling factor used by FXC, and DXC for DXIL
// (i.e., 255.001953)
// https://github.com/microsoft/DirectXShaderCompiler/blob/070d0d5a2beacef9eeb51037a9b04665716fd6f3/lib/HLSL/HLOperationLower.cpp#L666C1-L697C2
diff --git a/clang/lib/Headers/hlsl/hlsl_intrinsics.h b/clang/lib/Headers/hlsl/hlsl_intrinsics.h
index 499a053..d9d87c8 100644
--- a/clang/lib/Headers/hlsl/hlsl_intrinsics.h
+++ b/clang/lib/Headers/hlsl/hlsl_intrinsics.h
@@ -418,7 +418,7 @@ const inline float4 lit(float NDotL, float NDotH, float M) {
/// This function swizzles and scales components of the \a x parameter. Use this
/// function to compensate for the lack of UBYTE4 support in some hardware.
-constexpr vector<uint, 4> D3DCOLORtoUBYTE4(vector<float, 4> V) {
+constexpr int4 D3DCOLORtoUBYTE4(float4 V) {
return __detail::d3d_color_to_ubyte4_impl(V);
}
diff --git a/clang/lib/Headers/opencl-c-base.h b/clang/lib/Headers/opencl-c-base.h
index 2b7f504..6206a34 100644
--- a/clang/lib/Headers/opencl-c-base.h
+++ b/clang/lib/Headers/opencl-c-base.h
@@ -697,7 +697,16 @@ template <typename _Tp> struct __remove_address_space<__constant _Tp> {
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
// OpenCL v1.2 s6.12.13, v2.0 s6.13.13 - printf
-int printf(__constant const char* st, ...) __attribute__((format(printf, 1, 2)));
+#ifdef __OPENCL_CPP_VERSION__
+#define CLINKAGE extern "C"
+#else
+#define CLINKAGE
+#endif
+
+CLINKAGE int printf(__constant const char *st, ...)
+ __attribute__((format(printf, 1, 2)));
+
+#undef CLINKAGE
#endif
#ifdef cl_intel_device_side_avc_motion_estimation
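
The CLINKAGE dance above gives printf C language linkage when the header is parsed as OpenCL C++, so the declaration resolves to the same symbol the runtime actually exports; in plain OpenCL C the macro expands to nothing. The general shape of the pattern, sketched with the ordinary __cplusplus guard (the OpenCL header keys on __OPENCL_CPP_VERSION__ instead, and the macro and function names here are made up for the sketch):

#ifdef __cplusplus
#define MY_CLINKAGE extern "C"
#else
#define MY_CLINKAGE
#endif

/* One declaration that resolves to the same C symbol in both languages. */
MY_CLINKAGE int my_printf(const char *fmt, ...);

#undef MY_CLINKAGE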
diff --git a/clang/lib/Headers/opencl-c.h b/clang/lib/Headers/opencl-c.h
index e1e0fde..f65b4b3 100644
--- a/clang/lib/Headers/opencl-c.h
+++ b/clang/lib/Headers/opencl-c.h
@@ -18410,6 +18410,22 @@ intel_sub_group_avc_mce_convert_to_sic_result(
#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : end
#endif // cl_intel_device_side_avc_motion_estimation
+#if defined(cl_intel_bfloat16_conversions)
+ushort __ovld intel_convert_bfloat16_as_ushort(float source);
+ushort2 __ovld intel_convert_bfloat162_as_ushort2(float2 source);
+ushort3 __ovld intel_convert_bfloat163_as_ushort3(float3 source);
+ushort4 __ovld intel_convert_bfloat164_as_ushort4(float4 source);
+ushort8 __ovld intel_convert_bfloat168_as_ushort8(float8 source);
+ushort16 __ovld intel_convert_bfloat1616_as_ushort16(float16 source);
+
+float __ovld intel_convert_as_bfloat16_float(ushort source);
+float2 __ovld intel_convert_as_bfloat162_float2(ushort2 source);
+float3 __ovld intel_convert_as_bfloat163_float3(ushort3 source);
+float4 __ovld intel_convert_as_bfloat164_float4(ushort4 source);
+float8 __ovld intel_convert_as_bfloat168_float8(ushort8 source);
+float16 __ovld intel_convert_as_bfloat1616_float16(ushort16 source);
+#endif // cl_intel_bfloat16_conversions
+
#ifdef cl_amd_media_ops
uint __ovld amd_bitalign(uint, uint, uint);
uint2 __ovld amd_bitalign(uint2, uint2, uint2);
diff --git a/clang/lib/Lex/PPMacroExpansion.cpp b/clang/lib/Lex/PPMacroExpansion.cpp
index 890567c..6f12ac8 100644
--- a/clang/lib/Lex/PPMacroExpansion.cpp
+++ b/clang/lib/Lex/PPMacroExpansion.cpp
@@ -1760,7 +1760,8 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
Tok, *this, diag::err_feature_check_malformed);
if (!II)
return false;
- else if (II->getBuiltinID() != 0) {
+ unsigned BuiltinID = II->getBuiltinID();
+ if (BuiltinID != 0) {
switch (II->getBuiltinID()) {
case Builtin::BI__builtin_cpu_is:
return getTargetInfo().supportsCpuIs();
@@ -1774,8 +1775,11 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
// usual allocation and deallocation functions. Required by libc++
return 201802;
default:
+ // __has_builtin should return false for aux builtins.
+ if (getBuiltinInfo().isAuxBuiltinID(BuiltinID))
+ return false;
return Builtin::evaluateRequiredTargetFeatures(
- getBuiltinInfo().getRequiredFeatures(II->getBuiltinID()),
+ getBuiltinInfo().getRequiredFeatures(BuiltinID),
getTargetInfo().getTargetOpts().FeatureMap);
}
return true;
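
With the added isAuxBuiltinID check, __has_builtin stops reporting builtins that belong only to the auxiliary target, e.g. host builtins that are visible during a device-side CUDA/HIP compile purely for semantic checking. Illustrative usage, with an arbitrary x86 builtin as the example:

#if __has_builtin(__builtin_ia32_pause)
  // Taken when compiling for an x86 target that really provides the builtin.
#else
  // Device-side compiles where it is only an aux builtin now land here.
#endif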
diff --git a/clang/lib/Parse/ParseDecl.cpp b/clang/lib/Parse/ParseDecl.cpp
index 893ef02..e47caeb 100644
--- a/clang/lib/Parse/ParseDecl.cpp
+++ b/clang/lib/Parse/ParseDecl.cpp
@@ -5695,11 +5695,10 @@ Parser::DeclGroupPtrTy Parser::ParseTopLevelStmtDecl() {
Scope::CompoundStmtScope);
TopLevelStmtDecl *TLSD = Actions.ActOnStartTopLevelStmtDecl(getCurScope());
StmtResult R = ParseStatementOrDeclaration(Stmts, SubStmtCtx);
+ Actions.ActOnFinishTopLevelStmtDecl(TLSD, R.get());
if (!R.isUsable())
R = Actions.ActOnNullStmt(Tok.getLocation());
- Actions.ActOnFinishTopLevelStmtDecl(TLSD, R.get());
-
if (Tok.is(tok::annot_repl_input_end) &&
Tok.getAnnotationValue() != nullptr) {
ConsumeAnnotationToken();
diff --git a/clang/lib/Parse/ParseDeclCXX.cpp b/clang/lib/Parse/ParseDeclCXX.cpp
index 31392d1d..bc8841c 100644
--- a/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/clang/lib/Parse/ParseDeclCXX.cpp
@@ -4940,9 +4940,8 @@ void Parser::ParseHLSLRootSignatureAttributeArgs(ParsedAttributes &Attrs) {
// signature string and construct the in-memory elements
if (!Found) {
// Invoke the root signature parser to construct the in-memory constructs
- SmallVector<hlsl::RootSignatureElement> RootElements;
- hlsl::RootSignatureParser Parser(getLangOpts().HLSLRootSigVer, RootElements,
- Signature, PP);
+ hlsl::RootSignatureParser Parser(getLangOpts().HLSLRootSigVer, Signature,
+ PP);
if (Parser.parse()) {
T.consumeClose();
return;
@@ -4950,7 +4949,7 @@ void Parser::ParseHLSLRootSignatureAttributeArgs(ParsedAttributes &Attrs) {
// Construct the declaration.
Actions.HLSL().ActOnFinishRootSignatureDecl(RootSignatureLoc, DeclIdent,
- RootElements);
+ Parser.getElements());
}
// Create the arg for the ParsedAttr
diff --git a/clang/lib/Parse/ParseHLSLRootSignature.cpp b/clang/lib/Parse/ParseHLSLRootSignature.cpp
index db9ed83..98dc458 100644
--- a/clang/lib/Parse/ParseHLSLRootSignature.cpp
+++ b/clang/lib/Parse/ParseHLSLRootSignature.cpp
@@ -27,11 +27,10 @@ static const TokenKind RootElementKeywords[] = {
};
RootSignatureParser::RootSignatureParser(
- llvm::dxbc::RootSignatureVersion Version,
- SmallVector<RootSignatureElement> &Elements, StringLiteral *Signature,
+ llvm::dxbc::RootSignatureVersion Version, StringLiteral *Signature,
Preprocessor &PP)
- : Version(Version), Elements(Elements), Signature(Signature),
- Lexer(Signature->getString()), PP(PP), CurToken(0) {}
+ : Version(Version), Signature(Signature), Lexer(Signature->getString()),
+ PP(PP), CurToken(0) {}
bool RootSignatureParser::parse() {
// Iterate as many RootSignatureElements as possible, until we hit the
diff --git a/clang/lib/Sema/AnalysisBasedWarnings.cpp b/clang/lib/Sema/AnalysisBasedWarnings.cpp
index 829c81b..35ad0b5 100644
--- a/clang/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/clang/lib/Sema/AnalysisBasedWarnings.cpp
@@ -503,8 +503,12 @@ static bool areAllValuesNoReturn(const VarDecl *VD, const CFGBlock &VarBlk,
TransferFunctions TF(VD);
BackwardDataflowWorklist Worklist(*AC.getCFG(), AC);
+ llvm::DenseSet<const CFGBlock *> Visited;
Worklist.enqueueBlock(&VarBlk);
while (const CFGBlock *B = Worklist.dequeue()) {
+ if (Visited.contains(B))
+ continue;
+ Visited.insert(B);
// First check the current block.
for (CFGBlock::const_reverse_iterator ri = B->rbegin(), re = B->rend();
ri != re; ++ri) {
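
The Visited set added above keeps the backward dataflow walk from re-enqueueing blocks it has already processed, which otherwise risks redundant work or non-termination on cyclic CFGs. The same guard in a generic worklist sketch, using plain STL types instead of the CFG classes:

#include <deque>
#include <unordered_set>
#include <vector>

struct Block { std::vector<Block *> Preds; };

// Visit every block reachable backwards from Start exactly once.
template <typename Fn> void walkBackwards(Block *Start, Fn Visit) {
  std::deque<Block *> Worklist{Start};
  std::unordered_set<Block *> Visited;
  while (!Worklist.empty()) {
    Block *B = Worklist.front();
    Worklist.pop_front();
    if (!Visited.insert(B).second)
      continue;                        // already handled; skip re-processing
    Visit(B);
    for (Block *P : B->Preds)
      Worklist.push_back(P);
  }
}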
diff --git a/clang/lib/Sema/Sema.cpp b/clang/lib/Sema/Sema.cpp
index 56608e9..43a7f9e 100644
--- a/clang/lib/Sema/Sema.cpp
+++ b/clang/lib/Sema/Sema.cpp
@@ -1616,6 +1616,8 @@ void Sema::ActOnEndOfTranslationUnit() {
if (!PP.isIncrementalProcessingEnabled())
TUScope = nullptr;
+
+ checkExposure(Context.getTranslationUnitDecl());
}
@@ -2249,16 +2251,15 @@ void Sema::checkTypeSupport(QualType Ty, SourceLocation Loc, ValueDecl *D) {
}
// Don't allow SVE types in functions without a SVE target.
- if (Ty->isSVESizelessBuiltinType() && FD) {
+ if (Ty->isSVESizelessBuiltinType() && FD && !FD->getType().isNull()) {
llvm::StringMap<bool> CallerFeatureMap;
Context.getFunctionFeatureMap(CallerFeatureMap, FD);
if (!Builtin::evaluateRequiredTargetFeatures("sve", CallerFeatureMap)) {
if (!Builtin::evaluateRequiredTargetFeatures("sme", CallerFeatureMap))
Diag(Loc, diag::err_sve_vector_in_non_sve_target) << Ty;
else if (!IsArmStreamingFunction(FD,
- /*IncludeLocallyStreaming=*/true)) {
+ /*IncludeLocallyStreaming=*/true))
Diag(Loc, diag::err_sve_vector_in_non_streaming_function) << Ty;
- }
}
}
diff --git a/clang/lib/Sema/SemaARM.cpp b/clang/lib/Sema/SemaARM.cpp
index 8e27fab..e09c352 100644
--- a/clang/lib/Sema/SemaARM.cpp
+++ b/clang/lib/Sema/SemaARM.cpp
@@ -846,9 +846,9 @@ bool SemaARM::CheckARMCoprocessorImmediate(const TargetInfo &TI,
return false;
}
-bool SemaARM::CheckARMBuiltinExclusiveCall(unsigned BuiltinID,
- CallExpr *TheCall,
- unsigned MaxWidth) {
+bool SemaARM::CheckARMBuiltinExclusiveCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
assert((BuiltinID == ARM::BI__builtin_arm_ldrex ||
BuiltinID == ARM::BI__builtin_arm_ldaex ||
BuiltinID == ARM::BI__builtin_arm_strex ||
@@ -923,12 +923,56 @@ bool SemaARM::CheckARMBuiltinExclusiveCall(unsigned BuiltinID,
return true;
}
- // But ARM doesn't have instructions to deal with 128-bit versions.
- if (Context.getTypeSize(ValType) > MaxWidth) {
- assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate");
- Diag(DRE->getBeginLoc(), diag::err_atomic_exclusive_builtin_pointer_size)
- << PointerArg->getType() << PointerArg->getSourceRange();
- return true;
+ // Check whether the size of the type can be handled atomically on this
+ // target.
+ if (!TI.getTriple().isAArch64()) {
+ unsigned Mask = TI.getARMLDREXMask();
+ unsigned Bits = Context.getTypeSize(ValType);
+ bool Supported =
+ (llvm::isPowerOf2_64(Bits)) && Bits >= 8 && (Mask & (Bits / 8));
+
+ if (!Supported) {
+ // Emit a diagnostic saying that this size isn't available. If _no_ size
+ // of exclusive access is supported on this target, we emit a diagnostic
+ // with special wording for that case, but otherwise, we emit
+ // err_atomic_exclusive_builtin_pointer_size and loop over `Mask` to
+ // control what subset of sizes it lists as legal.
+ if (Mask) {
+ auto D = Diag(DRE->getBeginLoc(),
+ diag::err_atomic_exclusive_builtin_pointer_size)
+ << PointerArg->getType();
+ bool Started = false;
+ for (unsigned Size = 1; Size <= 8; Size <<= 1) {
+ // For each of the sizes 1,2,4,8, pass two integers into the
+ // diagnostic. The first selects a separator from the previous
+ // number: 0 for no separator at all, 1 for a comma, 2 for " or "
+ // which appears before the final number in a list of more than one.
+ // The second integer just indicates whether we print this size in
+ // the message at all.
+ if (!(Mask & Size)) {
+ // This size isn't one of the supported ones, so emit no separator
+ // text and don't print the size itself.
+ D << 0 << 0;
+ } else {
+ // This size is supported, so print it, and an appropriate
+ // separator.
+ Mask &= ~Size;
+ if (!Started)
+ D << 0; // No separator if this is the first size we've printed
+ else if (Mask)
+ D << 1; // "," if there's still another size to come
+ else
+ D << 2; // " or " if the size we're about to print is the last
+ D << 1; // print the size itself
+ Started = true;
+ }
+ }
+ } else {
+ Diag(DRE->getBeginLoc(),
+ diag::err_atomic_exclusive_builtin_pointer_size_none)
+ << PointerArg->getSourceRange();
+ }
+ }
}
switch (ValType.getObjCLifetime()) {
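
The separator bookkeeping in the loop above reads more easily in isolation: each supported size contributes either no separator, a comma, or " or " depending on whether it is the first entry and whether more entries follow, yielding text like "1, 2, 4 or 8". A standalone sketch that builds the list as a plain string instead of feeding the diagnostic engine:

#include <string>

// Mask encodes the supported access widths: the bits for 1, 2, 4 and 8
// select which byte sizes are listed.
std::string renderSizes(unsigned Mask) {
  std::string Out;
  bool Started = false;
  for (unsigned Size = 1; Size <= 8; Size <<= 1) {
    if (!(Mask & Size))
      continue;                  // unsupported size: contributes nothing
    Mask &= ~Size;               // lets us tell whether more sizes follow
    if (Started)
      Out += Mask ? ", " : " or ";
    Out += std::to_string(Size);
    Started = true;
  }
  return Out;                    // Mask = 0xF yields "1, 2, 4 or 8"
}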
@@ -972,7 +1016,7 @@ bool SemaARM::CheckARMBuiltinFunctionCall(const TargetInfo &TI,
BuiltinID == ARM::BI__builtin_arm_ldaex ||
BuiltinID == ARM::BI__builtin_arm_strex ||
BuiltinID == ARM::BI__builtin_arm_stlex) {
- return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64);
+ return CheckARMBuiltinExclusiveCall(TI, BuiltinID, TheCall);
}
if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
@@ -1053,7 +1097,7 @@ bool SemaARM::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
BuiltinID == AArch64::BI__builtin_arm_ldaex ||
BuiltinID == AArch64::BI__builtin_arm_strex ||
BuiltinID == AArch64::BI__builtin_arm_stlex) {
- return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128);
+ return CheckARMBuiltinExclusiveCall(TI, BuiltinID, TheCall);
}
if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
diff --git a/clang/lib/Sema/SemaAvailability.cpp b/clang/lib/Sema/SemaAvailability.cpp
index 8c6a173..68a698f 100644
--- a/clang/lib/Sema/SemaAvailability.cpp
+++ b/clang/lib/Sema/SemaAvailability.cpp
@@ -547,6 +547,12 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
return;
}
case AR_Deprecated:
+ // Suppress -Wdeprecated-declarations in implicit
+ // functions.
+ if (const auto *FD = dyn_cast_or_null<FunctionDecl>(S.getCurFunctionDecl());
+ FD && FD->isImplicit())
+ return;
+
if (ObjCPropertyAccess)
diag = diag::warn_property_method_deprecated;
else if (S.currentEvaluationContext().IsCaseExpr)
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index c74b671..bc87611 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -15893,9 +15893,7 @@ ExprResult Sema::BuiltinMatrixTranspose(CallExpr *TheCall,
// Get and verify the matrix dimensions.
static std::optional<unsigned>
getAndVerifyMatrixDimension(Expr *Expr, StringRef Name, Sema &S) {
- SourceLocation ErrorPos;
- std::optional<llvm::APSInt> Value =
- Expr->getIntegerConstantExpr(S.Context, &ErrorPos);
+ std::optional<llvm::APSInt> Value = Expr->getIntegerConstantExpr(S.Context);
if (!Value) {
S.Diag(Expr->getBeginLoc(), diag::err_builtin_matrix_scalar_unsigned_arg)
<< Name;
diff --git a/clang/lib/Sema/SemaCodeComplete.cpp b/clang/lib/Sema/SemaCodeComplete.cpp
index a43ac9e..0de5580 100644
--- a/clang/lib/Sema/SemaCodeComplete.cpp
+++ b/clang/lib/Sema/SemaCodeComplete.cpp
@@ -4034,6 +4034,14 @@ static void AddOverloadParameterChunks(
return;
}
+ // C++23 introduces an explicit object parameter, a.k.a. "deducing this".
+ // Skip it for autocomplete and treat the next parameter as the first
+ // parameter.
+ if (Function && FirstParameter &&
+ Function->getParamDecl(P)->isExplicitObjectParameter()) {
+ continue;
+ }
+
if (FirstParameter)
FirstParameter = false;
else
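A small C++23 illustration (invented names) of the completion change: the explicit object parameter is skipped, so overload help for the call starts at the first real argument.

struct Widget {
  int value;
  int scaled(this const Widget &self, int factor) { return self.value * factor; }
};

int use(Widget &w) {
  return w.scaled(2); // parameter hints should start at `int factor`,
                      // not at the explicit object parameter `self`
}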
diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp
index 044cf5c..da85959 100644
--- a/clang/lib/Sema/SemaConcept.cpp
+++ b/clang/lib/Sema/SemaConcept.cpp
@@ -1105,10 +1105,6 @@ static bool CheckFunctionConstraintsWithoutInstantiation(
}
Sema::ContextRAII SavedContext(SemaRef, FD);
- std::optional<Sema::CXXThisScopeRAII> ThisScope;
- if (auto *Method = dyn_cast<CXXMethodDecl>(FD))
- ThisScope.emplace(SemaRef, /*Record=*/Method->getParent(),
- /*ThisQuals=*/Method->getMethodQualifiers());
return SemaRef.CheckConstraintSatisfaction(
Template, TemplateAC, MLTAL, PointOfInstantiation, Satisfaction);
}
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index fd22e01..c7e7507 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -3267,6 +3267,14 @@ void Sema::mergeDeclAttributes(NamedDecl *New, Decl *Old,
if (isa<UsedAttr>(I) || isa<RetainAttr>(I))
continue;
+ if (isa<InferredNoReturnAttr>(I)) {
+ if (auto *FD = dyn_cast<FunctionDecl>(New)) {
+ if (FD->getTemplateSpecializationKind() == TSK_ExplicitSpecialization)
+ continue; // Don't propagate inferred noreturn attributes to explicit
+ // specializations.
+ }
+ }
+
if (mergeDeclAttribute(*this, New, I, LocalAMK))
foundAny = true;
}
@@ -12578,9 +12586,9 @@ static bool isDefaultStdCall(FunctionDecl *FD, Sema &S) {
if (FD->getName() == "main" || FD->getName() == "wmain")
return false;
- // Default calling convention for MinGW is __cdecl
+ // Default calling convention for MinGW and Cygwin is __cdecl
const llvm::Triple &T = S.Context.getTargetInfo().getTriple();
- if (T.isWindowsGNUEnvironment())
+ if (T.isOSCygMing())
return false;
// Default calling convention for WinMain, wWinMain and DllMain
@@ -20573,7 +20581,8 @@ TopLevelStmtDecl *Sema::ActOnStartTopLevelStmtDecl(Scope *S) {
}
void Sema::ActOnFinishTopLevelStmtDecl(TopLevelStmtDecl *D, Stmt *Statement) {
- D->setStmt(Statement);
+ if (Statement)
+ D->setStmt(Statement);
PopCompoundScope();
PopFunctionScopeInfo();
PopDeclContext();
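A rough sketch (invented names) of why the inferred-noreturn attribute is no longer propagated to explicit specializations: the primary template may be inferred to never return, while a user-provided specialization returns normally.

[[noreturn]] void fail();

template <class T> void run() { fail(); } // body never returns; noreturn may
                                          // be inferred for the primary template

template <> void run<int>() {}            // explicit specialization returns
                                          // normally and must not inherit the
                                          // inferred attribute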
diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp
index 9a2950c..16b18bc 100644
--- a/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/clang/lib/Sema/SemaDeclAttr.cpp
@@ -1970,6 +1970,13 @@ void clang::inferNoReturnAttr(Sema &S, const Decl *D) {
if (!FD)
return;
+ // Skip explicit specializations here as they may have a user-provided
+ // definition that deliberately differs from the primary template. If an
+ // explicit specialization truly never returns, the user should explicitly
+ // mark it with [[noreturn]].
+ if (FD->getTemplateSpecializationKind() == TSK_ExplicitSpecialization)
+ return;
+
auto *NonConstFD = const_cast<FunctionDecl *>(FD);
DiagnosticsEngine &Diags = S.getDiagnostics();
if (Diags.isIgnored(diag::warn_falloff_nonvoid, FD->getLocation()) &&
@@ -2034,7 +2041,8 @@ bool Sema::CheckAttrTarget(const ParsedAttr &AL) {
// Check whether the attribute is valid on the current target.
if (!AL.existsInTarget(Context.getTargetInfo())) {
if (AL.isRegularKeywordAttribute())
- Diag(AL.getLoc(), diag::err_keyword_not_supported_on_target);
+ Diag(AL.getLoc(), diag::err_keyword_not_supported_on_target)
+ << AL << AL.getRange();
else
DiagnoseUnknownAttribute(AL);
AL.setInvalid();
@@ -4797,10 +4805,10 @@ void Sema::AddModeAttr(Decl *D, const AttributeCommonInfo &CI,
static void handleNonStringAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// This only applies to fields and variable declarations which have an array
- // type.
+ // type or pointer type, with character elements.
QualType QT = cast<ValueDecl>(D)->getType();
- if (!QT->isArrayType() ||
- !QT->getBaseElementTypeUnsafe()->isAnyCharacterType()) {
+ if ((!QT->isArrayType() && !QT->isPointerType()) ||
+ !QT->getPointeeOrArrayElementType()->isAnyCharacterType()) {
S.Diag(D->getBeginLoc(), diag::warn_attribute_non_character_array)
<< AL << AL.isRegularKeywordAttribute() << QT << AL.getRange();
return;
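A sketch of the relaxed handleNonStringAttr check, assuming the GNU `nonstring` spelling: the attribute is now accepted on character pointers as well as character arrays.

__attribute__((nonstring)) char tag[4] = {'a', 'b', 'c', 'd'}; // accepted before
__attribute__((nonstring)) const char *cursor = tag;           // now also accepted

int counter __attribute__((nonstring)); // still diagnosed: neither a character
                                        // array nor a character pointer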
diff --git a/clang/lib/Sema/SemaModule.cpp b/clang/lib/Sema/SemaModule.cpp
index 7c982bc..b137549 100644
--- a/clang/lib/Sema/SemaModule.cpp
+++ b/clang/lib/Sema/SemaModule.cpp
@@ -13,6 +13,7 @@
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/DynamicRecursiveASTVisitor.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/ParsedAttr.h"
@@ -485,6 +486,7 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
// implementation unit importing its interface). Make this module visible
// and return the import decl to be added to the current TU.
if (Interface) {
+ HadImportedNamedModules = true;
makeTransitiveImportsVisible(getASTContext(), VisibleModules, Interface,
Mod, ModuleLoc,
@@ -728,6 +730,8 @@ DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
getCurrentModule()->Imports.insert(Mod);
}
+ HadImportedNamedModules = true;
+
return Import;
}
@@ -1102,3 +1106,467 @@ bool Sema::isCurrentModulePurview() const {
return false;
}
}
+
+//===----------------------------------------------------------------------===//
+// Checking Exposure in modules //
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ExposureChecker {
+public:
+ ExposureChecker(Sema &S) : SemaRef(S) {}
+
+ bool checkExposure(const VarDecl *D, bool Diag);
+ bool checkExposure(const CXXRecordDecl *D, bool Diag);
+ bool checkExposure(const Stmt *S, bool Diag);
+ bool checkExposure(const FunctionDecl *D, bool Diag);
+ bool checkExposure(const NamedDecl *D, bool Diag);
+ void checkExposureInContext(const DeclContext *DC);
+ bool isExposureCandidate(const NamedDecl *D);
+
+ bool isTULocal(QualType Ty);
+ bool isTULocal(const NamedDecl *ND);
+ bool isTULocal(const Expr *E);
+
+ Sema &SemaRef;
+
+private:
+ llvm::DenseSet<const NamedDecl *> ExposureSet;
+ llvm::DenseSet<const NamedDecl *> KnownNonExposureSet;
+};
+
+bool ExposureChecker::isTULocal(QualType Ty) {
+ // [basic.link]p15:
+ // An entity is TU-local if it is
+ // - a type, type alias, namespace, namespace alias, function, variable, or
+ // template that
+ // -- has internal linkage, or
+ return Ty->getLinkage() == Linkage::Internal;
+
+ // TODO:
+ // [basic.link]p15.2:
+ // a type with no name that is defined outside a class-specifier, function
+ // body, or initializer or is introduced by a defining-type-specifier that
+ // is used to declare only TU-local entities,
+}
+
+bool ExposureChecker::isTULocal(const NamedDecl *D) {
+ if (!D)
+ return false;
+
+ // [basic.link]p15:
+ // An entity is TU-local if it is
+ // - a type, type alias, namespace, namespace alias, function, variable, or
+ // template that
+ // -- has internal linkage, or
+ if (D->getLinkageInternal() == Linkage::Internal)
+ return true;
+
+ if (D->isInAnonymousNamespace())
+ return true;
+
+ // [basic.link]p15.1.2:
+ // does not have a name with linkage and is declared, or introduced by a
+ // lambda-expression, within the definition of a TU-local entity,
+ if (D->getLinkageInternal() == Linkage::None)
+ if (auto *ND = dyn_cast<NamedDecl>(D->getDeclContext());
+ ND && isTULocal(ND))
+ return true;
+
+ // [basic.link]p15.3, p15.4:
+ // - a specialization of a TU-local template,
+ // - a specialization of a template with any TU-local template argument, or
+ ArrayRef<TemplateArgument> TemplateArgs;
+ NamedDecl *PrimaryTemplate = nullptr;
+ if (auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
+ TemplateArgs = CTSD->getTemplateArgs().asArray();
+ PrimaryTemplate = CTSD->getSpecializedTemplate();
+ if (isTULocal(PrimaryTemplate))
+ return true;
+ } else if (auto *VTSD = dyn_cast<VarTemplateSpecializationDecl>(D)) {
+ TemplateArgs = VTSD->getTemplateArgs().asArray();
+ PrimaryTemplate = VTSD->getSpecializedTemplate();
+ if (isTULocal(PrimaryTemplate))
+ return true;
+ } else if (auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (auto *TAList = FD->getTemplateSpecializationArgs())
+ TemplateArgs = TAList->asArray();
+
+ PrimaryTemplate = FD->getPrimaryTemplate();
+ if (isTULocal(PrimaryTemplate))
+ return true;
+ }
+
+ if (!PrimaryTemplate)
+ // From here on, we only check specializations.
+ return false;
+
+ if (KnownNonExposureSet.count(D))
+ return false;
+
+ for (auto &TA : TemplateArgs) {
+ switch (TA.getKind()) {
+ case TemplateArgument::Type:
+ if (isTULocal(TA.getAsType()))
+ return true;
+ break;
+ case TemplateArgument::Declaration:
+ if (isTULocal(TA.getAsDecl()))
+ return true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ // [basic.link]p15.5
+ // - a specialization of a template whose (possibly instantiated) declaration
+ // is an exposure.
+ if (checkExposure(PrimaryTemplate, /*Diag=*/false))
+ return true;
+
+ // Avoid calling checkExposure again since it is expensive.
+ KnownNonExposureSet.insert(D);
+ return false;
+}
+
+bool ExposureChecker::isTULocal(const Expr *E) {
+ if (!E)
+ return false;
+
+ // [basic.link]p16:
+ // A value or object is TU-local if either
+ // - it is of TU-local type,
+ if (isTULocal(E->getType()))
+ return true;
+
+ E = E->IgnoreParenImpCasts();
+ // [basic.link]p16.2:
+ // - it is, or is a pointer to, a TU-local function or the object associated
+ // with a TU-local variable,
+ // - it is an object of class or array type and any of its subobjects or any
+ // of the objects or functions to which its non-static data members of
+ // reference type refer is TU-local and is usable in constant expressions, or
+ // FIXME: But how can we know the value of pointers or arrays at compile time?
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (auto *FD = dyn_cast_or_null<FunctionDecl>(DRE->getFoundDecl()))
+ return isTULocal(FD);
+ else if (auto *VD = dyn_cast_or_null<VarDecl>(DRE->getFoundDecl()))
+ return isTULocal(VD);
+ else if (auto *RD = dyn_cast_or_null<CXXRecordDecl>(DRE->getFoundDecl()))
+ return isTULocal(RD);
+ }
+
+ // TODO:
+ // [basic.link]p16.4:
+ // it is a reflection value that represents...
+
+ return false;
+}
+
+bool ExposureChecker::isExposureCandidate(const NamedDecl *D) {
+ if (!D)
+ return false;
+
+ // [basic.link]p17:
+ // If a (possibly instantiated) declaration of, or a deduction guide for,
+ // a non-TU-local entity in a module interface unit
+ // (outside the private-module-fragment, if any) or
+ // module partition is an exposure, the program is ill-formed.
+ Module *M = D->getOwningModule();
+ if (!M || !M->isInterfaceOrPartition())
+ return false;
+
+ if (D->isImplicit())
+ return false;
+
+ // [basic.link]p14:
+ // A declaration is an exposure if it either names a TU-local entity
+ // (defined below), ignoring:
+ // ...
+ // - friend declarations in a class definition
+ if (D->getFriendObjectKind() &&
+ isa<CXXRecordDecl>(D->getLexicalDeclContext()))
+ return false;
+
+ return true;
+}
+
+bool ExposureChecker::checkExposure(const NamedDecl *D, bool Diag) {
+ if (!isExposureCandidate(D))
+ return false;
+
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ return checkExposure(FD, Diag);
+ if (auto *FTD = dyn_cast<FunctionTemplateDecl>(D))
+ return checkExposure(FTD->getTemplatedDecl(), Diag);
+
+ if (auto *VD = dyn_cast<VarDecl>(D))
+ return checkExposure(VD, Diag);
+ if (auto *VTD = dyn_cast<VarTemplateDecl>(D))
+ return checkExposure(VTD->getTemplatedDecl(), Diag);
+
+ if (auto *RD = dyn_cast<CXXRecordDecl>(D))
+ return checkExposure(RD, Diag);
+
+ if (auto *CTD = dyn_cast<ClassTemplateDecl>(D))
+ return checkExposure(CTD->getTemplatedDecl(), Diag);
+
+ return false;
+}
+
+bool ExposureChecker::checkExposure(const FunctionDecl *FD, bool Diag) {
+ bool IsExposure = false;
+ if (isTULocal(FD->getReturnType())) {
+ IsExposure = true;
+ if (Diag)
+ SemaRef.Diag(FD->getReturnTypeSourceRange().getBegin(),
+ diag::warn_exposure)
+ << FD->getReturnType();
+ }
+
+ for (ParmVarDecl *Parms : FD->parameters())
+ if (isTULocal(Parms->getType())) {
+ IsExposure = true;
+ if (Diag)
+ SemaRef.Diag(Parms->getLocation(), diag::warn_exposure)
+ << Parms->getType();
+ }
+
+ bool IsImplicitInstantiation =
+ FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation;
+
+ // [basic.link]p14:
+ // A declaration is an exposure if it either names a TU-local entity
+ // (defined below), ignoring:
+ // - the function-body for a non-inline function or function template
+ // (but not the deduced return
+ // type for a (possibly instantiated) definition of a function with a
+ // declared return type that uses a placeholder type
+ // ([dcl.spec.auto])),
+ Diag &=
+ (FD->isInlined() || IsImplicitInstantiation) && !FD->isDependentContext();
+
+ IsExposure |= checkExposure(FD->getBody(), Diag);
+ if (IsExposure)
+ ExposureSet.insert(FD);
+
+ return IsExposure;
+}
+
+bool ExposureChecker::checkExposure(const VarDecl *VD, bool Diag) {
+ bool IsExposure = false;
+ // [basic.link]p14:
+ // A declaration is an exposure if it either names a TU-local entity (defined
+ // below), ignoring:
+ // ...
+ // or defines a constexpr variable initialized to a TU-local value (defined
+ // below).
+ if (VD->isConstexpr() && isTULocal(VD->getInit())) {
+ IsExposure = true;
+ if (Diag)
+ SemaRef.Diag(VD->getInit()->getExprLoc(), diag::warn_exposure)
+ << VD->getInit();
+ }
+
+ if (isTULocal(VD->getType())) {
+ IsExposure = true;
+ if (Diag)
+ SemaRef.Diag(VD->getLocation(), diag::warn_exposure) << VD->getType();
+ }
+
+ // [basic.link]p14:
+ // ..., ignoring:
+ // - the initializer for a variable or variable template (but not the
+ // variable's type),
+ //
+ // Note: although the spec says to ignore the initializer for all variables,
+ // with the code we currently generate for inline variables it is dangerous
+ // if the initializer of an inline variable is TU-local.
+ Diag &= !VD->getDeclContext()->isDependentContext() && VD->isInline();
+ IsExposure |= checkExposure(VD->getInit(), Diag);
+ if (IsExposure)
+ ExposureSet.insert(VD);
+
+ return IsExposure;
+}
+
+bool ExposureChecker::checkExposure(const CXXRecordDecl *RD, bool Diag) {
+ if (!RD->hasDefinition())
+ return false;
+
+ bool IsExposure = false;
+ for (CXXMethodDecl *Method : RD->methods())
+ IsExposure |= checkExposure(Method, Diag);
+
+ for (FieldDecl *FD : RD->fields()) {
+ if (isTULocal(FD->getType())) {
+ IsExposure = true;
+ if (Diag)
+ SemaRef.Diag(FD->getLocation(), diag::warn_exposure) << FD->getType();
+ }
+ }
+
+ for (const CXXBaseSpecifier &Base : RD->bases()) {
+ if (isTULocal(Base.getType())) {
+ IsExposure = true;
+ if (Diag)
+ SemaRef.Diag(Base.getBaseTypeLoc(), diag::warn_exposure)
+ << Base.getType();
+ }
+ }
+
+ if (IsExposure)
+ ExposureSet.insert(RD);
+
+ return IsExposure;
+}
+
+class ReferenceTULocalChecker : public DynamicRecursiveASTVisitor {
+public:
+ using CallbackTy = std::function<void(DeclRefExpr *, ValueDecl *)>;
+
+ ReferenceTULocalChecker(ExposureChecker &C, CallbackTy &&Callback)
+ : Checker(C), Callback(std::move(Callback)) {}
+
+ bool VisitDeclRefExpr(DeclRefExpr *DRE) override {
+ ValueDecl *Referenced = DRE->getDecl();
+ if (!Referenced)
+ return true;
+
+ if (!Checker.isTULocal(Referenced))
+ // We don't care if the referenced declaration is not TU-local.
+ return true;
+
+ Qualifiers Qual = DRE->getType().getQualifiers();
+ // [basic.link]p14:
+ // A declaration is an exposure if it either names a TU-local entity
+ // (defined below), ignoring:
+ // ...
+ // - any reference to a non-volatile const object ...
+ if (Qual.hasConst() && !Qual.hasVolatile())
+ return true;
+
+ // [basic.link]p14:
+ // ..., ignoring:
+ // ...
+ // (p14.4) - ... or reference with internal or no linkage initialized with
+ // a constant expression that is not an odr-use
+ ASTContext &Context = Referenced->getASTContext();
+ Linkage L = Referenced->getLinkageInternal();
+ if (DRE->isNonOdrUse() && (L == Linkage::Internal || L == Linkage::None))
+ if (auto *VD = dyn_cast<VarDecl>(Referenced);
+ VD && VD->getInit() && !VD->getInit()->isValueDependent() &&
+ VD->getInit()->isConstantInitializer(Context, /*IsForRef=*/false))
+ return true;
+
+ Callback(DRE, Referenced);
+ return true;
+ }
+
+ ExposureChecker &Checker;
+ CallbackTy Callback;
+};
+
+bool ExposureChecker::checkExposure(const Stmt *S, bool Diag) {
+ if (!S)
+ return false;
+
+ bool HasReferencedTULocals = false;
+ ReferenceTULocalChecker Checker(
+ *this, [this, &HasReferencedTULocals, Diag](DeclRefExpr *DRE,
+ ValueDecl *Referenced) {
+ if (Diag) {
+ SemaRef.Diag(DRE->getExprLoc(), diag::warn_exposure) << Referenced;
+ }
+ HasReferencedTULocals = true;
+ });
+ Checker.TraverseStmt(const_cast<Stmt *>(S));
+ return HasReferencedTULocals;
+}
+
+void ExposureChecker::checkExposureInContext(const DeclContext *DC) {
+ for (auto *TopD : DC->noload_decls()) {
+ auto *TopND = dyn_cast<NamedDecl>(TopD);
+ if (!TopND)
+ continue;
+
+ if (auto *Namespace = dyn_cast<NamespaceDecl>(TopND)) {
+ checkExposureInContext(Namespace);
+ continue;
+ }
+
+ // [basic.link]p17:
+ // If a (possibly instantiated) declaration of, or a deduction guide for,
+ // a non-TU-local entity in a module interface unit
+ // (outside the private-module-fragment, if any) or
+ // module partition is an exposure, the program is ill-formed.
+ if (!TopND->isFromASTFile() && isExposureCandidate(TopND) &&
+ !isTULocal(TopND))
+ checkExposure(TopND, /*Diag=*/true);
+ }
+}
+
+} // namespace
+
+void Sema::checkExposure(const TranslationUnitDecl *TU) {
+ if (!TU)
+ return;
+
+ ExposureChecker Checker(*this);
+
+ Module *M = TU->getOwningModule();
+ if (M && M->isInterfaceOrPartition())
+ Checker.checkExposureInContext(TU);
+
+ // [basic.link]p18:
+ // If a declaration that appears in one translation unit names a TU-local
+ // entity declared in another translation unit that is not a header unit,
+ // the program is ill-formed.
+ for (auto FDAndInstantiationLocPair : PendingCheckReferenceForTULocal) {
+ FunctionDecl *FD = FDAndInstantiationLocPair.first;
+ SourceLocation PointOfInstantiation = FDAndInstantiationLocPair.second;
+
+ if (!FD->hasBody())
+ continue;
+
+ ReferenceTULocalChecker(Checker, [&, this](DeclRefExpr *DRE,
+ ValueDecl *Referenced) {
+ // A "defect" in current implementation. Now an implicit instantiation of
+ // a template, the instantiation is considered to be in the same module
+ // unit as the template instead of the module unit where the instantiation
+ // happens.
+ //
+ // See test/Modules/Exposre-2.cppm for example.
+ if (!Referenced->isFromASTFile())
+ return;
+
+ if (!Referenced->isInAnotherModuleUnit())
+ return;
+
+ // This is not standard conforming. But given how many static (inline)
+ // functions exist in headers in existing code, it is more user-friendly
+ // to ignore them for now. Maybe we can add another flag
+ // for this later.
+ if (Referenced->getOwningModule()->isExplicitGlobalModule() &&
+ isa<FunctionDecl>(Referenced))
+ return;
+
+ Diag(PointOfInstantiation,
+ diag::warn_reference_tu_local_entity_in_other_tu)
+ << FD << Referenced
+ << Referenced->getOwningModule()->getTopLevelModuleName();
+ }).TraverseStmt(FD->getBody());
+ }
+}
+
+void Sema::checkReferenceToTULocalFromOtherTU(
+ FunctionDecl *FD, SourceLocation PointOfInstantiation) {
+ // Checking whether a declaration has any reference to TU-local entities in
+ // another TU is expensive. Try to avoid it as much as possible.
+ if (!FD || !HadImportedNamedModules)
+ return;
+
+ PendingCheckReferenceForTULocal.push_back(
+ std::make_pair(FD, PointOfInstantiation));
+}
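A hypothetical module interface unit (file and entity names invented) showing the kinds of exposures the new checker reports:

// m.cppm
export module m;

static int internal_counter;     // TU-local: internal linkage

export inline int next() {
  return ++internal_counter;     // exposure: an inline function body names a
}                                // TU-local entity

namespace {
struct Local {};                 // TU-local: unnamed namespace
}

export Local make();             // exposure: TU-local type in the interface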
diff --git a/clang/lib/Sema/SemaOpenACC.cpp b/clang/lib/Sema/SemaOpenACC.cpp
index 128a5db..8bfea62 100644
--- a/clang/lib/Sema/SemaOpenACC.cpp
+++ b/clang/lib/Sema/SemaOpenACC.cpp
@@ -699,11 +699,19 @@ ExprResult SemaOpenACC::ActOnVar(OpenACCDirectiveKind DK, OpenACCClauseKind CK,
// OpenACC3.3 2.13:
// A 'var' in a 'declare' directive must be a variable or array name.
if ((CK == OpenACCClauseKind::UseDevice ||
- DK == OpenACCDirectiveKind::Declare) &&
- isa<ArraySectionExpr, ArraySubscriptExpr>(CurVarExpr)) {
- Diag(VarExpr->getExprLoc(), diag::err_acc_not_a_var_ref_use_device_declare)
- << (DK == OpenACCDirectiveKind::Declare);
- return ExprError();
+ DK == OpenACCDirectiveKind::Declare)) {
+ if (isa<ArraySubscriptExpr>(CurVarExpr)) {
+ Diag(VarExpr->getExprLoc(),
+ diag::err_acc_not_a_var_ref_use_device_declare)
+ << (DK == OpenACCDirectiveKind::Declare);
+ return ExprError();
+ }
+ // As an extension, we allow 'array sections'/'sub-arrays' here, as they
+ // effectively define an array and are in common use.
+ if (isa<ArraySectionExpr>(CurVarExpr))
+ Diag(VarExpr->getExprLoc(),
+ diag::ext_acc_array_section_use_device_declare)
+ << (DK == OpenACCDirectiveKind::Declare);
}
// Sub-arrays/subscript-exprs are fine as long as the base is a
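A rough C example of the relaxed rule for 'declare' (and 'use_device'), assuming a file-scope OpenACC declare directive: a sub-array now only produces an extension warning, while a single subscripted element is still rejected.

float field[1000];
#pragma acc declare copyin(field[0:500]) /* extension warning, not an error */
/* #pragma acc declare copyin(field[3])     still an error: not a variable or
                                            array name */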
diff --git a/clang/lib/Sema/SemaOpenACCAtomic.cpp b/clang/lib/Sema/SemaOpenACCAtomic.cpp
index 9c8c8d1..a9319dc 100644
--- a/clang/lib/Sema/SemaOpenACCAtomic.cpp
+++ b/clang/lib/Sema/SemaOpenACCAtomic.cpp
@@ -576,6 +576,11 @@ class AtomicOperandChecker {
return AssocStmt;
}
+ const Expr *IgnoreBeforeCompare(const Expr *E) {
+ return E->IgnoreParenImpCasts()->IgnoreParenNoopCasts(
+ SemaRef.getASTContext());
+ }
+
bool CheckVarRefsSame(IDACInfo::ExprKindTy FirstKind, const Expr *FirstX,
IDACInfo::ExprKindTy SecondKind, const Expr *SecondX) {
llvm::FoldingSetNodeID First_ID, Second_ID;
@@ -648,8 +653,10 @@ class AtomicOperandChecker {
if (CheckOperandVariable(AssignRes->RHS, PD))
return getRecoveryExpr();
- if (CheckVarRefsSame(FirstExprResults.ExprKind, FirstExprResults.X_Var,
- IDACInfo::SimpleAssign, AssignRes->RHS))
+ if (CheckVarRefsSame(FirstExprResults.ExprKind,
+ IgnoreBeforeCompare(FirstExprResults.X_Var),
+ IDACInfo::SimpleAssign,
+ IgnoreBeforeCompare(AssignRes->RHS)))
return getRecoveryExpr();
break;
}
@@ -660,9 +667,10 @@ class AtomicOperandChecker {
if (SecondExprResults.Failed)
return getRecoveryExpr();
- if (CheckVarRefsSame(FirstExprResults.ExprKind, FirstExprResults.X_Var,
+ if (CheckVarRefsSame(FirstExprResults.ExprKind,
+ IgnoreBeforeCompare(FirstExprResults.X_Var),
SecondExprResults.ExprKind,
- SecondExprResults.X_Var))
+ IgnoreBeforeCompare(SecondExprResults.X_Var)))
return getRecoveryExpr();
break;
}
diff --git a/clang/lib/Sema/SemaOpenACCClause.cpp b/clang/lib/Sema/SemaOpenACCClause.cpp
index 3f90fe8..b54a012 100644
--- a/clang/lib/Sema/SemaOpenACCClause.cpp
+++ b/clang/lib/Sema/SemaOpenACCClause.cpp
@@ -1919,6 +1919,14 @@ ExprResult SemaOpenACC::CheckReductionVar(OpenACCDirectiveKind DirectiveKind,
<< EltTy << /*Sub array base type*/ 1;
return ExprError();
}
+ } else if (VarExpr->getType()->isArrayType()) {
+ // Arrays are explicitly considered 'aggregate variables', so they are OK;
+ // no additional checking is required.
+ //
+ // Glossary: Aggregate variables – a variable of any non-scalar datatype,
+ // including array or composite variables.
+ //
+ // The next branch (record decl) checks for composite variables.
} else if (auto *RD = VarExpr->getType()->getAsRecordDecl()) {
if (!RD->isStruct() && !RD->isClass()) {
Diag(VarExpr->getExprLoc(), diag::err_acc_reduction_composite_type)
@@ -2246,7 +2254,13 @@ bool SemaOpenACC::CheckDeclareClause(SemaOpenACC::OpenACCParsedClause &Clause,
continue;
}
} else {
- const auto *DRE = cast<DeclRefExpr>(VarExpr);
+
+ const Expr *VarExprTemp = VarExpr;
+
+ while (const auto *ASE = dyn_cast<ArraySectionExpr>(VarExprTemp))
+ VarExprTemp = ASE->getBase()->IgnoreParenImpCasts();
+
+ const auto *DRE = cast<DeclRefExpr>(VarExprTemp);
if (const auto *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
CurDecl = Var->getCanonicalDecl();
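A short sketch (invented kernel) of the reduction relaxation: an array counts as an 'aggregate variable' in the OpenACC glossary, so it is accepted without per-element checks.

void accumulate(int n, const float *in, float out[4]) {
  float totals[4] = {0, 0, 0, 0};
#pragma acc parallel loop reduction(+ : totals)
  for (int i = 0; i < n; ++i)
    totals[i % 4] += in[i];
  for (int i = 0; i < 4; ++i)
    out[i] = totals[i];
}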
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 4ecc9b0..2c5d97c 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -2829,7 +2829,7 @@ static void checkReductionClauses(Sema &S, DSAStackTy *Stack,
continue;
}
for (Expr *Ref : RC->varlist()) {
- assert(Ref && "NULL expr in OpenMP nontemporal clause.");
+ assert(Ref && "NULL expr in OpenMP reduction clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = Ref;
@@ -7612,6 +7612,23 @@ void SemaOpenMP::ActOnOpenMPDeclareVariantDirective(
return;
}
+ // OpenMP 6.0 [9.6.2, page 332, lines 31-33, adjust_args clause, Restrictions]
+ // If the `need_device_addr` adjust-op modifier is present, each list item
+ // that appears in the clause must refer to an argument in the declaration of
+ // the function variant that has a reference type.
+ if (getLangOpts().OpenMP >= 60) {
+ for (Expr *E : AdjustArgsNeedDeviceAddr) {
+ E = E->IgnoreParenImpCasts();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (!VD->getType()->isReferenceType())
+ Diag(E->getExprLoc(),
+ diag::err_omp_non_by_ref_need_device_addr_modifier_argument);
+ }
+ }
+ }
+ }
+
auto *NewAttr = OMPDeclareVariantAttr::CreateImplicit(
getASTContext(), VariantRef, &TI,
const_cast<Expr **>(AdjustArgsNothing.data()), AdjustArgsNothing.size(),
@@ -18344,7 +18361,7 @@ OMPClause *SemaOpenMP::ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
for (Expr *RefExpr : VarList) {
- assert(RefExpr && "NULL expr in OpenMP lastprivate clause.");
+ assert(RefExpr && "NULL expr in OpenMP shared clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
@@ -19991,7 +20008,7 @@ OMPClause *SemaOpenMP::ActOnOpenMPAlignedClause(
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
for (Expr *RefExpr : VarList) {
- assert(RefExpr && "NULL expr in OpenMP linear clause.");
+ assert(RefExpr && "NULL expr in OpenMP aligned clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
@@ -20167,7 +20184,7 @@ OMPClause *SemaOpenMP::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SmallVector<Expr *, 8> DstExprs;
SmallVector<Expr *, 8> AssignmentOps;
for (Expr *RefExpr : VarList) {
- assert(RefExpr && "NULL expr in OpenMP linear clause.");
+ assert(RefExpr && "NULL expr in OpenMP copyprivate clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
@@ -20526,7 +20543,7 @@ OMPClause *SemaOpenMP::ActOnOpenMPDependClause(
TotalDepCount = VarOffset.TotalDepCount;
} else {
for (Expr *RefExpr : VarList) {
- assert(RefExpr && "NULL expr in OpenMP shared clause.");
+ assert(RefExpr && "NULL expr in OpenMP depend clause.");
if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
// It will be analyzed later.
Vars.push_back(RefExpr);
@@ -23737,7 +23754,7 @@ OMPClause *SemaOpenMP::ActOnOpenMPAllocateClause(
// Analyze and build list of variables.
SmallVector<Expr *, 8> Vars;
for (Expr *RefExpr : VarList) {
- assert(RefExpr && "NULL expr in OpenMP private clause.");
+ assert(RefExpr && "NULL expr in OpenMP allocate clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
@@ -23829,7 +23846,7 @@ OMPClause *SemaOpenMP::ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
for (Expr *RefExpr : VarList) {
- assert(RefExpr && "NULL expr in OpenMP nontemporal clause.");
+ assert(RefExpr && "NULL expr in OpenMP inclusive clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
@@ -23870,7 +23887,7 @@ OMPClause *SemaOpenMP::ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
for (Expr *RefExpr : VarList) {
- assert(RefExpr && "NULL expr in OpenMP nontemporal clause.");
+ assert(RefExpr && "NULL expr in OpenMP exclusive clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
@@ -24063,7 +24080,7 @@ OMPClause *SemaOpenMP::ActOnOpenMPAffinityClause(
SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators) {
SmallVector<Expr *, 8> Vars;
for (Expr *RefExpr : Locators) {
- assert(RefExpr && "NULL expr in OpenMP shared clause.");
+ assert(RefExpr && "NULL expr in OpenMP affinity clause.");
if (isa<DependentScopeDeclRefExpr>(RefExpr) || RefExpr->isTypeDependent()) {
// It will be analyzed later.
Vars.push_back(RefExpr);
@@ -24375,7 +24392,7 @@ ExprResult SemaOpenMP::ActOnOMPArraySectionExpr(
return ExprError();
}
}
- } else if (ColonLocFirst.isValid() &&
+ } else if (SemaRef.getLangOpts().OpenMP < 60 && ColonLocFirst.isValid() &&
(OriginalTy.isNull() || (!OriginalTy->isConstantArrayType() &&
!OriginalTy->isVariableArrayType()))) {
// OpenMP 5.0, [2.1.5 Array Sections]
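A rough C++ sketch of the new need_device_addr restriction, assuming the usual dispatch-based declare-variant form (function names invented): the adjusted argument must have reference type.

void compute_variant(int &x);

#pragma omp declare variant(compute_variant) match(construct = {dispatch}) \
    adjust_args(need_device_addr : x)
void compute(int &x); // OK: x has reference type; if x were passed by value,
                      // the new diagnostic would be emitted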
diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp
index 5dd5b49..76e189d 100644
--- a/clang/lib/Sema/SemaOverload.cpp
+++ b/clang/lib/Sema/SemaOverload.cpp
@@ -8042,8 +8042,8 @@ static void AddTemplateOverloadCandidateImmediately(
Candidate.IgnoreObjectArgument =
isa<CXXMethodDecl>(Candidate.Function) &&
- cast<CXXMethodDecl>(Candidate.Function)
- ->isImplicitObjectMemberFunction() &&
+ !cast<CXXMethodDecl>(Candidate.Function)
+ ->isExplicitObjectMemberFunction() &&
!isa<CXXConstructorDecl>(Candidate.Function);
Candidate.ExplicitCallArguments = Args.size();
diff --git a/clang/lib/Sema/SemaStmt.cpp b/clang/lib/Sema/SemaStmt.cpp
index f85826a..3f89843 100644
--- a/clang/lib/Sema/SemaStmt.cpp
+++ b/clang/lib/Sema/SemaStmt.cpp
@@ -295,8 +295,7 @@ void DiagnoseUnused(Sema &S, const Expr *E, std::optional<unsigned> DiagID) {
return;
auto [OffendingDecl, A] = CE->getUnusedResultAttr(S.Context);
- if (DiagnoseNoDiscard(S, OffendingDecl,
- cast_or_null<WarnUnusedResultAttr>(A), Loc, R1, R2,
+ if (DiagnoseNoDiscard(S, OffendingDecl, A, Loc, R1, R2,
/*isCtor=*/false))
return;
@@ -344,13 +343,11 @@ void DiagnoseUnused(Sema &S, const Expr *E, std::optional<unsigned> DiagID) {
S.Diag(Loc, diag::err_arc_unused_init_message) << R1;
return;
}
- const ObjCMethodDecl *MD = ME->getMethodDecl();
- if (MD) {
- if (DiagnoseNoDiscard(S, nullptr, MD->getAttr<WarnUnusedResultAttr>(),
- Loc, R1, R2,
- /*isCtor=*/false))
- return;
- }
+
+ auto [OffendingDecl, A] = ME->getUnusedResultAttr(S.Context);
+ if (DiagnoseNoDiscard(S, OffendingDecl, A, Loc, R1, R2,
+ /*isCtor=*/false))
+ return;
} else if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
const Expr *Source = POE->getSyntacticForm();
// Handle the actually selected call of an OpenMP specialized call.
diff --git a/clang/lib/Sema/SemaStmtAttr.cpp b/clang/lib/Sema/SemaStmtAttr.cpp
index 857d46a..77aa716 100644
--- a/clang/lib/Sema/SemaStmtAttr.cpp
+++ b/clang/lib/Sema/SemaStmtAttr.cpp
@@ -795,6 +795,10 @@ ExprResult Sema::BuildCXXAssumeExpr(Expr *Assumption,
if (Res.isInvalid())
return ExprError();
+ Res = ActOnFinishFullExpr(Res.get(), /*DiscardedValue=*/false);
+ if (Res.isInvalid())
+ return ExprError();
+
Assumption = Res.get();
if (Assumption->HasSideEffects(Context))
Diag(Assumption->getBeginLoc(), diag::warn_assume_side_effects)
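A tiny user-level example of the [[assume]] path touched here; the assumption expression is now finished as a full-expression inside BuildCXXAssumeExpr itself, rather than separately during template instantiation (see the SemaTemplateInstantiate.cpp hunk below).

int divide(int n) {
  [[assume(n != 0)]];
  return 1000 / n;
}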
diff --git a/clang/lib/Sema/SemaTemplate.cpp b/clang/lib/Sema/SemaTemplate.cpp
index 698d127..21fed2e 100644
--- a/clang/lib/Sema/SemaTemplate.cpp
+++ b/clang/lib/Sema/SemaTemplate.cpp
@@ -4749,8 +4749,6 @@ Sema::CheckConceptTemplateId(const CXXScopeSpec &SS,
EnterExpressionEvaluationContext EECtx{
*this, ExpressionEvaluationContext::Unevaluated, CSD};
- ContextRAII CurContext(*this, CSD->getDeclContext(),
- /*NewThisContext=*/false);
if (!AreArgsDependent &&
CheckConstraintSatisfaction(
NamedConcept, AssociatedConstraint(NamedConcept->getConstraintExpr()),
diff --git a/clang/lib/Sema/SemaTemplateDeduction.cpp b/clang/lib/Sema/SemaTemplateDeduction.cpp
index e1a975b..9e56e697 100644
--- a/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -5523,6 +5523,15 @@ static TemplateDeductionResult CheckDeductionConsistency(
// FIXME: A substitution can be incomplete on a non-structural part of the
// type. Use the canonical type for now, until the TemplateInstantiator can
// deal with that.
+
+ // Workaround: Implicit deduction guides use InjectedClassNameTypes, whereas
+ // the explicit guides don't. The substitution doesn't transform these types,
+ // so let it transform their specializations instead.
+ bool IsDeductionGuide = isa<CXXDeductionGuideDecl>(FTD->getTemplatedDecl());
+ if (IsDeductionGuide) {
+ if (auto *Injected = P->getAs<InjectedClassNameType>())
+ P = Injected->getInjectedSpecializationType();
+ }
QualType InstP = S.SubstType(P.getCanonicalType(), MLTAL, FTD->getLocation(),
FTD->getDeclName(), &IsIncompleteSubstitution);
if (InstP.isNull() && !IsIncompleteSubstitution)
@@ -5537,9 +5546,15 @@ static TemplateDeductionResult CheckDeductionConsistency(
if (auto *PA = dyn_cast<PackExpansionType>(A);
PA && !isa<PackExpansionType>(InstP))
A = PA->getPattern();
- if (!S.Context.hasSameType(
- S.Context.getUnqualifiedArrayType(InstP.getNonReferenceType()),
- S.Context.getUnqualifiedArrayType(A.getNonReferenceType())))
+ auto T1 = S.Context.getUnqualifiedArrayType(InstP.getNonReferenceType());
+ auto T2 = S.Context.getUnqualifiedArrayType(A.getNonReferenceType());
+ if (IsDeductionGuide) {
+ if (auto *Injected = T1->getAs<InjectedClassNameType>())
+ T1 = Injected->getInjectedSpecializationType();
+ if (auto *Injected = T2->getAs<InjectedClassNameType>())
+ T2 = Injected->getInjectedSpecializationType();
+ }
+ if (!S.Context.hasSameType(T1, T2))
return TemplateDeductionResult::NonDeducedMismatch;
return TemplateDeductionResult::Success;
}
diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp b/clang/lib/Sema/SemaTemplateInstantiate.cpp
index 20bac0e..d84d0ca1 100644
--- a/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -2270,11 +2270,6 @@ TemplateInstantiator::TransformCXXAssumeAttr(const CXXAssumeAttr *AA) {
if (!Res.isUsable())
return AA;
- Res = getSema().ActOnFinishFullExpr(Res.get(),
- /*DiscardedValue=*/false);
- if (!Res.isUsable())
- return AA;
-
if (!(Res.get()->getDependence() & ExprDependence::TypeValueInstantiation)) {
Res = getSema().BuildCXXAssumeExpr(Res.get(), AA->getAttrName(),
AA->getRange());
diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index e2c3cdc..233bb65 100644
--- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -5853,6 +5853,8 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
// context seems wrong. Investigate more.
ActOnFinishFunctionBody(Function, Body.get(), /*IsInstantiation=*/true);
+ checkReferenceToTULocalFromOtherTU(Function, PointOfInstantiation);
+
PerformDependentDiagnostics(PatternDecl, TemplateArgs);
if (auto *Listener = getASTMutationListener())
diff --git a/clang/lib/Sema/SemaTypeTraits.cpp b/clang/lib/Sema/SemaTypeTraits.cpp
index 1d8687e..c2f0600 100644
--- a/clang/lib/Sema/SemaTypeTraits.cpp
+++ b/clang/lib/Sema/SemaTypeTraits.cpp
@@ -11,9 +11,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/DeclCXX.h"
-#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
-#include "clang/Basic/DiagnosticIDs.h"
#include "clang/Basic/DiagnosticParse.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/TypeTraits.h"
@@ -1965,7 +1963,6 @@ static std::optional<TypeTrait> StdNameToTypeTrait(StringRef Name) {
.Case("is_assignable", TypeTrait::BTT_IsAssignable)
.Case("is_empty", TypeTrait::UTT_IsEmpty)
.Case("is_standard_layout", TypeTrait::UTT_IsStandardLayout)
- .Case("is_constructible", TypeTrait::TT_IsConstructible)
.Default(std::nullopt);
}
@@ -2002,16 +1999,8 @@ static ExtractedTypeTraitInfo ExtractTypeTraitFromExpression(const Expr *E) {
Trait = StdNameToTypeTrait(Name);
if (!Trait)
return std::nullopt;
- for (const auto &Arg : VD->getTemplateArgs().asArray()) {
- if (Arg.getKind() == TemplateArgument::ArgKind::Pack) {
- for (const auto &InnerArg : Arg.pack_elements())
- Args.push_back(InnerArg.getAsType());
- } else if (Arg.getKind() == TemplateArgument::ArgKind::Type) {
- Args.push_back(Arg.getAsType());
- } else {
- llvm_unreachable("Unexpected kind");
- }
- }
+ for (const auto &Arg : VD->getTemplateArgs().asArray())
+ Args.push_back(Arg.getAsType());
return {{Trait.value(), std::move(Args)}};
}
@@ -2284,60 +2273,6 @@ static void DiagnoseNonTriviallyCopyableReason(Sema &SemaRef,
}
}
-static void DiagnoseNonConstructibleReason(
- Sema &SemaRef, SourceLocation Loc,
- const llvm::SmallVector<clang::QualType, 1> &Ts) {
- if (Ts.empty()) {
- return;
- }
-
- bool ContainsVoid = false;
- for (const QualType &ArgTy : Ts) {
- ContainsVoid |= ArgTy->isVoidType();
- }
-
- if (ContainsVoid)
- SemaRef.Diag(Loc, diag::note_unsatisfied_trait_reason)
- << diag::TraitNotSatisfiedReason::CVVoidType;
-
- QualType T = Ts[0];
- if (T->isFunctionType())
- SemaRef.Diag(Loc, diag::note_unsatisfied_trait_reason)
- << diag::TraitNotSatisfiedReason::FunctionType;
-
- if (T->isIncompleteArrayType())
- SemaRef.Diag(Loc, diag::note_unsatisfied_trait_reason)
- << diag::TraitNotSatisfiedReason::IncompleteArrayType;
-
- const CXXRecordDecl *D = T->getAsCXXRecordDecl();
- if (!D || D->isInvalidDecl() || !D->hasDefinition())
- return;
-
- llvm::BumpPtrAllocator OpaqueExprAllocator;
- SmallVector<Expr *, 2> ArgExprs;
- ArgExprs.reserve(Ts.size() - 1);
- for (unsigned I = 1, N = Ts.size(); I != N; ++I) {
- QualType ArgTy = Ts[I];
- if (ArgTy->isObjectType() || ArgTy->isFunctionType())
- ArgTy = SemaRef.Context.getRValueReferenceType(ArgTy);
- ArgExprs.push_back(
- new (OpaqueExprAllocator.Allocate<OpaqueValueExpr>())
- OpaqueValueExpr(Loc, ArgTy.getNonLValueExprType(SemaRef.Context),
- Expr::getValueKindForType(ArgTy)));
- }
-
- EnterExpressionEvaluationContext Unevaluated(
- SemaRef, Sema::ExpressionEvaluationContext::Unevaluated);
- Sema::ContextRAII TUContext(SemaRef,
- SemaRef.Context.getTranslationUnitDecl());
- InitializedEntity To(InitializedEntity::InitializeTemporary(T));
- InitializationKind InitKind(InitializationKind::CreateDirect(Loc, Loc, Loc));
- InitializationSequence Init(SemaRef, To, InitKind, ArgExprs);
-
- Init.Diagnose(SemaRef, To, InitKind, ArgExprs);
- SemaRef.Diag(D->getLocation(), diag::note_defined_here) << D;
-}
-
static void DiagnoseNonTriviallyCopyableReason(Sema &SemaRef,
SourceLocation Loc, QualType T) {
SemaRef.Diag(Loc, diag::note_unsatisfied_trait)
@@ -2624,9 +2559,6 @@ void Sema::DiagnoseTypeTraitDetails(const Expr *E) {
case UTT_IsStandardLayout:
DiagnoseNonStandardLayoutReason(*this, E->getBeginLoc(), Args[0]);
break;
- case TT_IsConstructible:
- DiagnoseNonConstructibleReason(*this, E->getBeginLoc(), Args);
- break;
default:
break;
}
diff --git a/clang/lib/Sema/SemaWasm.cpp b/clang/lib/Sema/SemaWasm.cpp
index 6faea24..8998492 100644
--- a/clang/lib/Sema/SemaWasm.cpp
+++ b/clang/lib/Sema/SemaWasm.cpp
@@ -227,6 +227,53 @@ bool SemaWasm::BuiltinWasmTableCopy(CallExpr *TheCall) {
return false;
}
+bool SemaWasm::BuiltinWasmTestFunctionPointerSignature(CallExpr *TheCall) {
+ if (SemaRef.checkArgCount(TheCall, 1))
+ return true;
+
+ Expr *FuncPtrArg = TheCall->getArg(0);
+ QualType ArgType = FuncPtrArg->getType();
+
+ // Check that the argument is a function pointer
+ const PointerType *PtrTy = ArgType->getAs<PointerType>();
+ if (!PtrTy) {
+ return Diag(FuncPtrArg->getBeginLoc(),
+ diag::err_typecheck_expect_function_pointer)
+ << ArgType << FuncPtrArg->getSourceRange();
+ }
+
+ const FunctionProtoType *FuncTy =
+ PtrTy->getPointeeType()->getAs<FunctionProtoType>();
+ if (!FuncTy) {
+ return Diag(FuncPtrArg->getBeginLoc(),
+ diag::err_typecheck_expect_function_pointer)
+ << ArgType << FuncPtrArg->getSourceRange();
+ }
+
+ // Check that the function pointer doesn't use reference types
+ if (FuncTy->getReturnType().isWebAssemblyReferenceType()) {
+ return Diag(
+ FuncPtrArg->getBeginLoc(),
+ diag::err_wasm_builtin_test_fp_sig_cannot_include_reference_type)
+ << 0 << FuncTy->getReturnType() << FuncPtrArg->getSourceRange();
+ }
+ auto NParams = FuncTy->getNumParams();
+ for (unsigned I = 0; I < NParams; I++) {
+ if (FuncTy->getParamType(I).isWebAssemblyReferenceType()) {
+ return Diag(
+ FuncPtrArg->getBeginLoc(),
+ diag::
+ err_wasm_builtin_test_fp_sig_cannot_include_reference_type)
+ << 1 << FuncPtrArg->getSourceRange();
+ }
+ }
+
+ // Set return type to int (the result of the test)
+ TheCall->setType(getASTContext().IntTy);
+
+ return false;
+}
+
bool SemaWasm::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
unsigned BuiltinID,
CallExpr *TheCall) {
@@ -249,6 +296,8 @@ bool SemaWasm::CheckWebAssemblyBuiltinFunctionCall(const TargetInfo &TI,
return BuiltinWasmTableFill(TheCall);
case WebAssembly::BI__builtin_wasm_table_copy:
return BuiltinWasmTableCopy(TheCall);
+ case WebAssembly::BI__builtin_wasm_test_function_pointer_signature:
+ return BuiltinWasmTestFunctionPointerSignature(TheCall);
}
return false;
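A hypothetical use of the new builtin whose Sema checks appear above. The argument rules — exactly one function-pointer argument with no WebAssembly reference types, and an int result type — come from the code; the runtime meaning of the result is handled elsewhere in CodeGen.

typedef int (*callback_t)(int, float);

int signature_matches(callback_t fp) {
  /* Sema only validates the argument here and gives the call type 'int' */
  return __builtin_wasm_test_function_pointer_signature(fp);
}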
diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp
index 10aedb6..f896f9f1 100644
--- a/clang/lib/Serialization/ASTReader.cpp
+++ b/clang/lib/Serialization/ASTReader.cpp
@@ -8488,6 +8488,7 @@ bool ASTReader::LoadExternalSpecializationsImpl(SpecLookupTableTy &SpecLookups,
bool ASTReader::LoadExternalSpecializations(const Decl *D, bool OnlyPartial) {
assert(D);
+ CompleteRedeclChain(D);
bool NewSpecsFound =
LoadExternalSpecializationsImpl(PartialSpecializationsLookups, D);
if (OnlyPartial)
diff --git a/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
index 88feb6a..e682c4e 100644
--- a/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -99,7 +99,7 @@ class NilArgChecker : public Checker<check::PreObjCMessage,
check::PostStmt<ObjCDictionaryLiteral>,
check::PostStmt<ObjCArrayLiteral>,
EventDispatcher<ImplicitNullDerefEvent>> {
- mutable std::unique_ptr<APIMisuse> BT;
+ const APIMisuse BT{this, "nil argument"};
mutable llvm::SmallDenseMap<Selector, unsigned, 16> StringSelectors;
mutable Selector ArrayWithObjectSel;
@@ -218,10 +218,7 @@ void NilArgChecker::generateBugReport(ExplodedNode *N,
SourceRange Range,
const Expr *E,
CheckerContext &C) const {
- if (!BT)
- BT.reset(new APIMisuse(this, "nil argument"));
-
- auto R = std::make_unique<PathSensitiveBugReport>(*BT, Msg, N);
+ auto R = std::make_unique<PathSensitiveBugReport>(BT, Msg, N);
R->addRange(Range);
bugreporter::trackExpressionValue(N, E, *R);
C.emitReport(std::move(R));
@@ -350,7 +347,7 @@ void NilArgChecker::checkPostStmt(const ObjCDictionaryLiteral *DL,
namespace {
class CFNumberChecker : public Checker< check::PreStmt<CallExpr> > {
- mutable std::unique_ptr<APIMisuse> BT;
+ const APIMisuse BT{this, "Bad use of CFNumber APIs"};
mutable IdentifierInfo *ICreate = nullptr, *IGetValue = nullptr;
public:
CFNumberChecker() = default;
@@ -524,10 +521,7 @@ void CFNumberChecker::checkPreStmt(const CallExpr *CE,
<< " bits of the integer value will be "
<< (isCreate ? "lost." : "garbage.");
- if (!BT)
- BT.reset(new APIMisuse(this, "Bad use of CFNumber APIs"));
-
- auto report = std::make_unique<PathSensitiveBugReport>(*BT, os.str(), N);
+ auto report = std::make_unique<PathSensitiveBugReport>(BT, os.str(), N);
report->addRange(CE->getArg(2)->getSourceRange());
C.emitReport(std::move(report));
}
@@ -539,7 +533,7 @@ void CFNumberChecker::checkPreStmt(const CallExpr *CE,
namespace {
class CFRetainReleaseChecker : public Checker<check::PreCall> {
- mutable APIMisuse BT{this, "null passed to CF memory management function"};
+ const APIMisuse BT{this, "null passed to CF memory management function"};
const CallDescriptionSet ModelledCalls = {
{CDM::CLibrary, {"CFRetain"}, 1},
{CDM::CLibrary, {"CFRelease"}, 1},
@@ -600,7 +594,8 @@ class ClassReleaseChecker : public Checker<check::PreObjCMessage> {
mutable Selector retainS;
mutable Selector autoreleaseS;
mutable Selector drainS;
- mutable std::unique_ptr<BugType> BT;
+ const APIMisuse BT{
+ this, "message incorrectly sent to class instead of class instance"};
public:
void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
@@ -609,10 +604,7 @@ public:
void ClassReleaseChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
CheckerContext &C) const {
- if (!BT) {
- BT.reset(new APIMisuse(
- this, "message incorrectly sent to class instead of class instance"));
-
+ if (releaseS.isNull()) {
ASTContext &Ctx = C.getASTContext();
releaseS = GetNullarySelector("release", Ctx);
retainS = GetNullarySelector("retain", Ctx);
@@ -639,7 +631,7 @@ void ClassReleaseChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
"of class '" << Class->getName()
<< "' and not the class directly";
- auto report = std::make_unique<PathSensitiveBugReport>(*BT, os.str(), N);
+ auto report = std::make_unique<PathSensitiveBugReport>(BT, os.str(), N);
report->addRange(msg.getSourceRange());
C.emitReport(std::move(report));
}
@@ -658,7 +650,8 @@ class VariadicMethodTypeChecker : public Checker<check::PreObjCMessage> {
mutable Selector orderedSetWithObjectsS;
mutable Selector initWithObjectsS;
mutable Selector initWithObjectsAndKeysS;
- mutable std::unique_ptr<BugType> BT;
+ const APIMisuse BT{this, "Arguments passed to variadic method aren't all "
+ "Objective-C pointer types"};
bool isVariadicMessage(const ObjCMethodCall &msg) const;
@@ -717,11 +710,7 @@ VariadicMethodTypeChecker::isVariadicMessage(const ObjCMethodCall &msg) const {
void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
CheckerContext &C) const {
- if (!BT) {
- BT.reset(new APIMisuse(this,
- "Arguments passed to variadic method aren't all "
- "Objective-C pointer types"));
-
+ if (arrayWithObjectsS.isNull()) {
ASTContext &Ctx = C.getASTContext();
arrayWithObjectsS = GetUnarySelector("arrayWithObjects", Ctx);
dictionaryWithObjectsAndKeysS =
@@ -792,8 +781,7 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
ArgTy.print(os, C.getLangOpts());
os << "'";
- auto R =
- std::make_unique<PathSensitiveBugReport>(*BT, os.str(), *errorNode);
+ auto R = std::make_unique<PathSensitiveBugReport>(BT, os.str(), *errorNode);
R->addRange(msg.getArgSourceRange(I));
C.emitReport(std::move(R));
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index 31cb150..fd0a398 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -78,35 +78,30 @@ static QualType getCharPtrType(ASTContext &Ctx, CharKind CK) {
: Ctx.WideCharTy);
}
-class CStringChecker : public Checker< eval::Call,
- check::PreStmt<DeclStmt>,
- check::LiveSymbols,
- check::DeadSymbols,
- check::RegionChanges
- > {
- mutable std::unique_ptr<BugType> BT_Null, BT_Bounds, BT_Overlap,
- BT_NotCString, BT_AdditionOverflow, BT_UninitRead;
-
+class CStringChecker
+ : public CheckerFamily<eval::Call, check::PreStmt<DeclStmt>,
+ check::LiveSymbols, check::DeadSymbols,
+ check::RegionChanges> {
mutable const char *CurrentFunctionDescription = nullptr;
public:
- /// The filter is used to filter out the diagnostics which are not enabled by
- /// the user.
- struct CStringChecksFilter {
- bool CheckCStringNullArg = false;
- bool CheckCStringOutOfBounds = false;
- bool CheckCStringBufferOverlap = false;
- bool CheckCStringNotNullTerm = false;
- bool CheckCStringUninitializedRead = false;
-
- CheckerNameRef CheckNameCStringNullArg;
- CheckerNameRef CheckNameCStringOutOfBounds;
- CheckerNameRef CheckNameCStringBufferOverlap;
- CheckerNameRef CheckNameCStringNotNullTerm;
- CheckerNameRef CheckNameCStringUninitializedRead;
- };
-
- CStringChecksFilter Filter;
+ // FIXME: The bug types emitted by this checker family have confused garbage
+ // in their Description and Category fields (e.g. `categories::UnixAPI` is
+ // passed as the description in several cases and `uninitialized` is mistyped
+ // as `unitialized`). This should be cleaned up.
+ CheckerFrontendWithBugType NullArg{categories::UnixAPI};
+ CheckerFrontendWithBugType OutOfBounds{"Out-of-bound array access"};
+ CheckerFrontendWithBugType BufferOverlap{categories::UnixAPI,
+ "Improper arguments"};
+ CheckerFrontendWithBugType NotNullTerm{categories::UnixAPI};
+ CheckerFrontendWithBugType UninitializedRead{
+ "Accessing unitialized/garbage values"};
+
+ // FIXME: This bug type should be removed because it is only emitted in a
+ // situation that is practically impossible.
+ const BugType AdditionOverflow{&OutOfBounds, "API"};
+
+ StringRef getDebugTag() const override { return "CStringChecker"; }
static void *getTag() { static int tag; return &tag; }
@@ -384,7 +379,7 @@ ProgramStateRef CStringChecker::checkNonNull(CheckerContext &C,
assumeZero(C, State, l, Arg.Expression->getType());
if (stateNull && !stateNonNull) {
- if (Filter.CheckCStringNullArg) {
+ if (NullArg.isEnabled()) {
SmallString<80> buf;
llvm::raw_svector_ostream OS(buf);
assert(CurrentFunctionDescription);
@@ -468,7 +463,7 @@ ProgramStateRef CStringChecker::checkInit(CheckerContext &C,
return State;
// Ensure that we wouldn't read uninitialized value.
- if (Filter.CheckCStringUninitializedRead &&
+ if (UninitializedRead.isEnabled() &&
State->getSVal(*FirstElementVal).isUndef()) {
llvm::SmallString<258> Buf;
llvm::raw_svector_ostream OS(Buf);
@@ -524,7 +519,7 @@ ProgramStateRef CStringChecker::checkInit(CheckerContext &C,
if (!isa<Loc>(LastElementVal))
return State;
- if (Filter.CheckCStringUninitializedRead &&
+ if (UninitializedRead.isEnabled() &&
State->getSVal(LastElementVal.castAs<Loc>()).isUndef()) {
const llvm::APSInt *IdxInt = LastIdx.getAsInteger();
// If we can't get emit a sensible last element index, just bail out --
@@ -581,13 +576,9 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
auto [StInBound, StOutBound] = state->assumeInBoundDual(*Idx, Size);
if (StOutBound && !StInBound) {
- // These checks are either enabled by the CString out-of-bounds checker
- // explicitly or implicitly by the Malloc checker.
- // In the latter case we only do modeling but do not emit warning.
- if (!Filter.CheckCStringOutOfBounds)
+ if (!OutOfBounds.isEnabled())
return nullptr;
- // Emit a bug report.
ErrorMessage Message =
createOutOfBoundErrorMsg(CurrentFunctionDescription, Access);
emitOutOfBoundsBug(C, StOutBound, Buffer.Expression, Message);
@@ -620,7 +611,7 @@ CStringChecker::CheckBufferAccess(CheckerContext &C, ProgramStateRef State,
return nullptr;
// If out-of-bounds checking is turned off, skip the rest.
- if (!Filter.CheckCStringOutOfBounds)
+ if (!OutOfBounds.isEnabled())
return State;
SVal BufStart =
@@ -670,7 +661,7 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
SizeArgExpr Size, AnyArgExpr First,
AnyArgExpr Second,
CharKind CK) const {
- if (!Filter.CheckCStringBufferOverlap)
+ if (!BufferOverlap.isEnabled())
return state;
// Do a simple check for overlap: if the two arguments are from the same
@@ -789,13 +780,9 @@ void CStringChecker::emitOverlapBug(CheckerContext &C, ProgramStateRef state,
if (!N)
return;
- if (!BT_Overlap)
- BT_Overlap.reset(new BugType(Filter.CheckNameCStringBufferOverlap,
- categories::UnixAPI, "Improper arguments"));
-
// Generate a report for this bug.
auto report = std::make_unique<PathSensitiveBugReport>(
- *BT_Overlap, "Arguments must not be overlapping buffers", N);
+ BufferOverlap, "Arguments must not be overlapping buffers", N);
report->addRange(First->getSourceRange());
report->addRange(Second->getSourceRange());
@@ -805,15 +792,8 @@ void CStringChecker::emitOverlapBug(CheckerContext &C, ProgramStateRef state,
void CStringChecker::emitNullArgBug(CheckerContext &C, ProgramStateRef State,
const Stmt *S, StringRef WarningMsg) const {
if (ExplodedNode *N = C.generateErrorNode(State)) {
- if (!BT_Null) {
- // FIXME: This call uses the string constant 'categories::UnixAPI' as the
- // description of the bug; it should be replaced by a real description.
- BT_Null.reset(
- new BugType(Filter.CheckNameCStringNullArg, categories::UnixAPI));
- }
-
auto Report =
- std::make_unique<PathSensitiveBugReport>(*BT_Null, WarningMsg, N);
+ std::make_unique<PathSensitiveBugReport>(NullArg, WarningMsg, N);
Report->addRange(S->getSourceRange());
if (const auto *Ex = dyn_cast<Expr>(S))
bugreporter::trackExpressionValue(N, Ex, *Report);
@@ -826,12 +806,8 @@ void CStringChecker::emitUninitializedReadBug(CheckerContext &C,
const Expr *E, const MemRegion *R,
StringRef Msg) const {
if (ExplodedNode *N = C.generateErrorNode(State)) {
- if (!BT_UninitRead)
- BT_UninitRead.reset(new BugType(Filter.CheckNameCStringUninitializedRead,
- "Accessing unitialized/garbage values"));
-
auto Report =
- std::make_unique<PathSensitiveBugReport>(*BT_UninitRead, Msg, N);
+ std::make_unique<PathSensitiveBugReport>(UninitializedRead, Msg, N);
Report->addNote("Other elements might also be undefined",
Report->getLocation());
Report->addRange(E->getSourceRange());
@@ -845,17 +821,11 @@ void CStringChecker::emitOutOfBoundsBug(CheckerContext &C,
ProgramStateRef State, const Stmt *S,
StringRef WarningMsg) const {
if (ExplodedNode *N = C.generateErrorNode(State)) {
- if (!BT_Bounds)
- BT_Bounds.reset(new BugType(Filter.CheckCStringOutOfBounds
- ? Filter.CheckNameCStringOutOfBounds
- : Filter.CheckNameCStringNullArg,
- "Out-of-bound array access"));
-
// FIXME: It would be nice to eventually make this diagnostic more clear,
// e.g., by referencing the original declaration or by saying *why* this
// reference is outside the range.
auto Report =
- std::make_unique<PathSensitiveBugReport>(*BT_Bounds, WarningMsg, N);
+ std::make_unique<PathSensitiveBugReport>(OutOfBounds, WarningMsg, N);
Report->addRange(S->getSourceRange());
C.emitReport(std::move(Report));
}
@@ -865,15 +835,8 @@ void CStringChecker::emitNotCStringBug(CheckerContext &C, ProgramStateRef State,
const Stmt *S,
StringRef WarningMsg) const {
if (ExplodedNode *N = C.generateNonFatalErrorNode(State)) {
- if (!BT_NotCString) {
- // FIXME: This call uses the string constant 'categories::UnixAPI' as the
- // description of the bug; it should be replaced by a real description.
- BT_NotCString.reset(
- new BugType(Filter.CheckNameCStringNotNullTerm, categories::UnixAPI));
- }
-
auto Report =
- std::make_unique<PathSensitiveBugReport>(*BT_NotCString, WarningMsg, N);
+ std::make_unique<PathSensitiveBugReport>(NotNullTerm, WarningMsg, N);
Report->addRange(S->getSourceRange());
C.emitReport(std::move(Report));
@@ -883,14 +846,6 @@ void CStringChecker::emitNotCStringBug(CheckerContext &C, ProgramStateRef State,
void CStringChecker::emitAdditionOverflowBug(CheckerContext &C,
ProgramStateRef State) const {
if (ExplodedNode *N = C.generateErrorNode(State)) {
- if (!BT_AdditionOverflow) {
- // FIXME: This call uses the word "API" as the description of the bug;
- // it should be replaced by a better error message (if this unlikely
- // situation continues to exist as a separate bug type).
- BT_AdditionOverflow.reset(
- new BugType(Filter.CheckNameCStringOutOfBounds, "API"));
- }
-
// This isn't a great error message, but this should never occur in real
// code anyway -- you'd have to create a buffer longer than a size_t can
// represent, which is sort of a contradiction.
@@ -898,7 +853,7 @@ void CStringChecker::emitAdditionOverflowBug(CheckerContext &C,
"This expression will create a string whose length is too big to "
"be represented as a size_t";
- auto Report = std::make_unique<PathSensitiveBugReport>(*BT_AdditionOverflow,
+ auto Report = std::make_unique<PathSensitiveBugReport>(AdditionOverflow,
WarningMsg, N);
C.emitReport(std::move(Report));
}
@@ -909,7 +864,7 @@ ProgramStateRef CStringChecker::checkAdditionOverflow(CheckerContext &C,
NonLoc left,
NonLoc right) const {
// If out-of-bounds checking is turned off, skip the rest.
- if (!Filter.CheckCStringOutOfBounds)
+ if (!OutOfBounds.isEnabled())
return state;
// If a previous check has failed, propagate the failure.
@@ -1048,7 +1003,7 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
// C string. In the context of locations, the only time we can issue such
// a warning is for labels.
if (std::optional<loc::GotoLabel> Label = Buf.getAs<loc::GotoLabel>()) {
- if (Filter.CheckCStringNotNullTerm) {
+ if (NotNullTerm.isEnabled()) {
SmallString<120> buf;
llvm::raw_svector_ostream os(buf);
assert(CurrentFunctionDescription);
@@ -1110,7 +1065,7 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
// Other regions (mostly non-data) can't have a reliable C string length.
// In this case, an error is emitted and UndefinedVal is returned.
// The caller should always be prepared to handle this case.
- if (Filter.CheckCStringNotNullTerm) {
+ if (NotNullTerm.isEnabled()) {
SmallString<120> buf;
llvm::raw_svector_ostream os(buf);
@@ -2873,24 +2828,27 @@ void CStringChecker::checkDeadSymbols(SymbolReaper &SR,
}
void ento::registerCStringModeling(CheckerManager &Mgr) {
- Mgr.registerChecker<CStringChecker>();
+ // Other checkers rely on the modeling implemented in this checker family,
+ // so this "modeling checker" can register the 'CStringChecker' backend for
+ // its callbacks without enabling any of its frontends.
+ Mgr.getChecker<CStringChecker>();
}
-bool ento::shouldRegisterCStringModeling(const CheckerManager &mgr) {
+bool ento::shouldRegisterCStringModeling(const CheckerManager &) {
return true;
}
-#define REGISTER_CHECKER(name) \
- void ento::register##name(CheckerManager &mgr) { \
- CStringChecker *checker = mgr.getChecker<CStringChecker>(); \
- checker->Filter.Check##name = true; \
- checker->Filter.CheckName##name = mgr.getCurrentCheckerName(); \
+#define REGISTER_CHECKER(NAME) \
+ void ento::registerCString##NAME(CheckerManager &Mgr) { \
+ Mgr.getChecker<CStringChecker>()->NAME.enable(Mgr); \
} \
\
- bool ento::shouldRegister##name(const CheckerManager &mgr) { return true; }
+ bool ento::shouldRegisterCString##NAME(const CheckerManager &) { \
+ return true; \
+ }
-REGISTER_CHECKER(CStringNullArg)
-REGISTER_CHECKER(CStringOutOfBounds)
-REGISTER_CHECKER(CStringBufferOverlap)
-REGISTER_CHECKER(CStringNotNullTerm)
-REGISTER_CHECKER(CStringUninitializedRead)
+REGISTER_CHECKER(NullArg)
+REGISTER_CHECKER(OutOfBounds)
+REGISTER_CHECKER(BufferOverlap)
+REGISTER_CHECKER(NotNullTerm)
+REGISTER_CHECKER(UninitializedRead)
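The CStringChecker hunks above replace the old Filter flags and lazily created BugType members with per-frontend objects that the registration functions switch on. The following is a minimal sketch of that shape, assuming the CheckerFamily / CheckerFrontendWithBugType interfaces exactly as this patch uses them; DemoChecker, DemoPart and registerDemoPart are hypothetical names and are not part of the patch.

  // Sketch only, not part of this patch: it assumes the CheckerFamily /
  // CheckerFrontendWithBugType interfaces as the hunks above use them, plus
  // the usual clang-tree include paths.
  #include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
  #include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
  #include "clang/StaticAnalyzer/Core/Checker.h"
  #include "clang/StaticAnalyzer/Core/CheckerManager.h"
  #include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
  #include <memory>

  using namespace clang;
  using namespace ento;

  namespace {
  // One backend class ("checker family") serves several user-facing checkers.
  class DemoChecker : public CheckerFamily<check::PreStmt<ReturnStmt>> {
  public:
    // Each frontend owns its BugType; description and category are placeholders.
    CheckerFrontendWithBugType DemoPart{"Demo bug", "Demo category"};

    StringRef getDebugTag() const override { return "DemoChecker"; }

    void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const {
      // A frontend that was never enable()d stays silent.
      if (!DemoPart.isEnabled())
        return;
      if (ExplodedNode *N = C.generateNonFatalErrorNode())
        C.emitReport(std::make_unique<PathSensitiveBugReport>(
            DemoPart, "demo report", N));
    }
  };
  } // namespace

  // Registration mirrors the REGISTER_CHECKER macro above: the family object is
  // created on demand, and each frontend is switched on independently.
  void registerDemoPart(CheckerManager &Mgr) { // hypothetical entry point
    Mgr.getChecker<DemoChecker>()->DemoPart.enable(Mgr);
  }
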
diff --git a/clang/lib/StaticAnalyzer/Checkers/CheckPlacementNew.cpp b/clang/lib/StaticAnalyzer/Checkers/CheckPlacementNew.cpp
index 839c8bc..a227ca0 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CheckPlacementNew.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CheckPlacementNew.cpp
@@ -111,32 +111,12 @@ bool PlacementNewChecker::checkPlaceCapacityIsSufficient(
if (!SizeOfPlaceCI)
return true;
- if ((SizeOfPlaceCI->getValue() < SizeOfTargetCI->getValue()) ||
- (IsArrayTypeAllocated &&
- SizeOfPlaceCI->getValue() >= SizeOfTargetCI->getValue())) {
+ if (SizeOfPlaceCI->getValue() < SizeOfTargetCI->getValue()) {
if (ExplodedNode *N = C.generateErrorNode(C.getState())) {
- std::string Msg;
- // TODO: use clang constant
- if (IsArrayTypeAllocated &&
- SizeOfPlaceCI->getValue() > SizeOfTargetCI->getValue())
- Msg = std::string(llvm::formatv(
- "{0} bytes is possibly not enough for array allocation which "
- "requires {1} bytes. Current overhead requires the size of {2} "
- "bytes",
- SizeOfPlaceCI->getValue(), SizeOfTargetCI->getValue(),
- *SizeOfPlaceCI->getValue() - SizeOfTargetCI->getValue()));
- else if (IsArrayTypeAllocated &&
- SizeOfPlaceCI->getValue() == SizeOfTargetCI->getValue())
- Msg = std::string(llvm::formatv(
- "Storage provided to placement new is only {0} bytes, "
- "whereas the allocated array type requires more space for "
- "internal needs",
- SizeOfPlaceCI->getValue()));
- else
- Msg = std::string(llvm::formatv(
- "Storage provided to placement new is only {0} bytes, "
- "whereas the allocated type requires {1} bytes",
- SizeOfPlaceCI->getValue(), SizeOfTargetCI->getValue()));
+ std::string Msg =
+ llvm::formatv("Storage provided to placement new is only {0} bytes, "
+ "whereas the allocated type requires {1} bytes",
+ SizeOfPlaceCI->getValue(), SizeOfTargetCI->getValue());
auto R = std::make_unique<PathSensitiveBugReport>(SBT, Msg, N);
bugreporter::trackExpressionValue(N, NE->getPlacementArg(0), *R);
diff --git a/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
index d7eea7e..152129e 100644
--- a/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -25,18 +25,22 @@ using namespace clang;
using namespace ento;
namespace {
+
+class DerefBugType : public BugType {
+ StringRef ArrayMsg, FieldMsg;
+
+public:
+ DerefBugType(CheckerFrontend *FE, StringRef Desc, const char *AMsg,
+ const char *FMsg = nullptr)
+ : BugType(FE, Desc), ArrayMsg(AMsg), FieldMsg(FMsg ? FMsg : AMsg) {}
+ StringRef getArrayMsg() const { return ArrayMsg; }
+ StringRef getFieldMsg() const { return FieldMsg; }
+};
+
class DereferenceChecker
- : public Checker< check::Location,
- check::Bind,
- EventDispatcher<ImplicitNullDerefEvent> > {
- enum DerefKind {
- NullPointer,
- UndefinedPointerValue,
- AddressOfLabel,
- FixedAddress,
- };
-
- void reportBug(DerefKind K, ProgramStateRef State, const Stmt *S,
+ : public CheckerFamily<check::Location, check::Bind,
+ EventDispatcher<ImplicitNullDerefEvent>> {
+ void reportBug(const DerefBugType &BT, ProgramStateRef State, const Stmt *S,
CheckerContext &C) const;
bool suppressReport(CheckerContext &C, const Expr *E) const;
@@ -52,13 +56,23 @@ public:
const LocationContext *LCtx,
bool loadedFrom = false);
- bool CheckNullDereference = false;
- bool CheckFixedDereference = false;
-
- std::unique_ptr<BugType> BT_Null;
- std::unique_ptr<BugType> BT_Undef;
- std::unique_ptr<BugType> BT_Label;
- std::unique_ptr<BugType> BT_FixedAddress;
+ CheckerFrontend NullDerefChecker, FixedDerefChecker;
+ const DerefBugType NullBug{&NullDerefChecker, "Dereference of null pointer",
+ "a null pointer dereference",
+ "a dereference of a null pointer"};
+ const DerefBugType UndefBug{&NullDerefChecker,
+ "Dereference of undefined pointer value",
+ "an undefined pointer dereference",
+ "a dereference of an undefined pointer value"};
+ const DerefBugType LabelBug{&NullDerefChecker,
+ "Dereference of the address of a label",
+ "an undefined pointer dereference",
+ "a dereference of an address of a label"};
+ const DerefBugType FixedAddressBug{&FixedDerefChecker,
+ "Dereference of a fixed address",
+ "a dereference of a fixed address"};
+
+ StringRef getDebugTag() const override { return "DereferenceChecker"; }
};
} // end anonymous namespace
@@ -158,115 +172,87 @@ static bool isDeclRefExprToReference(const Expr *E) {
return false;
}
-void DereferenceChecker::reportBug(DerefKind K, ProgramStateRef State,
- const Stmt *S, CheckerContext &C) const {
- const BugType *BT = nullptr;
- llvm::StringRef DerefStr1;
- llvm::StringRef DerefStr2;
- switch (K) {
- case DerefKind::NullPointer:
- if (!CheckNullDereference) {
- C.addSink();
- return;
- }
- BT = BT_Null.get();
- DerefStr1 = " results in a null pointer dereference";
- DerefStr2 = " results in a dereference of a null pointer";
- break;
- case DerefKind::UndefinedPointerValue:
- if (!CheckNullDereference) {
- C.addSink();
+void DereferenceChecker::reportBug(const DerefBugType &BT,
+ ProgramStateRef State, const Stmt *S,
+ CheckerContext &C) const {
+ if (&BT == &FixedAddressBug) {
+ if (!FixedDerefChecker.isEnabled())
+ // Deliberately don't add a sink node if check is disabled.
+ // This situation may be valid in special cases.
return;
- }
- BT = BT_Undef.get();
- DerefStr1 = " results in an undefined pointer dereference";
- DerefStr2 = " results in a dereference of an undefined pointer value";
- break;
- case DerefKind::AddressOfLabel:
- if (!CheckNullDereference) {
+ } else {
+ if (!NullDerefChecker.isEnabled()) {
C.addSink();
return;
}
- BT = BT_Label.get();
- DerefStr1 = " results in an undefined pointer dereference";
- DerefStr2 = " results in a dereference of an address of a label";
- break;
- case DerefKind::FixedAddress:
- // Deliberately don't add a sink node if check is disabled.
- // This situation may be valid in special cases.
- if (!CheckFixedDereference)
- return;
-
- BT = BT_FixedAddress.get();
- DerefStr1 = " results in a dereference of a fixed address";
- DerefStr2 = " results in a dereference of a fixed address";
- break;
- };
+ }
// Generate an error node.
ExplodedNode *N = C.generateErrorNode(State);
if (!N)
return;
- SmallString<100> buf;
- llvm::raw_svector_ostream os(buf);
+ SmallString<100> Buf;
+ llvm::raw_svector_ostream Out(Buf);
SmallVector<SourceRange, 2> Ranges;
switch (S->getStmtClass()) {
case Stmt::ArraySubscriptExprClass: {
- os << "Array access";
+ Out << "Array access";
const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(S);
- AddDerefSource(os, Ranges, AE->getBase()->IgnoreParenCasts(),
- State.get(), N->getLocationContext());
- os << DerefStr1;
+ AddDerefSource(Out, Ranges, AE->getBase()->IgnoreParenCasts(), State.get(),
+ N->getLocationContext());
+ Out << " results in " << BT.getArrayMsg();
break;
}
case Stmt::ArraySectionExprClass: {
- os << "Array access";
+ Out << "Array access";
const ArraySectionExpr *AE = cast<ArraySectionExpr>(S);
- AddDerefSource(os, Ranges, AE->getBase()->IgnoreParenCasts(),
- State.get(), N->getLocationContext());
- os << DerefStr1;
+ AddDerefSource(Out, Ranges, AE->getBase()->IgnoreParenCasts(), State.get(),
+ N->getLocationContext());
+ Out << " results in " << BT.getArrayMsg();
break;
}
case Stmt::UnaryOperatorClass: {
- os << BT->getDescription();
+ Out << BT.getDescription();
const UnaryOperator *U = cast<UnaryOperator>(S);
- AddDerefSource(os, Ranges, U->getSubExpr()->IgnoreParens(),
- State.get(), N->getLocationContext(), true);
+ AddDerefSource(Out, Ranges, U->getSubExpr()->IgnoreParens(), State.get(),
+ N->getLocationContext(), true);
break;
}
case Stmt::MemberExprClass: {
const MemberExpr *M = cast<MemberExpr>(S);
if (M->isArrow() || isDeclRefExprToReference(M->getBase())) {
- os << "Access to field '" << M->getMemberNameInfo() << "'" << DerefStr2;
- AddDerefSource(os, Ranges, M->getBase()->IgnoreParenCasts(),
- State.get(), N->getLocationContext(), true);
+ Out << "Access to field '" << M->getMemberNameInfo() << "' results in "
+ << BT.getFieldMsg();
+ AddDerefSource(Out, Ranges, M->getBase()->IgnoreParenCasts(), State.get(),
+ N->getLocationContext(), true);
}
break;
}
case Stmt::ObjCIvarRefExprClass: {
const ObjCIvarRefExpr *IV = cast<ObjCIvarRefExpr>(S);
- os << "Access to instance variable '" << *IV->getDecl() << "'" << DerefStr2;
- AddDerefSource(os, Ranges, IV->getBase()->IgnoreParenCasts(),
- State.get(), N->getLocationContext(), true);
+ Out << "Access to instance variable '" << *IV->getDecl() << "' results in "
+ << BT.getFieldMsg();
+ AddDerefSource(Out, Ranges, IV->getBase()->IgnoreParenCasts(), State.get(),
+ N->getLocationContext(), true);
break;
}
default:
break;
}
- auto report = std::make_unique<PathSensitiveBugReport>(
- *BT, buf.empty() ? BT->getDescription() : buf.str(), N);
+ auto BR = std::make_unique<PathSensitiveBugReport>(
+ BT, Buf.empty() ? BT.getDescription() : Buf.str(), N);
- bugreporter::trackExpressionValue(N, bugreporter::getDerefExpr(S), *report);
+ bugreporter::trackExpressionValue(N, bugreporter::getDerefExpr(S), *BR);
for (SmallVectorImpl<SourceRange>::iterator
I = Ranges.begin(), E = Ranges.end(); I!=E; ++I)
- report->addRange(*I);
+ BR->addRange(*I);
- C.emitReport(std::move(report));
+ C.emitReport(std::move(BR));
}
void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
@@ -275,7 +261,7 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
if (l.isUndef()) {
const Expr *DerefExpr = getDereferenceExpr(S);
if (!suppressReport(C, DerefExpr))
- reportBug(DerefKind::UndefinedPointerValue, C.getState(), DerefExpr, C);
+ reportBug(UndefBug, C.getState(), DerefExpr, C);
return;
}
@@ -296,7 +282,7 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
// we call an "explicit" null dereference.
const Expr *expr = getDereferenceExpr(S);
if (!suppressReport(C, expr)) {
- reportBug(DerefKind::NullPointer, nullState, expr, C);
+ reportBug(NullBug, nullState, expr, C);
return;
}
}
@@ -314,7 +300,7 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
if (location.isConstant()) {
const Expr *DerefExpr = getDereferenceExpr(S, isLoad);
if (!suppressReport(C, DerefExpr))
- reportBug(DerefKind::FixedAddress, notNullState, DerefExpr, C);
+ reportBug(FixedAddressBug, notNullState, DerefExpr, C);
return;
}
@@ -330,7 +316,7 @@ void DereferenceChecker::checkBind(SVal L, SVal V, const Stmt *S,
// One should never write to label addresses.
if (auto Label = L.getAs<loc::GotoLabel>()) {
- reportBug(DerefKind::AddressOfLabel, C.getState(), S, C);
+ reportBug(LabelBug, C.getState(), S, C);
return;
}
@@ -351,7 +337,7 @@ void DereferenceChecker::checkBind(SVal L, SVal V, const Stmt *S,
if (!StNonNull) {
const Expr *expr = getDereferenceExpr(S, /*IsBind=*/true);
if (!suppressReport(C, expr)) {
- reportBug(DerefKind::NullPointer, StNull, expr, C);
+ reportBug(NullBug, StNull, expr, C);
return;
}
}
@@ -369,7 +355,7 @@ void DereferenceChecker::checkBind(SVal L, SVal V, const Stmt *S,
if (V.isConstant()) {
const Expr *DerefExpr = getDereferenceExpr(S, true);
if (!suppressReport(C, DerefExpr))
- reportBug(DerefKind::FixedAddress, State, DerefExpr, C);
+ reportBug(FixedAddressBug, State, DerefExpr, C);
return;
}
@@ -392,26 +378,8 @@ void DereferenceChecker::checkBind(SVal L, SVal V, const Stmt *S,
C.addTransition(State, this);
}
-void ento::registerDereferenceModeling(CheckerManager &Mgr) {
- Mgr.registerChecker<DereferenceChecker>();
-}
-
-bool ento::shouldRegisterDereferenceModeling(const CheckerManager &) {
- return true;
-}
-
void ento::registerNullDereferenceChecker(CheckerManager &Mgr) {
- auto *Chk = Mgr.getChecker<DereferenceChecker>();
- Chk->CheckNullDereference = true;
- Chk->BT_Null.reset(new BugType(Mgr.getCurrentCheckerName(),
- "Dereference of null pointer",
- categories::LogicError));
- Chk->BT_Undef.reset(new BugType(Mgr.getCurrentCheckerName(),
- "Dereference of undefined pointer value",
- categories::LogicError));
- Chk->BT_Label.reset(new BugType(Mgr.getCurrentCheckerName(),
- "Dereference of the address of a label",
- categories::LogicError));
+ Mgr.getChecker<DereferenceChecker>()->NullDerefChecker.enable(Mgr);
}
bool ento::shouldRegisterNullDereferenceChecker(const CheckerManager &) {
@@ -419,11 +387,7 @@ bool ento::shouldRegisterNullDereferenceChecker(const CheckerManager &) {
}
void ento::registerFixedAddressDereferenceChecker(CheckerManager &Mgr) {
- auto *Chk = Mgr.getChecker<DereferenceChecker>();
- Chk->CheckFixedDereference = true;
- Chk->BT_FixedAddress.reset(new BugType(Mgr.getCurrentCheckerName(),
- "Dereference of a fixed address",
- categories::LogicError));
+ Mgr.getChecker<DereferenceChecker>()->FixedDerefChecker.enable(Mgr);
}
bool ento::shouldRegisterFixedAddressDereferenceChecker(
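DerefBugType above bundles two phrasings with each bug kind, and reportBug appends the array or field variant to a statement-specific prefix. A tiny standalone program illustrating the resulting message shapes (a formatting demo only; the message strings are copied from the hunks above, the field name is invented):

  // Formatting demo only: it reproduces the way DereferenceChecker::reportBug
  // composes its messages, using the strings from the hunks above.
  #include <iostream>
  #include <string>

  int main() {
    const std::string ArrayMsg = "a null pointer dereference";
    const std::string FieldMsg = "a dereference of a null pointer";

    // ArraySubscriptExpr / ArraySectionExpr use the "array" phrasing:
    std::cout << "Array access results in " << ArrayMsg << '\n';
    // MemberExpr / ObjCIvarRefExpr use the "field" phrasing
    // ('data' is an invented field name):
    std::cout << "Access to field 'data' results in " << FieldMsg << '\n';
    return 0;
  }
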
diff --git a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index 68efdba..369d619 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -2693,7 +2693,7 @@ void MallocChecker::HandleUseAfterFree(CheckerContext &C, SourceRange Range,
Frontend->UseFreeBug,
AF.Kind == AF_InnerBuffer
? "Inner pointer of container used after re/deallocation"
- : "Use of memory after it is freed",
+ : "Use of memory after it is released",
N);
R->markInteresting(Sym);
@@ -2721,8 +2721,8 @@ void MallocChecker::HandleDoubleFree(CheckerContext &C, SourceRange Range,
if (ExplodedNode *N = C.generateErrorNode()) {
auto R = std::make_unique<PathSensitiveBugReport>(
Frontend->DoubleFreeBug,
- (Released ? "Attempt to free released memory"
- : "Attempt to free non-owned memory"),
+ (Released ? "Attempt to release already released memory"
+ : "Attempt to release non-owned memory"),
N);
if (Range.isValid())
R->addRange(Range);
@@ -3730,13 +3730,15 @@ PathDiagnosticPieceRef MallocBugVisitor::VisitNode(const ExplodedNode *N,
return nullptr;
}
- // Save the first destructor/function as release point.
- assert(!ReleaseFunctionLC && "There should be only one release point");
+ // Record the stack frame that is _responsible_ for this memory release
+ // event. This will be used by the false positive suppression heuristics
+ // that recognize the release points of reference-counted objects.
+ //
+ // Usually (e.g. in C) we say that the _responsible_ stack frame is the
+ // current innermost stack frame:
ReleaseFunctionLC = CurrentLC->getStackFrame();
-
- // See if we're releasing memory while inlining a destructor that
- // decrement reference counters (or one of its callees).
- // This turns on various common false positive suppressions.
+ // ...but if the stack contains a destructor call, then we say that the
+ // outermost destructor stack frame is the _responsible_ one:
for (const LocationContext *LC = CurrentLC; LC; LC = LC->getParent()) {
if (const auto *DD = dyn_cast<CXXDestructorDecl>(LC->getDecl())) {
if (isReferenceCountingPointerDestructor(DD)) {
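The rewritten comment in MallocBugVisitor::VisitNode states the frame-selection policy in prose: blame the innermost frame by default, but the outermost reference-counting destructor frame when one is on the stack. A condensed sketch of that walk, assuming the LocationContext API used in the hunk and the existing isReferenceCountingPointerDestructor helper from MallocChecker.cpp (selectResponsibleFrame is a hypothetical name):

  // Sketch of the policy described in the comment above; not part of the patch.
  #include "clang/AST/DeclCXX.h"
  #include "clang/Analysis/AnalysisDeclContext.h"

  using namespace clang;

  // Assumed to be the helper defined earlier in MallocChecker.cpp.
  static bool isReferenceCountingPointerDestructor(const CXXDestructorDecl *DD);

  static const StackFrameContext *
  selectResponsibleFrame(const LocationContext *CurrentLC) {
    // Default: the innermost frame, where the release actually happened.
    const StackFrameContext *Responsible = CurrentLC->getStackFrame();
    // If reference-counting destructors are on the stack, blame the outermost
    // one instead, so the suppression heuristics see the whole destructor call.
    for (const LocationContext *LC = CurrentLC; LC; LC = LC->getParent())
      if (const auto *DD = dyn_cast<CXXDestructorDecl>(LC->getDecl()))
        if (isReferenceCountingPointerDestructor(DD))
          Responsible = LC->getStackFrame();
    return Responsible;
  }
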
diff --git a/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
index 15fd9a0..d2760ca 100644
--- a/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
@@ -142,34 +142,19 @@ void CFErrorFunctionChecker::checkASTDecl(const FunctionDecl *D,
//===----------------------------------------------------------------------===//
namespace {
+class NSOrCFErrorDerefChecker
+ : public CheckerFamily<check::Location,
+ check::Event<ImplicitNullDerefEvent>> {
+ mutable IdentifierInfo *NSErrorII = nullptr;
+ mutable IdentifierInfo *CFErrorII = nullptr;
-class NSErrorDerefBug : public BugType {
-public:
- NSErrorDerefBug(const CheckerNameRef Checker)
- : BugType(Checker, "NSError** null dereference",
- "Coding conventions (Apple)") {}
-};
-
-class CFErrorDerefBug : public BugType {
public:
- CFErrorDerefBug(const CheckerNameRef Checker)
- : BugType(Checker, "CFErrorRef* null dereference",
- "Coding conventions (Apple)") {}
-};
-
-}
+ CheckerFrontendWithBugType NSError{"NSError** null dereference",
+ "Coding conventions (Apple)"};
+ CheckerFrontendWithBugType CFError{"CFErrorRef* null dereference",
+ "Coding conventions (Apple)"};
-namespace {
-class NSOrCFErrorDerefChecker
- : public Checker< check::Location,
- check::Event<ImplicitNullDerefEvent> > {
- mutable IdentifierInfo *NSErrorII, *CFErrorII;
- mutable std::unique_ptr<NSErrorDerefBug> NSBT;
- mutable std::unique_ptr<CFErrorDerefBug> CFBT;
-public:
- bool ShouldCheckNSError = false, ShouldCheckCFError = false;
- CheckerNameRef NSErrorName, CFErrorName;
- NSOrCFErrorDerefChecker() : NSErrorII(nullptr), CFErrorII(nullptr) {}
+ StringRef getDebugTag() const override { return "NSOrCFErrorDerefChecker"; }
void checkLocation(SVal loc, bool isLoad, const Stmt *S,
CheckerContext &C) const;
@@ -236,12 +221,12 @@ void NSOrCFErrorDerefChecker::checkLocation(SVal loc, bool isLoad,
if (!CFErrorII)
CFErrorII = &Ctx.Idents.get("CFErrorRef");
- if (ShouldCheckNSError && IsNSError(parmT, NSErrorII)) {
+ if (NSError.isEnabled() && IsNSError(parmT, NSErrorII)) {
setFlag<NSErrorOut>(state, state->getSVal(loc.castAs<Loc>()), C);
return;
}
- if (ShouldCheckCFError && IsCFError(parmT, CFErrorII)) {
+ if (CFError.isEnabled() && IsCFError(parmT, CFErrorII)) {
setFlag<CFErrorOut>(state, state->getSVal(loc.castAs<Loc>()), C);
return;
}
@@ -274,19 +259,9 @@ void NSOrCFErrorDerefChecker::checkEvent(ImplicitNullDerefEvent event) const {
os << " may be null";
- BugType *bug = nullptr;
- if (isNSError) {
- if (!NSBT)
- NSBT.reset(new NSErrorDerefBug(NSErrorName));
- bug = NSBT.get();
- }
- else {
- if (!CFBT)
- CFBT.reset(new CFErrorDerefBug(CFErrorName));
- bug = CFBT.get();
- }
+ const BugType &BT = isNSError ? NSError : CFError;
BR.emitReport(
- std::make_unique<PathSensitiveBugReport>(*bug, os.str(), event.SinkNode));
+ std::make_unique<PathSensitiveBugReport>(BT, os.str(), event.SinkNode));
}
static bool IsNSError(QualType T, IdentifierInfo *II) {
@@ -320,32 +295,21 @@ static bool IsCFError(QualType T, IdentifierInfo *II) {
return TT->getDecl()->getIdentifier() == II;
}
-void ento::registerNSOrCFErrorDerefChecker(CheckerManager &mgr) {
- mgr.registerChecker<NSOrCFErrorDerefChecker>();
-}
-
-bool ento::shouldRegisterNSOrCFErrorDerefChecker(const CheckerManager &mgr) {
- return true;
-}
-
-void ento::registerNSErrorChecker(CheckerManager &mgr) {
- mgr.registerChecker<NSErrorMethodChecker>();
- NSOrCFErrorDerefChecker *checker = mgr.getChecker<NSOrCFErrorDerefChecker>();
- checker->ShouldCheckNSError = true;
- checker->NSErrorName = mgr.getCurrentCheckerName();
-}
-
-bool ento::shouldRegisterNSErrorChecker(const CheckerManager &mgr) {
- return true;
-}
-
-void ento::registerCFErrorChecker(CheckerManager &mgr) {
- mgr.registerChecker<CFErrorFunctionChecker>();
- NSOrCFErrorDerefChecker *checker = mgr.getChecker<NSOrCFErrorDerefChecker>();
- checker->ShouldCheckCFError = true;
- checker->CFErrorName = mgr.getCurrentCheckerName();
-}
+// This source file implements two user-facing checkers ("osx.cocoa.NSError"
+// and "osx.coreFoundation.CFError") which are both implemented as the
+// combination of two `CheckerFrontend`s that are registered under the same
+// name (but otherwise act independently). Among these 2+2 `CheckerFrontend`s,
+// two come from the checker family `NSOrCFErrorDerefChecker`, while the
+// other two (the `ADDITIONAL_PART`s) are small standalone checkers.
+#define REGISTER_CHECKER(NAME, ADDITIONAL_PART) \
+ void ento::register##NAME##Checker(CheckerManager &Mgr) { \
+ Mgr.getChecker<NSOrCFErrorDerefChecker>()->NAME.enable(Mgr); \
+ Mgr.registerChecker<ADDITIONAL_PART>(); \
+ } \
+ \
+ bool ento::shouldRegister##NAME##Checker(const CheckerManager &) { \
+ return true; \
+ }
-bool ento::shouldRegisterCFErrorChecker(const CheckerManager &mgr) {
- return true;
-}
+REGISTER_CHECKER(NSError, NSErrorMethodChecker)
+REGISTER_CHECKER(CFError, CFErrorFunctionChecker)
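Hand-expanding the macro for one of its two uses makes the registration flow explicit; REGISTER_CHECKER(NSError, NSErrorMethodChecker) produces roughly the following (illustrative expansion of the macro above, not extra code in the patch):

  void ento::registerNSErrorChecker(CheckerManager &Mgr) {
    // Enable the NSError frontend of the shared NSOrCFErrorDerefChecker family...
    Mgr.getChecker<NSOrCFErrorDerefChecker>()->NSError.enable(Mgr);
    // ...and register the small standalone AST checker that accompanies it.
    Mgr.registerChecker<NSErrorMethodChecker>();
  }

  bool ento::shouldRegisterNSErrorChecker(const CheckerManager &) {
    return true;
  }
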
diff --git a/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
index a63497c..019e81f 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
@@ -28,23 +28,22 @@ using namespace ento;
namespace {
class StackAddrEscapeChecker
- : public Checker<check::PreCall, check::PreStmt<ReturnStmt>,
- check::EndFunction> {
+ : public CheckerFamily<check::PreCall, check::PreStmt<ReturnStmt>,
+ check::EndFunction> {
mutable IdentifierInfo *dispatch_semaphore_tII = nullptr;
- mutable std::unique_ptr<BugType> BT_stackleak;
- mutable std::unique_ptr<BugType> BT_returnstack;
- mutable std::unique_ptr<BugType> BT_capturedstackasync;
- mutable std::unique_ptr<BugType> BT_capturedstackret;
public:
- enum CheckKind {
- CK_StackAddrEscapeChecker,
- CK_StackAddrAsyncEscapeChecker,
- CK_NumCheckKinds
- };
+ StringRef getDebugTag() const override { return "StackAddrEscapeChecker"; }
+
+ CheckerFrontend StackAddrEscape;
+ CheckerFrontend StackAddrAsyncEscape;
- bool ChecksEnabled[CK_NumCheckKinds] = {false};
- CheckerNameRef CheckNames[CK_NumCheckKinds];
+ const BugType StackLeak{&StackAddrEscape,
+ "Stack address leaks outside of stack frame"};
+ const BugType ReturnStack{&StackAddrEscape,
+ "Return of address to stack-allocated memory"};
+ const BugType CapturedStackAsync{
+ &StackAddrAsyncEscape, "Address of stack-allocated memory is captured"};
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
@@ -170,10 +169,6 @@ void StackAddrEscapeChecker::EmitReturnLeakError(CheckerContext &C,
ExplodedNode *N = C.generateNonFatalErrorNode();
if (!N)
return;
- if (!BT_returnstack)
- BT_returnstack = std::make_unique<BugType>(
- CheckNames[CK_StackAddrEscapeChecker],
- "Return of address to stack-allocated memory");
// Generate a report for this bug.
SmallString<128> buf;
@@ -184,7 +179,7 @@ void StackAddrEscapeChecker::EmitReturnLeakError(CheckerContext &C,
EmitReturnedAsPartOfError(os, C.getSVal(RetE), R);
auto report =
- std::make_unique<PathSensitiveBugReport>(*BT_returnstack, os.str(), N);
+ std::make_unique<PathSensitiveBugReport>(ReturnStack, os.str(), N);
report->addRange(RetE->getSourceRange());
if (range.isValid())
report->addRange(range);
@@ -215,16 +210,12 @@ void StackAddrEscapeChecker::checkAsyncExecutedBlockCaptures(
ExplodedNode *N = C.generateNonFatalErrorNode();
if (!N)
continue;
- if (!BT_capturedstackasync)
- BT_capturedstackasync = std::make_unique<BugType>(
- CheckNames[CK_StackAddrAsyncEscapeChecker],
- "Address of stack-allocated memory is captured");
SmallString<128> Buf;
llvm::raw_svector_ostream Out(Buf);
SourceRange Range = genName(Out, Region, C.getASTContext());
Out << " is captured by an asynchronously-executed block";
- auto Report = std::make_unique<PathSensitiveBugReport>(
- *BT_capturedstackasync, Out.str(), N);
+ auto Report = std::make_unique<PathSensitiveBugReport>(CapturedStackAsync,
+ Out.str(), N);
if (Range.isValid())
Report->addRange(Range);
C.emitReport(std::move(Report));
@@ -233,7 +224,7 @@ void StackAddrEscapeChecker::checkAsyncExecutedBlockCaptures(
void StackAddrEscapeChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
- if (!ChecksEnabled[CK_StackAddrAsyncEscapeChecker])
+ if (!StackAddrAsyncEscape.isEnabled())
return;
if (!Call.isGlobalCFunction("dispatch_after") &&
!Call.isGlobalCFunction("dispatch_async"))
@@ -357,7 +348,7 @@ FindEscapingStackRegions(CheckerContext &C, const Expr *RetE, SVal RetVal) {
void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS,
CheckerContext &C) const {
- if (!ChecksEnabled[CK_StackAddrEscapeChecker])
+ if (!StackAddrEscape.isEnabled())
return;
const Expr *RetE = RS->getRetValue();
@@ -456,7 +447,7 @@ static bool isInvalidatedSymbolRegion(const MemRegion *Region) {
void StackAddrEscapeChecker::checkEndFunction(const ReturnStmt *RS,
CheckerContext &Ctx) const {
- if (!ChecksEnabled[CK_StackAddrEscapeChecker])
+ if (!StackAddrEscape.isEnabled())
return;
ExplodedNode *Node = Ctx.getPredecessor();
@@ -581,11 +572,6 @@ void StackAddrEscapeChecker::checkEndFunction(const ReturnStmt *RS,
if (!N)
return;
- if (!BT_stackleak)
- BT_stackleak =
- std::make_unique<BugType>(CheckNames[CK_StackAddrEscapeChecker],
- "Stack address leaks outside of stack frame");
-
for (const auto &P : Cb.V) {
const MemRegion *Referrer = P.first->getBaseRegion();
const MemRegion *Referred = P.second;
@@ -604,7 +590,7 @@ void StackAddrEscapeChecker::checkEndFunction(const ReturnStmt *RS,
Out << " is still referred to by a temporary object on the stack"
<< CommonSuffix;
auto Report =
- std::make_unique<PathSensitiveBugReport>(*BT_stackleak, Out.str(), N);
+ std::make_unique<PathSensitiveBugReport>(StackLeak, Out.str(), N);
if (Range.isValid())
Report->addRange(Range);
Ctx.emitReport(std::move(Report));
@@ -618,7 +604,7 @@ void StackAddrEscapeChecker::checkEndFunction(const ReturnStmt *RS,
Out << " is still referred to by the " << *ReferrerVariable << CommonSuffix;
auto Report =
- std::make_unique<PathSensitiveBugReport>(*BT_stackleak, Out.str(), N);
+ std::make_unique<PathSensitiveBugReport>(StackLeak, Out.str(), N);
if (Range.isValid())
Report->addRange(Range);
@@ -626,23 +612,14 @@ void StackAddrEscapeChecker::checkEndFunction(const ReturnStmt *RS,
}
}
-void ento::registerStackAddrEscapeBase(CheckerManager &mgr) {
- mgr.registerChecker<StackAddrEscapeChecker>();
-}
-
-bool ento::shouldRegisterStackAddrEscapeBase(const CheckerManager &mgr) {
- return true;
-}
-
-#define REGISTER_CHECKER(name) \
- void ento::register##name(CheckerManager &Mgr) { \
- StackAddrEscapeChecker *Chk = Mgr.getChecker<StackAddrEscapeChecker>(); \
- Chk->ChecksEnabled[StackAddrEscapeChecker::CK_##name] = true; \
- Chk->CheckNames[StackAddrEscapeChecker::CK_##name] = \
- Mgr.getCurrentCheckerName(); \
+#define REGISTER_CHECKER(NAME) \
+ void ento::register##NAME##Checker(CheckerManager &Mgr) { \
+ Mgr.getChecker<StackAddrEscapeChecker>()->NAME.enable(Mgr); \
} \
\
- bool ento::shouldRegister##name(const CheckerManager &mgr) { return true; }
+ bool ento::shouldRegister##NAME##Checker(const CheckerManager &) { \
+ return true; \
+ }
-REGISTER_CHECKER(StackAddrEscapeChecker)
-REGISTER_CHECKER(StackAddrAsyncEscapeChecker)
+REGISTER_CHECKER(StackAddrEscape)
+REGISTER_CHECKER(StackAddrAsyncEscape)
diff --git a/clang/lib/Tooling/Core/Replacement.cpp b/clang/lib/Tooling/Core/Replacement.cpp
index 1506218..a3214de 100644
--- a/clang/lib/Tooling/Core/Replacement.cpp
+++ b/clang/lib/Tooling/Core/Replacement.cpp
@@ -581,8 +581,8 @@ llvm::Expected<std::string> applyAllReplacements(StringRef Code,
if (Replaces.empty())
return Code.str();
- IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
FileManager Files(FileSystemOptions(), InMemoryFileSystem);
DiagnosticOptions DiagOpts;
DiagnosticsEngine Diagnostics(
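The VFS-construction hunks in this patch (here and in the Tooling and DependencyScanning files below) all apply the same idiom: replace IntrusiveRefCntPtr<T> P(new T(args)) with llvm::makeIntrusiveRefCnt<T>(args). A small self-contained example of that idiom with the same VFS types (the virtual file name and its contents are made up for the demo):

  #include "llvm/ADT/IntrusiveRefCntPtr.h"
  #include "llvm/Support/MemoryBuffer.h"
  #include "llvm/Support/VirtualFileSystem.h"

  int main() {
    // Before: IntrusiveRefCntPtr<InMemoryFileSystem> FS(new InMemoryFileSystem);
    // After: the factory deduces the pointer type and avoids the bare 'new'.
    auto InMemoryFS =
        llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
    InMemoryFS->addFile("/virtual/hello.txt", /*ModificationTime=*/0,
                        llvm::MemoryBuffer::getMemBuffer("hello\n"));

    // Overlays compose the same way as in the Tooling.cpp hunks below.
    auto Overlay = llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(
        llvm::vfs::getRealFileSystem());
    Overlay->pushOverlay(InMemoryFS);
    return Overlay->exists("/virtual/hello.txt") ? 0 : 1;
  }

Like std::make_shared, the factory removes the naked new and keeps the reference count owned by the smart pointer from the start, which is presumably why the patch converts these call sites wholesale.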
diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
index 8ce2706..b2b61de7 100644
--- a/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
+++ b/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
@@ -605,8 +605,8 @@ DependencyScanningWorker::DependencyScanningWorker(
switch (Service.getMode()) {
case ScanningMode::DependencyDirectivesScan:
- DepFS =
- new DependencyScanningWorkerFilesystem(Service.getSharedCache(), FS);
+ DepFS = llvm::makeIntrusiveRefCnt<DependencyScanningWorkerFilesystem>(
+ Service.getSharedCache(), FS);
BaseFS = DepFS;
break;
case ScanningMode::CanonicalPreprocessing:
diff --git a/clang/lib/Tooling/Tooling.cpp b/clang/lib/Tooling/Tooling.cpp
index 5333956..ecafe26 100644
--- a/clang/lib/Tooling/Tooling.cpp
+++ b/clang/lib/Tooling/Tooling.cpp
@@ -227,10 +227,11 @@ bool runToolOnCodeWithArgs(
const Twine &ToolName,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
const FileContentMappings &VirtualMappedFiles) {
- llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayFileSystem(
- new llvm::vfs::OverlayFileSystem(llvm::vfs::getRealFileSystem()));
- llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto OverlayFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(
+ llvm::vfs::getRealFileSystem());
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
SmallString<1024> CodeStorage;
@@ -403,7 +404,7 @@ bool ToolInvocation::run() {
}
const std::unique_ptr<driver::Driver> Driver(
- newDriver(&*Diagnostics, BinaryName, &Files->getVirtualFileSystem()));
+ newDriver(&*Diagnostics, BinaryName, Files->getVirtualFileSystemPtr()));
// The "input file not found" diagnostics from the driver are useful.
// The driver is only aware of the VFS working directory, but some clients
// change this at the FileManager level instead.
@@ -473,8 +474,10 @@ ClangTool::ClangTool(const CompilationDatabase &Compilations,
IntrusiveRefCntPtr<FileManager> Files)
: Compilations(Compilations), SourcePaths(SourcePaths),
PCHContainerOps(std::move(PCHContainerOps)),
- OverlayFileSystem(new llvm::vfs::OverlayFileSystem(std::move(BaseFS))),
- InMemoryFileSystem(new llvm::vfs::InMemoryFileSystem),
+ OverlayFileSystem(llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(
+ std::move(BaseFS))),
+ InMemoryFileSystem(
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>()),
Files(Files ? Files
: new FileManager(FileSystemOptions(), OverlayFileSystem)) {
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
@@ -692,10 +695,11 @@ std::unique_ptr<ASTUnit> buildASTFromCodeWithArgs(
IntrusiveRefCntPtr<llvm::vfs::FileSystem> BaseFS) {
std::vector<std::unique_ptr<ASTUnit>> ASTs;
ASTBuilderAction Action(ASTs);
- llvm::IntrusiveRefCntPtr<llvm::vfs::OverlayFileSystem> OverlayFileSystem(
- new llvm::vfs::OverlayFileSystem(std::move(BaseFS)));
- llvm::IntrusiveRefCntPtr<llvm::vfs::InMemoryFileSystem> InMemoryFileSystem(
- new llvm::vfs::InMemoryFileSystem);
+ auto OverlayFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::OverlayFileSystem>(
+ std::move(BaseFS));
+ auto InMemoryFileSystem =
+ llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
OverlayFileSystem->pushOverlay(InMemoryFileSystem);
llvm::IntrusiveRefCntPtr<FileManager> Files(
new FileManager(FileSystemOptions(), OverlayFileSystem));